From e6487f4c9088768c7829dc07bc018017b26677ba Mon Sep 17 00:00:00 2001 From: Taras Halturin Date: Wed, 4 Sep 2024 07:39:26 +0200 Subject: [PATCH] version 3.0.0 (#173) --- .github/workflows/testLinuxWindowsMacOS.yml | 48 - .gitignore | 1 + ChangeLog.md => CHANGELOG.md | 18 + CODE_OF_CONDUCT.md | 128 - README.md | 359 +- act/README.md | 4 + act/actor.go | 353 ++ act/errors.go | 16 + act/pool.go | 351 ++ act/supervisor.go | 737 ++++ act/supervisor_arfo.go | 503 +++ act/supervisor_ofo.go | 383 ++ act/supervisor_sofo.go | 236 + act/supervisor_unit_test.go | 65 + act/web_worker.go | 297 ++ app/system/app.go | 32 + app/system/inspect/connection.go | 124 + app/system/inspect/inspect.go | 329 ++ app/system/inspect/log.go | 149 + app/system/inspect/message.go | 234 + app/system/inspect/meta.go | 119 + app/system/inspect/meta_state.go | 122 + app/system/inspect/network.go | 108 + app/system/inspect/node.go | 126 + app/system/inspect/process.go | 130 + app/system/inspect/process_list.go | 119 + app/system/inspect/process_state.go | 119 + app/system/metrics.go | 199 + app/system/sup.go | 34 + apps/cloud/app.go | 42 - apps/cloud/client.go | 229 - apps/cloud/handshake.go | 282 -- apps/cloud/sup.go | 28 - apps/cloud/types.go | 84 - apps/erlang/appmon.go | 190 - apps/erlang/erlang.go | 136 - apps/erlang/global_name_server.go | 17 - apps/erlang/net_kernel.go | 92 - apps/system/app.go | 39 - apps/system/metrics.go | 178 - apps/system/sup.go | 28 - apps/system/types.go | 29 - debug.go | 5 +- ergo.go | 79 +- etf/cache.go | 176 - etf/decode.go | 1680 ------- etf/decode_test.go | 707 --- etf/encode.go | 886 ---- etf/encode_test.go | 1489 ------- etf/etf.go | 837 ---- etf/etf_test.go | 903 ---- gen/README.md | 68 - gen/application.go | 319 +- gen/cert.go | 43 + gen/core.go | 68 + gen/default.go | 59 + gen/errors.go | 58 + gen/logger.go | 116 + gen/mailbox.go | 46 + gen/message.go | 130 + gen/meta.go | 72 + gen/network.go | 481 ++ gen/node.go | 349 ++ gen/pool.go | 131 - gen/pool_worker.go | 84 - gen/process.go | 495 +++ gen/raft.go | 2616 ----------- gen/registrar.go | 137 + gen/saga.go | 1347 ------ gen/saga_worker.go | 235 - gen/server.go | 585 --- gen/stage.go | 992 ----- gen/stage_dispatcher.go | 556 --- gen/supervisor.go | 459 -- gen/tcp.go | 408 -- gen/tcp_handler.go | 207 - gen/types.go | 543 +-- gen/udp.go | 317 -- gen/udp_handler.go | 152 - gen/web.go | 223 - gen/web_handler.go | 351 -- go.mod | 4 +- lib/buffer.go | 185 + lib/cert.go | 27 - lib/compress.go | 177 + lib/compress_test.go | 66 + lib/errors.go | 53 - lib/flusher.go | 103 + lib/map.go | 87 + lib/mpsc.go | 79 +- lib/mpsc_test.go | 8 +- lib/netreadwriter.go | 21 - lib/norecover.go | 7 + lib/notrace.go | 7 + lib/recover.go | 7 + lib/timer.go | 30 + lib/tools.go | 253 -- lib/tools_test.go | 18 - lib/trace.go | 7 + meta/tcp_connection.go | 198 + meta/tcp_server.go | 131 + meta/tcp_types.go | 44 + meta/udp_server.go | 147 + meta/udp_types.go | 22 + meta/web_handler.go | 123 + meta/web_server.go | 79 + meta/web_types.go | 25 + net/README.md | 3 + net/edf/benchmarks_test.go | 694 +++ net/edf/decode.go | 1257 ++++++ net/edf/decode_test.go | 3549 +++++++++++++++ net/edf/edf.go | 59 + net/edf/edf_test.go | 40 + net/edf/encode.go | 644 +++ net/edf/encode_test.go | 3917 +++++++++++++++++ net/edf/init.go | 292 ++ net/edf/register.go | 767 ++++ net/edf/register_test.go | 139 + net/handshake/accept.go | 175 + net/handshake/handshake.go | 206 + net/handshake/join.go | 56 + net/handshake/start.go | 158 + net/handshake/types.go | 86 + net/proto/connection.go | 
2944 +++++++++++++ net/proto/enp.go | 145 + net/proto/types.go | 231 + net/registrar/client.go | 382 ++ net/registrar/server.go | 296 ++ net/registrar/types.go | 39 + node/acceptor.go | 78 + node/application.go | 300 ++ node/core.go | 1982 ++++++--- node/log.go | 95 + node/logger.go | 30 + node/meta.go | 269 ++ node/monitor.go | 1111 ----- node/network.go | 2265 ++++------ node/node.go | 1875 ++++++-- node/process.go | 2067 ++++++--- node/static.go | 176 + node/target.go | 122 + node/types.go | 612 --- proto/dist/epmd.go | 237 - proto/dist/flusher.go | 105 - proto/dist/handshake.go | 836 ---- proto/dist/proto.go | 2261 ---------- proto/dist/proto_test.go | 294 -- proto/dist/registrar.go | 362 -- proto/dist/types.go | 41 - tests/001_local/common.go | 32 + tests/001_local/t000_node_test.go | 258 ++ tests/001_local/t001_process_test.go | 406 ++ tests/001_local/t002_actor_test.go | 195 + tests/001_local/t003_actor_send_test.go | 249 ++ tests/001_local/t004_actor_call_test.go | 340 ++ tests/001_local/t005_actor_monitor_test.go | 767 ++++ tests/001_local/t006_actor_link_test.go | 683 +++ tests/001_local/t007_actor_event_test.go | 364 ++ tests/001_local/t008_actor_split_test.go | 196 + tests/001_local/t009_supervisor_sofo_test.go | 486 ++ tests/001_local/t010_supervisor_ofo_test.go | 719 +++ tests/001_local/t011_supervisor_arfo_test.go | 1083 +++++ tests/001_local/t012_application_test.go | 550 +++ tests/001_local/t013_meta_process_test.go | 416 ++ tests/001_local/t014_pool_test.go | 323 ++ tests/001_local/t015_web_test.go | 260 ++ tests/001_local/t016_tcp_test.go | 473 ++ tests/001_local/t017_udp_test.go | 221 + tests/001_local/txxx_template_test.go | 92 + tests/002_distributed/common.go | 32 + tests/002_distributed/t000_connect_test.go | 112 + .../002_distributed/t001_remote_spawn_test.go | 279 ++ .../t002_remote_app_start_test.go | 85 + tests/002_distributed/t003_send_test.go | 532 +++ tests/002_distributed/t004_call_test.go | 322 ++ tests/002_distributed/t005_link_test.go | 872 ++++ tests/002_distributed/t006_monitor_test.go | 858 ++++ tests/002_distributed/t007_event_test.go | 226 + tests/application_test.go | 521 --- tests/atomcache_test.go | 1264 ------ tests/core_test.go | 224 - tests/monitor_test.go | 1814 -------- tests/node_test.go | 1777 -------- tests/raft_data_test.go | 209 - tests/raft_manual_test.go | 138 - tests/raft_test.go | 360 -- tests/saga_cancel_test.go | 550 --- tests/saga_commit_test.go | 259 -- tests/saga_dist_test.go | 332 -- tests/saga_test.go | 245 -- tests/server_test.go | 943 ---- tests/stage_test.go | 973 ---- tests/supervisor_ofa_test.go | 305 -- tests/supervisor_ofo_test.go | 346 -- tests/supervisor_rfo_test.go | 310 -- tests/supervisor_sofo_test.go | 387 -- tests/tcp_test.go | 137 - tests/udp_test.go | 74 - tests/web_test.go | 74 - version.go | 12 +- 200 files changed, 42827 insertions(+), 38189 deletions(-) delete mode 100644 .github/workflows/testLinuxWindowsMacOS.yml rename ChangeLog.md => CHANGELOG.md (92%) delete mode 100644 CODE_OF_CONDUCT.md create mode 100644 act/README.md create mode 100644 act/actor.go create mode 100644 act/errors.go create mode 100644 act/pool.go create mode 100644 act/supervisor.go create mode 100644 act/supervisor_arfo.go create mode 100644 act/supervisor_ofo.go create mode 100644 act/supervisor_sofo.go create mode 100644 act/supervisor_unit_test.go create mode 100644 act/web_worker.go create mode 100644 app/system/app.go create mode 100644 app/system/inspect/connection.go create mode 100644 app/system/inspect/inspect.go create 
mode 100644 app/system/inspect/log.go create mode 100644 app/system/inspect/message.go create mode 100644 app/system/inspect/meta.go create mode 100644 app/system/inspect/meta_state.go create mode 100644 app/system/inspect/network.go create mode 100644 app/system/inspect/node.go create mode 100644 app/system/inspect/process.go create mode 100644 app/system/inspect/process_list.go create mode 100644 app/system/inspect/process_state.go create mode 100644 app/system/metrics.go create mode 100644 app/system/sup.go delete mode 100644 apps/cloud/app.go delete mode 100644 apps/cloud/client.go delete mode 100644 apps/cloud/handshake.go delete mode 100644 apps/cloud/sup.go delete mode 100644 apps/cloud/types.go delete mode 100644 apps/erlang/appmon.go delete mode 100644 apps/erlang/erlang.go delete mode 100644 apps/erlang/global_name_server.go delete mode 100644 apps/erlang/net_kernel.go delete mode 100644 apps/system/app.go delete mode 100644 apps/system/metrics.go delete mode 100644 apps/system/sup.go delete mode 100644 apps/system/types.go delete mode 100644 etf/cache.go delete mode 100644 etf/decode.go delete mode 100644 etf/decode_test.go delete mode 100644 etf/encode.go delete mode 100644 etf/encode_test.go delete mode 100644 etf/etf.go delete mode 100644 etf/etf_test.go delete mode 100644 gen/README.md create mode 100644 gen/cert.go create mode 100644 gen/core.go create mode 100644 gen/default.go create mode 100644 gen/errors.go create mode 100644 gen/logger.go create mode 100644 gen/mailbox.go create mode 100644 gen/message.go create mode 100644 gen/meta.go create mode 100644 gen/network.go create mode 100644 gen/node.go delete mode 100644 gen/pool.go delete mode 100644 gen/pool_worker.go create mode 100644 gen/process.go delete mode 100644 gen/raft.go create mode 100644 gen/registrar.go delete mode 100644 gen/saga.go delete mode 100644 gen/saga_worker.go delete mode 100644 gen/server.go delete mode 100644 gen/stage.go delete mode 100644 gen/stage_dispatcher.go delete mode 100644 gen/supervisor.go delete mode 100644 gen/tcp.go delete mode 100644 gen/tcp_handler.go delete mode 100644 gen/udp.go delete mode 100644 gen/udp_handler.go delete mode 100644 gen/web.go delete mode 100644 gen/web_handler.go create mode 100644 lib/buffer.go create mode 100644 lib/compress.go create mode 100644 lib/compress_test.go delete mode 100644 lib/errors.go create mode 100644 lib/flusher.go create mode 100644 lib/map.go delete mode 100644 lib/netreadwriter.go create mode 100644 lib/norecover.go create mode 100644 lib/notrace.go create mode 100644 lib/recover.go create mode 100644 lib/timer.go delete mode 100644 lib/tools_test.go create mode 100644 lib/trace.go create mode 100644 meta/tcp_connection.go create mode 100644 meta/tcp_server.go create mode 100644 meta/tcp_types.go create mode 100644 meta/udp_server.go create mode 100644 meta/udp_types.go create mode 100644 meta/web_handler.go create mode 100644 meta/web_server.go create mode 100644 meta/web_types.go create mode 100644 net/README.md create mode 100644 net/edf/benchmarks_test.go create mode 100644 net/edf/decode.go create mode 100644 net/edf/decode_test.go create mode 100644 net/edf/edf.go create mode 100644 net/edf/edf_test.go create mode 100644 net/edf/encode.go create mode 100644 net/edf/encode_test.go create mode 100644 net/edf/init.go create mode 100644 net/edf/register.go create mode 100644 net/edf/register_test.go create mode 100644 net/handshake/accept.go create mode 100644 net/handshake/handshake.go create mode 100644 net/handshake/join.go 
create mode 100644 net/handshake/start.go create mode 100644 net/handshake/types.go create mode 100644 net/proto/connection.go create mode 100644 net/proto/enp.go create mode 100644 net/proto/types.go create mode 100644 net/registrar/client.go create mode 100644 net/registrar/server.go create mode 100644 net/registrar/types.go create mode 100644 node/acceptor.go create mode 100644 node/application.go create mode 100644 node/log.go create mode 100644 node/logger.go create mode 100644 node/meta.go delete mode 100644 node/monitor.go create mode 100644 node/static.go create mode 100644 node/target.go delete mode 100644 node/types.go delete mode 100644 proto/dist/epmd.go delete mode 100644 proto/dist/flusher.go delete mode 100644 proto/dist/handshake.go delete mode 100644 proto/dist/proto.go delete mode 100644 proto/dist/proto_test.go delete mode 100644 proto/dist/registrar.go delete mode 100644 proto/dist/types.go create mode 100644 tests/001_local/common.go create mode 100644 tests/001_local/t000_node_test.go create mode 100644 tests/001_local/t001_process_test.go create mode 100644 tests/001_local/t002_actor_test.go create mode 100644 tests/001_local/t003_actor_send_test.go create mode 100644 tests/001_local/t004_actor_call_test.go create mode 100644 tests/001_local/t005_actor_monitor_test.go create mode 100644 tests/001_local/t006_actor_link_test.go create mode 100644 tests/001_local/t007_actor_event_test.go create mode 100644 tests/001_local/t008_actor_split_test.go create mode 100644 tests/001_local/t009_supervisor_sofo_test.go create mode 100644 tests/001_local/t010_supervisor_ofo_test.go create mode 100644 tests/001_local/t011_supervisor_arfo_test.go create mode 100644 tests/001_local/t012_application_test.go create mode 100644 tests/001_local/t013_meta_process_test.go create mode 100644 tests/001_local/t014_pool_test.go create mode 100644 tests/001_local/t015_web_test.go create mode 100644 tests/001_local/t016_tcp_test.go create mode 100644 tests/001_local/t017_udp_test.go create mode 100644 tests/001_local/txxx_template_test.go create mode 100644 tests/002_distributed/common.go create mode 100644 tests/002_distributed/t000_connect_test.go create mode 100644 tests/002_distributed/t001_remote_spawn_test.go create mode 100644 tests/002_distributed/t002_remote_app_start_test.go create mode 100644 tests/002_distributed/t003_send_test.go create mode 100644 tests/002_distributed/t004_call_test.go create mode 100644 tests/002_distributed/t005_link_test.go create mode 100644 tests/002_distributed/t006_monitor_test.go create mode 100644 tests/002_distributed/t007_event_test.go delete mode 100644 tests/application_test.go delete mode 100644 tests/atomcache_test.go delete mode 100644 tests/core_test.go delete mode 100644 tests/monitor_test.go delete mode 100644 tests/node_test.go delete mode 100644 tests/raft_data_test.go delete mode 100644 tests/raft_manual_test.go delete mode 100644 tests/raft_test.go delete mode 100644 tests/saga_cancel_test.go delete mode 100644 tests/saga_commit_test.go delete mode 100644 tests/saga_dist_test.go delete mode 100644 tests/saga_test.go delete mode 100644 tests/server_test.go delete mode 100644 tests/stage_test.go delete mode 100644 tests/supervisor_ofa_test.go delete mode 100644 tests/supervisor_ofo_test.go delete mode 100644 tests/supervisor_rfo_test.go delete mode 100644 tests/supervisor_sofo_test.go delete mode 100644 tests/tcp_test.go delete mode 100644 tests/udp_test.go delete mode 100644 tests/web_test.go diff --git 
a/.github/workflows/testLinuxWindowsMacOS.yml b/.github/workflows/testLinuxWindowsMacOS.yml deleted file mode 100644 index 6906d0c1..00000000 --- a/.github/workflows/testLinuxWindowsMacOS.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: TestLinuxWindowsMacOS - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - - test-master-on-ubuntu-latest: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.17 - - - name: Test - run: go test -v ./... - - test-master-on-macos-latest: - runs-on: macos-latest - steps: - - uses: actions/checkout@v2 - - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.17 - - - name: Test - run: go test -v ./... - - test-master-on-windows-latest: - runs-on: windows-latest - steps: - - uses: actions/checkout@v2 - - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.17 - - - name: Test - run: go test -v ./... diff --git a/.gitignore b/.gitignore index 8cc37593..98ccabef 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,5 @@ tags cover.out tests/cover.out sandbox +.DS_Store diff --git a/ChangeLog.md b/CHANGELOG.md similarity index 92% rename from ChangeLog.md rename to CHANGELOG.md index c3fa515c..79a50ad2 100644 --- a/ChangeLog.md +++ b/CHANGELOG.md @@ -4,6 +4,24 @@ All notable changes to this project will be documented in this file. This format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +#### [v3.0.0](https://github.com/ergo-services/ergo/releases/tag/v1.999.300) 2024-09-04 [tag version v1.999.300] #### + +This version marks a significant milestone in the evolution of the Ergo Framework. The framework's design has been completely overhauled, and this version was built from the ground up. It includes: + +- Significant API Improvements: The `gen.Process`, `gen.Node`, and `gen.Network` interfaces have been enhanced with numerous convenient methods. +- A New Network Stack: This version introduces a completely new network stack for improved performance and flexibility. See https://github.com/ergo-services/benchmarks for the details + +Alongside the release of Ergo Framework 3.0.0, new tools and a library of additional components are also introduced: + +- Tools (observer, saturn) https://github.com/ergo-services/tools +- Loggers (rotate, colored) - https://github.com/ergo-services/logger +- Meta (websocket) - https://github.com/ergo-services/meta +- Application (observer) - https://github.com/ergo-services/application +- Registrar (client Saturn) - https://github.com/ergo-services/registrar +- Proto (erlang23) - https://github.com/ergo-services/proto + +Finally, we've published comprehensive documentation for the framework, providing detailed guides to assist you in leveraging all the capabilities of Ergo Framework effectively. It's available at https://docs.ergo.services.
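To give a feel for the overhauled API this entry describes, here is a minimal sketch of a node running a single actor. The `act.Actor` callbacks and the `Spawn`/`Send` calls mirror the code added in this patch; the `ergo.StartNode` signature and the option structs follow the v3 examples, so treat the exact fields as illustrative rather than definitive.

```go
package main

import (
	"ergo.services/ergo"
	"ergo.services/ergo/act"
	"ergo.services/ergo/gen"
)

// MyActor embeds act.Actor, which supplies default implementations
// for all ActorBehavior callbacks; only HandleMessage is overridden.
type MyActor struct {
	act.Actor
}

// HandleMessage is invoked for messages sent with Send(...).
// Returning a non-nil error terminates the process.
func (a *MyActor) HandleMessage(from gen.PID, message any) error {
	a.Log().Info("got %#v from %s", message, from)
	return nil
}

// factory lets the node create a fresh behavior instance on every spawn
func factory() gen.ProcessBehavior { return &MyActor{} }

func main() {
	node, err := ergo.StartNode("demo@localhost", gen.NodeOptions{})
	if err != nil {
		panic(err)
	}
	pid, err := node.Spawn(factory, gen.ProcessOptions{})
	if err != nil {
		panic(err)
	}
	node.Send(pid, "hello") // delivered to HandleMessage above
	node.Wait()
}
```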
+ #### [v2.2.4](https://github.com/ergo-services/ergo/releases/tag/v1.999.224) 2023-05-01 [tag version v1.999.224] #### This release includes fixes: diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 007e4447..00000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -ceo@ergo.services. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. 
- -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. diff --git a/README.md b/README.md index b01c2dcf..90a3e45b 100644 --- a/README.md +++ b/README.md @@ -1,331 +1,168 @@

Ergo Framework

- -[![GoDoc](https://pkg.go.dev/badge/ergo-services/ergo)](https://pkg.go.dev/github.com/ergo-services/ergo) +[![Gitbook Documentation](https://img.shields.io/badge/GitBook-Documentation-f37f40?style=plastic&logo=gitbook&logoColor=white&style=flat)](https://docs.ergo.services) +[![GoDoc](https://pkg.go.dev/badge/ergo-services/ergo)](https://pkg.go.dev/ergo.services/ergo) [![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) -[![Telegram Community](https://img.shields.io/badge/Telegram-Community-blue?style=flat&logo=telegram)](https://t.me/ergo_services) -[![Discord Community](https://img.shields.io/badge/Discord-Community-5865F2?style=flat&logo=discord&logoColor=white)](https://discord.gg/sdscxKGV62) -[![Twitter](https://img.shields.io/badge/Twitter-ergo__services-1DA1F2?style=flat&logo=twitter&logoColor=white)](https://twitter.com/ergo_services) +[![Telegram Community](https://img.shields.io/badge/Telegram-ergo__services-229ed9?style=flat&logo=telegram&logoColor=white)](https://t.me/ergo_services) +[![Twitter](https://img.shields.io/badge/twitter-ergo__services-00acee?style=flat&logo=x&logoColor=white)](https://x.com/ergo_services) +[![Reddit](https://img.shields.io/badge/Reddit-r/ergo__services-ff4500?style=plastic&logo=reddit&logoColor=white&style=flat)](https://reddit.com/r/ergo_services) -Technologies and design patterns of Erlang/OTP have been proven over the years. Now in Golang. -Up to x5 times faster than original Erlang/OTP in terms of network messaging. -The easiest way to create an OTP-designed application in Golang. +The Ergo Framework is an implementation of ideas, technologies, and design patterns from the Erlang world in the Go programming language. It is based on the actor model, network transparency, and a set of ready-to-use components for development. This significantly simplifies the creation of complex and distributed solutions while maintaining a high level of reliability and performance. -[https://ergo.services](https://ergo.services) - -### Purpose ### +### Features ### -The goal of this project is to leverage Erlang/OTP experience with Golang performance. Ergo Framework implements [DIST protocol](https://erlang.org/doc/apps/erts/erl_dist_protocol.html), [ETF data format](https://erlang.org/doc/apps/erts/erl_ext_dist.html) and [OTP design patterns](https://erlang.org/doc/design_principles/des_princ.html) `gen.Server`, `gen.Supervisor`, `gen.Application` which makes you able to create distributed, high performance and reliable microservice solutions having native integration with Erlang infrastructure +1. **Actor Model**: enables the creation of scalable and fault-tolerant systems using isolated actors that interact through message passing. Actors can exchange asynchronous messages as well as perform synchronous requests, offering flexibility in communication patterns. -### Cloud ### +2. **Network Transparency**: actors can interact regardless of their physical location, supported by a [high-performance](https://github.com/ergo-services/benchmarks) implementation of the [network stack](https://docs.ergo.services/networking/network-stack), which simplifies the creation of distributed systems. -Distributed Cloud is coming. With Ergo Framework you can join your services into a single cluster with transparent networking using our **Cloud Overlay Network** where they can connect to each other smoothly, no matter where they run - AWS, Azure or GCP, or anywhere else. 
All these connections are secured with end-to-end encryption. Read more in this article [https://blog.ergo.services/cloud-overlay-network-3a133d47efe5](https://blog.ergo.services/cloud-overlay-network-3a133d47efe5). +3. **Observability**: the framework includes built-in observability features, including [service discovery](https://docs.ergo.services/networking/service-discovering) and [static routes](https://docs.ergo.services/networking/static-routes), allowing nodes to automatically register themselves and find routes to remote nodes. This mechanism simplifies managing distributed systems by enabling seamless communication and interaction between nodes across the network. -### Quick start ### +4. **Ready-to-use Components**: a set of [ready-to-use actors](https://docs.ergo.services/actors) that simplify development, including state management and error handling. -First, you need to install the boilerplate code generation tool `ergo` - https://github.com/ergo-services/tools using command below - -`go install ergo.services/tools/ergo@latest` - -And then, you can create your project with just one command. Here is example: - Supervision tree - ``` - mynode - |- myapp - | | - | `- mysup - | | - | `- myactor - |- myweb - `- myactor2 - ``` - - To generate project for this design use the following command: - - `ergo -init MyNode -with-app MyApp -with-sup MyApp:MySup -with-actor MySup:MyActor -with-web "MyWeb{port:8000,handlers:3}" -with-actor MyActor2` - - as a result you will get generated project: - - ``` - mynode/ - |-- apps/ - | `-- myapp/ - | |-- myactor.go - | |-- myapp.go - | `-- mysup.go - |-- cmd/ - | |-- myactor2.go - | |-- mynode.go - | |-- myweb.go - | `-- myweb_handler.go - |-- README.md - |-- go.mod - `-- go.sum - ``` - - to try it: - ``` - $ cd mynode - $ go run ./cmd/ - ``` - - You may also read our article about this tool with a great example https://blog.ergo.services/quick-start-1094d56d4e2 -### Features ### +5. **Support for Distributed Systems**: the framework includes built-in mechanisms for creating and managing clustered systems, [distributed events](https://docs.ergo.services/basics/events) (publish/subscribe mechanism), [remote actor spawning](https://docs.ergo.services/networking/remote-spawn-process), and [remote application startup](https://docs.ergo.services/networking/remote-start-application). These features enable easy scaling, efficient message broadcasting across your cluster, and the ability to manage distributed components seamlessly.
-![image](https://user-images.githubusercontent.com/118860/113710255-c57d5500-96e3-11eb-9970-20f49008a990.png) - -* Support Erlang 25 - allows you connect your node to (and accept connection from) any Erlang/Elixir node within a cluster -* Spawn Erlang-like processes -* Register/unregister processes with simple atom -* Set of ready-to-use disign patterns (behaviors) - * `gen.Server` behavior with atomic state and Erlang's gen_server support to make sync request `ServerProcess.Call`, async - `ServerProcess.Cast` or `Process.Send` in fashion of `gen_server:call`, `gen_server:cast`, `erlang:send` accordingly - * `gen.Supervisor` behavior with all known [restart strategies](https://erlang.org/doc/design_principles/sup_princ.html#restart-strategy) (One For One, One For All, Rest For One, Simple One For One) - * `gen.Application` behavior with all known [starting types](https://erlang.org/doc/design_principles/applications.html#application-start-types) (Permanent, Temporary, Transient) - * `gen.Pool` a basic design pattern with a pool of workers. All messages/requests received by the pool process are forwarded to the workers using the "Round Robin" algorithm. The worker process is automatically restarting on termination - * `gen.TCP` - socket acceptor pool for TCP protocols. This behavior aims to provide everything you need to accept TCP connections and process packets with a small code base and low latency while being easy to use. - * `gen.UDP` - acceptor pool for UDP protocols. This behavior provides the same feature set as TCP but for handling UDP packets using pool of handlers - * `gen.Web` - Web API Gateway behavior. This behavior allows you to listen HTTP port and handle HTTP-request using pool of workers. - * `gen.Stage` behavior support (originated from Elixir's [GenStage](https://hexdocs.pm/gen_stage/GenStage.html)). This is abstraction built on top of `gen.Server` to provide a simple way to create a distributed Producer/Consumer architecture, while automatically managing the concept of backpressure. This implementation is fully compatible with Elixir's GenStage. Example is here [examples/genstage](https://github.com/ergo-services/examples/tree/master/genstage) or just run `go run ./examples/genstage` to see it in action - * `gen.Saga` behavior support. It implements Saga design pattern - a sequence of transactions that updates each service state and publishes the result (or cancels the transaction or triggers the next transaction step). `gen.Saga` also provides a feature of interim results (can be used as transaction progress or as a part of pipeline processing), time deadline (to limit transaction lifespan), two-phase commit (to make distributed transaction atomic). Here is example [examples/gensaga](https://github.com/ergo-services/examples/tree/master/gensaga). - * `gen.Raft` behavior support. It's improved implementation of [Raft consensus algorithm](https://raft.github.io). The key improvement is using quorum under the hood to manage the leader election process and make the Raft cluster more reliable. This implementation supports quorums of 3, 5, 7, 9, or 11 quorum members. 
Here is an example of this feature [examples/genraft](https://github.com/ergo-services/examples/tree/master/genraft) -* Monitor processes/nodes, local/remote with Erlang support -* Link processes local/remote with Erlang support -* [embedded EPMD](#epmd) (in order to get rid of erlang' dependencies) with Erlang support -* Unmarshalling terms into the struct using `etf.TermIntoStruct`, `etf.TermProplistIntoStruct` or to the string using `etf.TermToString` including custom marshaling/unmarshaling via `Marshal` and `Unmarshal` interfaces. But it's highly recommended to use `etf.RegisterType` so you will be receiving messages in a native Golang-type -* Encryption (TLS 1.3) support (including autogenerating self-signed certificates) -* Compression support (with customization of compression level and threshold). It can be configured for the node or a particular process. -* Proxy support with end-to-end encryption, includeing compression/fragmentation/linking/monitoring features. -* Tested and confirmed support Windows, Darwin (MacOS), Linux, FreeBSD. -* Zero dependencies. All features are implemented using the standard Golang library. +6. **Reliability and Fault Tolerance**: the framework is designed to minimize failures and ensure automatic recovery, featuring a [supervisor tree](https://docs.ergo.services/basics/supervision-tree) structure to manage and [restart failed actors](https://docs.ergo.services/actors/supervisor#restart-strategy), which is crucial for mission-critical applications. + +7. **Flexibility**: This framework offers convenient interfaces for customizing [network stack components](https://docs.ergo.services/networking/network-stack#network-stack-interfaces), creating and integrating custom [loggers](https://docs.ergo.services/basics/logging), [managing SSL certificates](https://docs.ergo.services/basics/certmanager), and more. -### Requirements ### +In the https://github.com/ergo-services/examples repository, you will find examples that demonstrate a range of the framework's capabilities. -* Go 1.17.x and above +### Observer ### +To inspect the node, network stack, running applications, and processes, you can use the [`observer`](https://github.com/ergo-services/tools/) tool -### Versioning ### + -Golang introduced [v2 rule](https://go.dev/blog/v2-go-modules) a while ago to solve complicated dependency issues. We found this solution very controversial and there is still a lot of discussion around it. So, we decided to keep the old way for the versioning, but have to use the git tag with v1 as a major version (due to "v2 rule" restrictions). Since now we use git tag pattern 1.999.XYZ where X - major number, Y - minor, Z - patch version. +To install the Observer tool, you need to have the Go compiler version 1.20 or higher. Run the following command: -### Changelog ### +``` +$ go install ergo.services/tools/observer@latest +``` -Here are the changes of latest release. For more details see the [ChangeLog](ChangeLog.md) +You can also embed the [Observer application](https://docs.ergo.services/extra-library/applications/observer) into your node. To see it in action, see example `demo` at https://github.com/ergo-services/examples. 
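As a rough sketch of embedding it (modeled on the `demo` example: the `ergo.services/application/observer` import path and the `observer.CreateApp(observer.Options{})` factory belong to the extra library and are assumptions here, not part of this patch), registering the application in the node options is enough:

```go
package main

import (
	"ergo.services/application/observer" // extra-library Observer application (assumed path)
	"ergo.services/ergo"
	"ergo.services/ergo/gen"
)

func main() {
	var options gen.NodeOptions

	// register the Observer application so the node starts it automatically
	options.Applications = []gen.ApplicationBehavior{
		observer.CreateApp(observer.Options{}),
	}

	node, err := ergo.StartNode("demo@localhost", options)
	if err != nil {
		panic(err)
	}
	node.Wait()
}
```

With the application registered, the Observer web UI is served by the node itself (by default at http://localhost:9911, as the Quick start section below notes).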
For more information, see https://docs.ergo.services/tools/observer. -#### [v2.2.4](https://github.com/ergo-services/ergo/releases/tag/v1.999.224) 2023-05-01 [tag version v1.999.224] #### -This release includes fixes: -- Fixed incorrect handling of `gen.SupervisorStrategyRestartTransient` restart strategy in `gen.Supervisor` -- Fixed missing `ServerBehavior` in [`gen.Pool`, `gen.Raft`, `gen.Saga`, `gen.Stage`, `gen.TCP`, `gen.UDP`, `gen.Web`] behavior interfaces -- Introduced the new tool for boilerplate code generation - `ergo` https://github.com/ergo-services/tools. You may read more information about this tool in our article with a great example https://blog.ergo.services/quick-start-1094d56d4e2 -### Benchmarks ### ### Quick start ### -Here is simple EndToEnd test demonstrates performance of messaging subsystem +For a quick start, use the [`ergo`](https://docs.ergo.services/tools/ergo) tool, a command-line utility designed to simplify the process of generating boilerplate code for your project based on the Ergo Framework. With this tool, you can rapidly create a complete project structure, including applications, actors, supervisors, network components, and more. It offers a set of arguments that allow you to customize the project according to specific requirements, ensuring it is ready for immediate development. -Hardware: workstation with AMD Ryzen Threadripper 3970X (64) @ 3.700GHz +To install it, use the following command: ``` -❯❯❯❯ go test -bench=NodeParallel -run=XXX -benchtime=10s -goos: linux -goarch: amd64 -pkg: github.com/ergo-services/ergo/tests -cpu: AMD Ryzen Threadripper 3970X 32-Core Processor -BenchmarkNodeParallel-64 4738918 2532 ns/op -BenchmarkNodeParallelSingleNode-64 100000000 429.8 ns/op - -PASS -ok github.com/ergo-services/ergo/tests 29.596s +$ go install ergo.services/tools/ergo@latest ``` -these numbers show almost **500.000 sync requests per second** for the network messaging via localhost and **10.000.000 sync requests per second** for the local messaging (within a node). +Now, you can create your project with just one command. Here is an example: -#### Compression +Supervision tree -This benchmark shows the performance of compression for sending 1MB message between two nodes (via a network). ``` -❯❯❯❯ go test -bench=NodeCompression -run=XXX -benchtime=10s -goos: linux -goarch: amd64 -pkg: github.com/ergo-services/ergo/tests -cpu: AMD Ryzen Threadripper 3970X 32-Core Processor -BenchmarkNodeCompressionDisabled1MBempty-64 2400 4957483 ns/op -BenchmarkNodeCompressionEnabled1MBempty-64 5769 2088051 ns/op -BenchmarkNodeCompressionEnabled1MBstring-64 5202 2077099 ns/op -PASS -ok github.com/ergo-services/ergo/tests 56.708s + mynode + ├─ myapp + │ │ + │ └─ mysup + │ │ + │ └─ myactor + ├─ myweb + └─ myactor2 ``` -It demonstrates **more than 2 times** improvement. - -#### Proxy - -This benchmark demonstrates how proxy feature and e2e encryption impact a messaging performance.
+To generate a project for this design, use the following command: ``` -❯❯❯❯ go test -bench=NodeProxy -run=XXX -benchtime=10s -goos: linux -goarch: amd64 -pkg: github.com/ergo-services/ergo/tests -cpu: AMD Ryzen Threadripper 3970X 32-Core Processor -BenchmarkNodeProxy_NodeA_to_NodeC_direct_Message_1KB-64 1908477 6337 ns/op -BenchmarkNodeProxy_NodeA_to_NodeC_via_NodeB_Message_1KB-64 1700984 7062 ns/op -BenchmarkNodeProxy_NodeA_to_NodeC_via_NodeB_Message_1KB_Encrypted-64 1271125 9410 ns/op -PASS -ok github.com/ergo-services/ergo/tests 45.649s - +$ ergo -init MyNode \ + -with-app MyApp \ + -with-sup MyApp:MySup \ + -with-actor MySup:MyActor \ + -with-web MyWeb \ + -with-actor MyActor2 \ + -with-observer ``` +As a result, you will get the generated project: -#### Ergo Framework vs original Erlang/OTP - -Hardware: laptop with Intel(R) Core(TM) i5-8265U (4 cores. 8 with HT) ``` + mynode + ├── apps + │ └── myapp + │ ├── myactor.go + │ ├── myapp.go + │ └── mysup.go + ├── cmd + │ ├── myactor2.go + │ ├── mynode.go + │ ├── myweb.go + │ └── myweb_worker.go + ├── go.mod + ├── go.sum + └── README.md ``` -![benchmarks](https://raw.githubusercontent.com/halturin/ergobenchmarks/master/ergobenchmark.png) +to try it: -sources of these benchmarks are [here](https://github.com/halturin/ergobenchmarks) ``` +$ cd mynode +$ go run ./cmd ``` +Since we included the Observer application, open http://localhost:9911 to inspect your node and running processes. -### EPMD ### ### Erlang support ### -*Ergo Framework* has embedded EPMD implementation in order to run your node without external epmd process needs. By default, it works as a client with erlang' epmd daemon or others ergo's nodes either. +Starting from version 3.0.0, support for the Erlang network stack has been moved to a separate module and is distributed under the BSL 1.1 license - https://github.com/ergo-services/proto. You can find detailed information on using this module in the documentation at https://docs.ergo.services/extra-library/network-protocols/erlang. -The one thing that makes embedded EPMD different is the behavior of handling connection hangs - if ergo' node is running as an EPMD client and lost connection, it tries either to run its own embedded EPMD service or to restore the lost connection. +### Requirements ### -### Examples ### +* Go 1.20.x and above -Code below is a simple implementation of gen.Server pattern [examples/genserver](https://github.com/ergo-services/examples/tree/master/genserver) +### Changelog ### -```golang -package main +The fully detailed changelog can be found in the [CHANGELOG](CHANGELOG.md) file. -import ( - "fmt" - "time" +#### [v3.0.0](https://github.com/ergo-services/ergo/releases/tag/v1.999.300) 2024-09-04 [tag version v1.999.300] #### - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" -) +This version marks a significant milestone in the evolution of the Ergo Framework. The framework's design has been completely overhauled, and this version was built from the ground up. It includes: -type simple struct { - gen.Server -} +- Significant API Improvements: The `gen.Process`, `gen.Node`, and `gen.Network` interfaces have been enhanced with numerous convenient methods. +- A New Network Stack: This version introduces a completely new network stack for improved performance and flexibility.
See https://github.com/ergo-services/benchmarks for the details -func (s *simple) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - value := message.(int) - fmt.Printf("HandleInfo: %#v \n", message) - if value > 104 { - return gen.ServerStatusStop - } - // sending message with delay 1 second - fmt.Println("increase this value by 1 and send it to itself again") - process.SendAfter(process.Self(), value+1, time.Second) - return gen.ServerStatusOK -} +Alongside the release of Ergo Framework 3.0.0, new tools and a library of additional components are also introduced: -``` +- Tools (observer, saturn) https://github.com/ergo-services/tools +- Loggers (rotate, colored) - https://github.com/ergo-services/logger +- Meta (websocket) - https://github.com/ergo-services/meta +- Application (observer) - https://github.com/ergo-services/application +- Registrar (client Saturn) - https://github.com/ergo-services/registrar +- Proto (erlang23) - https://github.com/ergo-services/proto -here is output of this code - -```shell -$ go run ./examples/simple -HandleInfo: 100 -HandleInfo: 101 -HandleInfo: 102 -HandleInfo: 103 -HandleInfo: 104 -HandleInfo: 105 -exited -``` +Finally, we've published comprehensive documentation for the framework, providing detailed guides to assist you in leveraging all the capabilities of Ergo Framework effectively. It's available at https://docs.ergo.services. -See [https://github.com/ergo-services/examples](https://github.com/ergo-services/examples/) for more details - -* [gen.Application](https://github.com/ergo-services/examples/tree/master/application) -* [gen.Supervisor](https://github.com/ergo-services/examples/tree/master/supervisor) -* [gen.Server](https://github.com/ergo-services/examples/tree/master/genserver) -* [gen.Pool](https://github.com/ergo-services/examples/tree/master/genpool) -* [gen.Stage](https://github.com/ergo-services/examples/tree/master/genstage) -* [gen.Saga](https://github.com/ergo-services/examples/tree/master/gensaga) -* [gen.Raft](https://github.com/ergo-services/examples/tree/master/genraft) -* [gen.Custom](https://github.com/ergo-services/examples/tree/master/gencustom) -* [gen.Web](https://github.com/ergo-services/examples/tree/master/genweb) -* [gen.TCP](https://github.com/ergo-services/examples/tree/master/gentcp) -* [gen.UDP](https://github.com/ergo-services/examples/tree/master/genudp) -* [events](https://github.com/ergo-services/examples/tree/master/events) -* [erlang](https://github.com/ergo-services/examples/tree/master/erlang) -* [proxy](https://github.com/ergo-services/examples/tree/master/proxy) -* [cloud](https://github.com/ergo-services/examples/tree/master/cloud) -### Elixir Phoenix Users ### -Users of the Elixir Phoenix framework might encounter timeouts when trying to connect a Phoenix node -to an ergo node. The reason is that, in addition to global_name_server and net_kernel, -Phoenix attempts to broadcast messages to the [pg2 PubSub handler](https://hexdocs.pm/phoenix/1.1.0/Phoenix.PubSub.PG2.html) -To work with Phoenix nodes, you must create and register a dedicated pg2 GenServer, and -spawn it inside your node. The spawning process must have "pg2" as a process name: ```golang -type Pg2GenServer struct { - gen.Server -} -func main() { - // ... - pg2 := &Pg2GenServer{} - node1, _ := ergo.StartNode("node1@localhost", "cookies", node.Options{}) - process, _ := node1.Spawn("pg2", gen.ProcessOptions{}, pg2, nil) - // ...
-} -``` -### Development and debugging ### +### Benchmarks ### -There are options already defined that you might want to use +You can find available benchmarks in the following repository https://github.com/ergo-services/benchmarks. -* `-ergo.trace` - enable debug info (logging via `lib.Log(...)`) -* `-ergo.debug` - enable extended debug info (logging via `lib.Log(...)` and `lib.Warning(...)`) -* `-ergo.norecover` - disable panic catching -* `-ergo.warning` - enable/disable warnings (logging via `lib.Warning(...)`. Default: enable) +* Messaging performance (local, network) -To enable Golang profiler just add `--tags debug` in your `go run` or `go build` like this: +* Memory consumption per process (demonstrates framework memory footprint). ``` -go run --tags debug ./examples/genserver/demoGenServer.go ``` +### Development and debugging ### -Now golang' profiler is available at `http://localhost:9009/debug/pprof` +To enable the Golang profiler, add `--tags debug` to your `go run` or `go build` command (the profiler runs at +`http://localhost:9009/debug/pprof`). -To check test coverage: +To disable panic recovery, use `--tags norecover`. -``` -go test -coverprofile=cover.out ./... -go tool cover -html=cover.out -o coverage.html -``` +To enable the trace logging level for the internals (node, network, ...), use `--tags trace` and set the log level `gen.LogLevelTrace` for your node. To run tests with cleaned test cache: ``` go vet go clean -testcache -go test -v ./... -``` - -To run benchmarks: - +go test -v ./tests/... ``` -go test -bench=Node -run=X -benchmem -``` - -### Companies are using Ergo Framework ### - -[![Kaspersky](.github/images/kaspersky.png)](https://kaspersky.com) -[![RingCentral](.github/images/ringcentral.png)](https://www.ringcentral.com) -[![LilithGames](.github/images/lilithgames.png)](https://lilithgames.com) - -is your company using Ergo? add your company logo/name here ### Commercial support -please, contact ceo@ergo.services for more information +Please contact support@ergo.services for more information. diff --git a/act/README.md b/act/README.md new file mode 100644 index 00000000..75e66c1f --- /dev/null +++ b/act/README.md @@ -0,0 +1,4 @@ +## Standard Actors Library ## + +Doc: https://docs.ergo.services/actors + diff --git a/act/actor.go b/act/actor.go new file mode 100644 index 00000000..ee4599df --- /dev/null +++ b/act/actor.go @@ -0,0 +1,353 @@ +package act + +import ( + "fmt" + "reflect" + "runtime" + "strings" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +// ActorBehavior interface +type ActorBehavior interface { + gen.ProcessBehavior + + // Init is invoked on spawn of the Actor for initialization. + Init(args ...any) error + + // HandleMessage invoked if Actor received a message sent with gen.Process.Send(...). + // Non-nil value of the returned error will cause termination of this process. + // To stop this process normally, return gen.TerminateReasonNormal + // or any other for abnormal termination. + HandleMessage(from gen.PID, message any) error + + // HandleCall invoked if Actor got a synchronous request made with gen.Process.Call(...). + // Return nil as a result to handle this request asynchronously and + // to provide the result later using the gen.Process.SendResponse(...) method.
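+ // An illustrative note (not part of the original source): a typical asynchronous + // pattern is to store `from` and `ref`, return nil from HandleCall, and deliver + // the result later with gen.Process.SendResponse(from, ref, result) once it is + // ready, as the synchronous path in ProcessRun below does for non-nil results.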
+ HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) + + // Terminate is invoked on process termination + Terminate(reason error) + + // HandleMessageName invoked if split handling was enabled using SetSplitHandle(true) + // and message has been sent by name + HandleMessageName(name gen.Atom, from gen.PID, message any) error + HandleMessageAlias(alias gen.Alias, from gen.PID, message any) error + HandleCallName(name gen.Atom, from gen.PID, ref gen.Ref, request any) (any, error) + HandleCallAlias(alias gen.Alias, from gen.PID, ref gen.Ref, request any) (any, error) + + // HandleLog invoked on a log message if this process was added as a logger. + HandleLog(message gen.MessageLog) error + + // HandleEvent invoked on an event message if this process got subscribed on + // this event using gen.Process.LinkEvent or gen.Process.MonitorEvent + HandleEvent(message gen.MessageEvent) error + + // HandleInspect invoked on the request made with gen.Process.Inspect(...) + HandleInspect(from gen.PID, item ...string) map[string]string +} + +// Actor implements the ProcessBehavior interface and provides callbacks for +// - initialization +// - handling messages/requests. +// - termination +// All callbacks of the ActorBehavior are optional for the implementation. +type Actor struct { + gen.Process + + behavior ActorBehavior + mailbox gen.ProcessMailbox + + trap bool // trap exit + split bool // split handle callback +} + +// SetTrapExit enables/disables the trap on exit requests sent by SendExit(...). +// Enabled trap makes the actor ignore such requests, transforming them into +// regular messages (gen.MessageExitPID) except for the request from the parent +// process with the reason gen.TerminateReasonShutdown. +// With disabled trap, the actor gracefully terminates by invoking the Terminate +// callback with the given reason +func (a *Actor) SetTrapExit(trap bool) { + a.trap = trap +} + +// TrapExit returns whether the trap was enabled on this actor +func (a *Actor) TrapExit() bool { + return a.trap +} + +// SetSplitHandle enables/disables splitting invoke callback depending on the target type. +// Enabled splitting makes this process invoke +// - HandleCall/HandleMessage for the request/message addressed by gen.PID +// - HandleCallName/HandleMessageName for the request/message addressed by gen.ProcessID +// - HandleCallAlias/HandleMessageAlias for the request/message addressed by gen.Alias +func (a *Actor) SetSplitHandle(split bool) { + a.split = split +} + +// SplitHandle returns whether the splitting was enabled on this actor +func (a *Actor) SplitHandle() bool { + return a.split +} + +// +// ProcessBehavior implementation +// + +// ProcessInit +func (a *Actor) ProcessInit(process gen.Process, args ...any) (rr error) { + var ok bool + + if a.behavior, ok = process.Behavior().(ActorBehavior); ok == false { + unknown := strings.TrimPrefix(reflect.TypeOf(process.Behavior()).String(), "*") + return fmt.Errorf("ProcessInit: not an ActorBehavior %s", unknown) + } + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + a.Log().Panic("Actor initialization failed. Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + rr = gen.TerminateReasonPanic + } + }() + } + + a.Process = process + a.mailbox = process.Mailbox() + + return a.behavior.Init(args...)
+} + +func (a *Actor) ProcessRun() (rr error) { + var message *gen.MailboxMessage + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + a.Log().Panic("Actor terminated. Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + rr = gen.TerminateReasonPanic + } + }() + } + + for { + if a.State() != gen.ProcessStateRunning { + // process was killed by the node. + return gen.TerminateReasonKill + } + + if message != nil { + gen.ReleaseMailboxMessage(message) + message = nil + } + + for { + // check queues + msg, ok := a.mailbox.Urgent.Pop() + if ok { + // got new urgent message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = a.mailbox.System.Pop() + if ok { + // got new system message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = a.mailbox.Main.Pop() + if ok { + // got new regular message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = a.mailbox.Log.Pop() + if ok { + if reason := a.behavior.HandleLog(msg.(gen.MessageLog)); reason != nil { + return reason + } + continue + } + + // no messages in the mailbox + return nil + } + + retry: + switch message.Type { + case gen.MailboxMessageTypeRegular: + var reason error + + if a.split { + switch target := message.Target.(type) { + case gen.Atom: + reason = a.behavior.HandleMessageName(target, message.From, message.Message) + case gen.Alias: + reason = a.behavior.HandleMessageAlias(target, message.From, message.Message) + default: + reason = a.behavior.HandleMessage(message.From, message.Message) + } + } else { + reason = a.behavior.HandleMessage(message.From, message.Message) + } + + if reason != nil { + return reason + } + + case gen.MailboxMessageTypeRequest: + var reason error + var result any + + if a.split { + switch target := message.Target.(type) { + case gen.Atom: + result, reason = a.behavior.HandleCallName(target, message.From, message.Ref, message.Message) + case gen.Alias: + result, reason = a.behavior.HandleCallAlias(target, message.From, message.Ref, message.Message) + default: + result, reason = a.behavior.HandleCall(message.From, message.Ref, message.Message) + } + } else { + result, reason = a.behavior.HandleCall(message.From, message.Ref, message.Message) + } + + if reason != nil { + // if reason is "normal" and we got response - send it before termination + if reason == gen.TerminateReasonNormal && result != nil { + a.SendResponse(message.From, message.Ref, result) + } + return reason + } + + if result == nil { + // async handling of sync request. 
response could be sent + // later, even by the other process + continue + } + + a.SendResponse(message.From, message.Ref, result) + + case gen.MailboxMessageTypeEvent: + if reason := a.behavior.HandleEvent(message.Message.(gen.MessageEvent)); reason != nil { + return reason + } + + case gen.MailboxMessageTypeExit: + switch exit := message.Message.(type) { + case gen.MessageExitPID: + // trap exit signal if it wasn't sent by parent + // and TrapExit == true + if a.trap && message.From != a.Parent() { + message.Type = gen.MailboxMessageTypeRegular + goto retry + } + return fmt.Errorf("%s: %w", exit.PID, exit.Reason) + + case gen.MessageExitProcessID: + if a.trap { + message.Type = gen.MailboxMessageTypeRegular + goto retry + } + return fmt.Errorf("%s: %w", exit.ProcessID, exit.Reason) + + case gen.MessageExitAlias: + if a.trap { + message.Type = gen.MailboxMessageTypeRegular + goto retry + } + return fmt.Errorf("%s: %w", exit.Alias, exit.Reason) + + case gen.MessageExitEvent: + if a.trap { + message.Type = gen.MailboxMessageTypeRegular + goto retry + } + return fmt.Errorf("%s: %w", exit.Event, exit.Reason) + + case gen.MessageExitNode: + if a.trap { + message.Type = gen.MailboxMessageTypeRegular + goto retry + } + return fmt.Errorf("%s: %w", exit.Name, gen.ErrNoConnection) + + default: + panic(fmt.Sprintf("unknown exit message: %#v", exit)) + } + + case gen.MailboxMessageTypeInspect: + result := a.behavior.HandleInspect(message.From, message.Message.([]string)...) + a.SendResponse(message.From, message.Ref, result) + } + + } +} +func (a *Actor) ProcessTerminate(reason error) { + a.behavior.Terminate(reason) +} + +// +// default callbacks for ActorBehavior interface +// + +// Init +func (a *Actor) Init(args ...any) error { + return nil +} + +func (a *Actor) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + a.Log().Warning("Actor.HandleCall: unhandled request from %s", from) + return nil, nil +} + +func (a *Actor) HandleMessage(from gen.PID, message any) error { + a.Log().Warning("Actor.HandleMessage: unhandled message from %s", from) + return nil +} + +func (a *Actor) HandleInspect(from gen.PID, item ...string) map[string]string { + return nil +} + +func (a *Actor) HandleLog(message gen.MessageLog) error { + a.Log().Warning("Actor.HandleLog: unhandled log message %#v", message) + return nil +} + +func (a *Actor) HandleEvent(message gen.MessageEvent) error { + a.Log().Warning("Actor.HandleEvent: unhandled event message %#v", message) + return nil +} + +func (a *Actor) Terminate(reason error) {} + +func (a *Actor) HandleMessageName(name gen.Atom, from gen.PID, message any) error { + a.Log().Warning("Actor.HandleMessageName %s: unhandled message from %s", a.Name(), from) + return nil +} + +func (a *Actor) HandleMessageAlias(alias gen.Alias, from gen.PID, message any) error { + a.Log().Warning("Actor.HandleMessageAlias %s: unhandled message from %s", alias, from) + return nil +} + +func (a *Actor) HandleCallName(name gen.Atom, from gen.PID, ref gen.Ref, request any) (any, error) { + a.Log().Warning("Actor.HandleCallName %s: unhandled request from %s", a.Name(), from) + return nil, nil +} + +func (a *Actor) HandleCallAlias(alias gen.Alias, from gen.PID, ref gen.Ref, request any) (any, error) { + a.Log().Warning("Actor.HandleCallAlias %s: unhandled request from %s", alias, from) + return nil, nil +} diff --git a/act/errors.go b/act/errors.go new file mode 100644 index 00000000..43d73a4b --- /dev/null +++ b/act/errors.go @@ -0,0 +1,16 @@ +package act + +import ( + "errors" +) +
+var ( + ErrSupervisorStrategyActive = errors.New("supervisor strategy is active") + ErrSupervisorChildUnknown = errors.New("unknown child") + ErrSupervisorChildRunning = errors.New("child process is already running") + ErrSupervisorChildDisabled = errors.New("child is disabled") + ErrSupervisorRestartsExceeded = errors.New("restart intensity is exceeded") + ErrSupervisorChildDuplicate = errors.New("duplicate child spec Name") + + ErrPoolEmpty = errors.New("no worker process in the pool") +) diff --git a/act/pool.go b/act/pool.go new file mode 100644 index 00000000..38b2ff40 --- /dev/null +++ b/act/pool.go @@ -0,0 +1,351 @@ +package act + +import ( + "fmt" + "reflect" + "runtime" + "strings" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +// PoolBehavior interface +const ( + defaultPoolSize = 3 +) + +type PoolBehavior interface { + gen.ProcessBehavior + + // Init invoked on a spawn Pool for the initializing. + Init(args ...any) (PoolOptions, error) + + // HandleMessage invoked if Pool received a message sent with gen.Process.Send(...). + // Non-nil value of the returning error will cause termination of this process. + // To stop this process normally, return gen.TerminateReasonNormal + // or any other for abnormal termination. + HandleMessage(from gen.PID, message any) error + + // HandleCall invoked if Pool got a synchronous request made with gen.Process.Call(...). + // Return nil as a result to handle this request asynchronously and + // to provide the result later using the gen.Process.SendResponse(...) method. + HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) + + // Terminate invoked on a termination process + Terminate(reason error) + + // HandleEvent invoked on an event message if this process got subscribed on + // this event using gen.Process.LinkEvent or gen.Process.MonitorEvent + HandleEvent(message gen.MessageEvent) error + + // HandleInspect invoked on the request made with gen.Process.Inspect(...) + HandleInspect(from gen.PID, item ...string) map[string]string +} + +type Pool struct { + gen.Process + + behavior PoolBehavior + mailbox gen.ProcessMailbox + sWorkerBehavior string + forwarded uint64 + restarts uint64 + unhandled uint64 + + options PoolOptions + pool lib.QueueMPSC +} + +type PoolOptions struct { + WorkerMailboxSize int64 + PoolSize int64 + WorkerFactory gen.ProcessFactory + WorkerArgs []any +} + +func (p *Pool) AddWorkers(n int) (int64, error) { + if p.State() != gen.ProcessStateRunning { + return 0, gen.ErrNotAllowed + } + + wopt := gen.ProcessOptions{ + MailboxSize: p.options.WorkerMailboxSize, + LinkParent: true, + } + for i := 0; i < n; i++ { + pid, err := p.Spawn(p.options.WorkerFactory, wopt, p.options.WorkerArgs...) 
+ if err != nil { + return 0, err + } + p.pool.Push(pid) + } + + return p.pool.Len(), nil +} + +func (p *Pool) RemoveWorkers(n int) (int64, error) { + if p.State() != gen.ProcessStateRunning { + return 0, gen.ErrNotAllowed + } + for i := 0; i < n; i++ { + v, ok := p.pool.Pop() + if ok == false { + return 0, ErrPoolEmpty + } + pid := v.(gen.PID) + p.SendExit(pid, gen.TerminateReasonNormal) + } + + return p.pool.Len(), nil +} + +func (p *Pool) ProcessInit(process gen.Process, args ...any) (rr error) { + var ok bool + + if p.behavior, ok = process.Behavior().(PoolBehavior); ok == false { + unknown := strings.TrimPrefix(reflect.TypeOf(process.Behavior()).String(), "*") + return fmt.Errorf("ProcessInit: not a PoolBehavior %s", unknown) + } + p.Process = process + p.mailbox = process.Mailbox() + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + p.Log().Panic("Pool initialization failed. Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + rr = gen.TerminateReasonPanic + } + }() + } + + options, err := p.behavior.Init(args...) + if err != nil { + return err + } + p.options = options + if options.PoolSize < 1 { + options.PoolSize = defaultPoolSize + } + + p.pool = lib.NewQueueLimitMPSC(options.PoolSize*100, false) + wopt := gen.ProcessOptions{ + MailboxSize: options.WorkerMailboxSize, + LinkParent: true, + } + for i := int64(0); i < options.PoolSize; i++ { + pid, err := p.Spawn(options.WorkerFactory, wopt, options.WorkerArgs...) + if err != nil { + return err + } + + p.pool.Push(pid) + if i == 0 { + pi, _ := p.Node().ProcessInfo(pid) + p.sWorkerBehavior = pi.Behavior + } + } + + return nil +} + +func (p *Pool) ProcessRun() (rr error) { + var message *gen.MailboxMessage + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + p.Log().Panic("Pool terminated. Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + rr = gen.TerminateReasonPanic + } + }() + } + + for { + if p.State() != gen.ProcessStateRunning { + // process was killed by the node. + return gen.TerminateReasonKill + } + + if message != nil { + gen.ReleaseMailboxMessage(message) + message = nil + } + + for { + // check queues + msg, ok := p.mailbox.Urgent.Pop() + if ok { + // got new urgent message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = p.mailbox.System.Pop() + if ok { + // got new system message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = p.mailbox.Main.Pop() + if ok { + // got new regular message. 
handle it + message = msg.(*gen.MailboxMessage) + if message.Type < gen.MailboxMessageTypeExit { + // MailboxMessageTypeRegular, MailboxMessageTypeRequest, MailboxMessageTypeEvent + p.forward(message) + // it shouldn't be "released" back to the pool + message = nil + continue + } + + break + } + + if _, ok := p.mailbox.Log.Pop(); ok { + panic("pool process can not be a logger") + } + + // no messages in the mailbox + return nil + } + + switch message.Type { + case gen.MailboxMessageTypeRegular: + if reason := p.behavior.HandleMessage(message.From, message.Message); reason != nil { + return reason + } + + case gen.MailboxMessageTypeRequest: + var reason error + var result any + + result, reason = p.behavior.HandleCall(message.From, message.Ref, message.Message) + + if reason != nil { + // if reason is "normal" and we got response - send it before termination + if reason == gen.TerminateReasonNormal && result != nil { + p.SendResponse(message.From, message.Ref, result) + } + return reason + } + + if result == nil { + // async handling of sync request. response could be sent + // later, even by the other process + continue + } + + p.SendResponse(message.From, message.Ref, result) + + case gen.MailboxMessageTypeEvent: + if reason := p.behavior.HandleEvent(message.Message.(gen.MessageEvent)); reason != nil { + return reason + } + + case gen.MailboxMessageTypeExit: + switch exit := message.Message.(type) { + case gen.MessageExitPID: + return fmt.Errorf("%s: %w", exit.PID, exit.Reason) + + case gen.MessageExitProcessID: + return fmt.Errorf("%s: %w", exit.ProcessID, exit.Reason) + + case gen.MessageExitAlias: + return fmt.Errorf("%s: %w", exit.Alias, exit.Reason) + + case gen.MessageExitEvent: + return fmt.Errorf("%s: %w", exit.Event, exit.Reason) + + case gen.MessageExitNode: + return fmt.Errorf("%s: %w", exit.Name, gen.ErrNoConnection) + + default: + panic(fmt.Sprintf("unknown exit message: %#v", exit)) + } + + case gen.MailboxMessageTypeInspect: + result := p.behavior.HandleInspect(message.From, message.Message.([]string)...) 
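// A sketch of a PoolBehavior implementation for the loop above. Ordinary
// messages popped from the Main queue are forwarded to the workers, so the
// pool's own HandleMessage callback only sees messages delivered through the
// Urgent/System queues. The worker factory name is hypothetical.
//
//	type myPool struct {
//		act.Pool
//	}
//
//	func factoryMyPool() gen.ProcessBehavior {
//		return &myPool{}
//	}
//
//	func (p *myPool) Init(args ...any) (act.PoolOptions, error) {
//		return act.PoolOptions{
//			PoolSize:          5,               // falls back to defaultPoolSize (3) when < 1
//			WorkerMailboxSize: 10,              // per-worker mailbox limit
//			WorkerFactory:     factoryMyWorker, // hypothetical worker actor factory
//		}, nil
//	}
//
//	// the pool can also be resized at runtime from its own callbacks
//	func (p *myPool) HandleMessage(from gen.PID, message any) error {
//		if message == "scale-up" {
//			if n, err := p.AddWorkers(2); err == nil {
//				p.Log().Info("pool grew to %d workers", n)
//			}
//		}
//		return nil
//	}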
+ p.SendResponse(message.From, message.Ref, result) + } + + } +} + +func (p *Pool) ProcessTerminate(reason error) { + p.behavior.Terminate(reason) +} + +// +// default callbacks for PoolBehavior interface +// + +func (p *Pool) HandleMessage(from gen.PID, message any) error { + p.Log().Warning("Pool.HandleMessage: unhandled message from %s", from) + return nil +} +func (p *Pool) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + p.Log().Warning("Pool.HandleCall: unhandled request from %s", from) + return nil, nil +} +func (p *Pool) Terminate(reason error) {} +func (p *Pool) HandleEvent(message gen.MessageEvent) error { + p.Log().Warning("Pool.HandleEvent: unhandled event message %#v", message) + return nil +} +func (p *Pool) HandleInspect(from gen.PID, item ...string) map[string]string { + return map[string]string{ + "pool_size": fmt.Sprintf("%d", p.options.PoolSize), + "worker_behavior": p.sWorkerBehavior, + "worker_mailbox_size": fmt.Sprintf("%d", p.options.WorkerMailboxSize), + "worker_restarts": fmt.Sprintf("%d", p.restarts), + "messages_forwarded": fmt.Sprintf("%d", p.forwarded), + "messages_unhandled": fmt.Sprintf("%d", p.unhandled), + } +} + +// private + +func (p *Pool) forward(message *gen.MailboxMessage) { + var err error + l := p.pool.Len() + for i := int64(0); i < l; i++ { + err = nil + v, _ := p.pool.Pop() + pid := v.(gen.PID) + err = p.Forward(pid, message, gen.MessagePriorityNormal) + if err == nil { + // back to pool + p.pool.Push(v) + p.forwarded++ + return + } + if err == gen.ErrProcessUnknown || err == gen.ErrProcessTerminated { + // restart + wopt := gen.ProcessOptions{ + MailboxSize: p.options.WorkerMailboxSize, + LinkParent: true, + } + pid, err := p.Spawn(p.options.WorkerFactory, wopt, p.options.WorkerArgs...) + if err != nil { + p.Log().Error("unable to spawn new worker process: %s", err) + continue + } + p.Forward(pid, message, gen.MessagePriorityNormal) + p.pool.Push(pid) + p.forwarded++ + p.restarts++ + return + } + + // mailbox is full. try next worker + p.pool.Push(v) + } + p.Log().Error("no available worker process. ignored message from %s", message.From) + p.unhandled++ +} diff --git a/act/supervisor.go b/act/supervisor.go new file mode 100644 index 00000000..2cc4cbcf --- /dev/null +++ b/act/supervisor.go @@ -0,0 +1,737 @@ +package act + +import ( + "fmt" + "reflect" + "runtime" + "sort" + "strings" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +const ( + defaultRestartIntensity uint16 = 5 + defaultRestartPeriod uint16 = 5 +) + +type SupervisorBehavior interface { + gen.ProcessBehavior + + // Init invoked on a spawn Supervisor process. This is a mandatory callback for the implementation + Init(args ...any) (SupervisorSpec, error) + + // HandleChildStart invoked on a successful child process starting if option EnableHandleChild + // was enabled in act.SupervisorSpec + HandleChildStart(name gen.Atom, pid gen.PID) error + + // HandleChildTerminate invoked on a child process termination if option EnableHandleChild + // was enabled in act.SupervisorSpec + HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error + + // HandleMessage invoked if Supervisor received a message sent with gen.Process.Send(...). + // Non-nil value of the returning error will cause termination of this process. + // To stop this process normally, return gen.TerminateReasonNormal or + // gen.TerminateReasonShutdown. Any other - for abnormal termination. 
+ HandleMessage(from gen.PID, message any) error + + // HandleCall invoked if Supervisor got a synchronous request made with gen.Process.Call(...). + // Return nil as a result to handle this request asynchronously and + // to provide the result later using the gen.Process.SendResponse(...) method. + HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) + + // Terminate invoked on a termination supervisor process + Terminate(reason error) + + // HandleEvent invoked on an event message if this process got subscribed on + // this event using gen.Process.LinkEvent or gen.Process.MonitorEvent + HandleEvent(message gen.MessageEvent) error + + // HandleInspect invoked on the request made with gen.Process.Inspect(...) + HandleInspect(from gen.PID, item ...string) map[string]string +} + +type Supervisor struct { + gen.Process + + behavior SupervisorBehavior + mailbox gen.ProcessMailbox + + sup supBehavior + + handleChild bool + children map[gen.PID]gen.Atom + state supState +} + +// SupervisorType +type SupervisorType int + +func (s SupervisorType) String() string { + switch s { + case SupervisorTypeOneForOne: + return "One For One" + case SupervisorTypeAllForOne: + return "All For One" + case SupervisorTypeRestForOne: + return "Rest For One" + case SupervisorTypeSimpleOneForOne: + return "Simple One For One" + } + return "Bug: unknown supervisor type" +} + +const ( + + // SupervisorTypeOneForOne If one child process terminates and is to be restarted, only + // that child process is affected. This is the default restart strategy. + SupervisorTypeOneForOne SupervisorType = 0 + + // SupervisorTypeAllForOne If one child process terminates and is to be restarted, all other + // child processes are terminated and then all child processes are restarted. + SupervisorTypeAllForOne SupervisorType = 1 + + // SupervisorTypeRestForOne If one child process terminates and is to be restarted, + // the 'rest' of the child processes (that is, the child + // processes after the terminated child process in the start order) + // are terminated. Then the terminated child process and all + // child processes after it are restarted + SupervisorTypeRestForOne SupervisorType = 2 + + // SupervisorTypeSimpleOneForOne A simplified one_for_one supervisor, where all + // child processes are dynamically added instances + // of the same process type, that is, running the same code. + SupervisorTypeSimpleOneForOne SupervisorType = 3 +) + +// SupervisorStrategy defines restart strategy for the children processes +type SupervisorStrategy int + +func (s SupervisorStrategy) String() string { + switch s { + case SupervisorStrategyTransient: + return "Transient" + case SupervisorStrategyTemporary: + return "Temporary" + case SupervisorStrategyPermanent: + return "Permanent" + } + return "Bug: unknown supervisor strategy type" +} + +const ( + // SupervisorStrategyTransient child process is restarted only if + // it terminates abnormally, that is, with an exit reason other + // than TerminateReasonNormal, TerminateReasonShutdown. + // This is default strategy. 
+	SupervisorStrategyTransient SupervisorStrategy = 0
+
+	// SupervisorStrategyTemporary child process is never restarted
+	// (not even when the supervisor restart strategy is rest_for_one
+	// or one_for_all and a sibling death causes the temporary process
+	// to be terminated)
+	SupervisorStrategyTemporary SupervisorStrategy = 1
+
+	// SupervisorStrategyPermanent child process is always restarted
+	SupervisorStrategyPermanent SupervisorStrategy = 2
+)
+
+// SupervisorSpec
+type SupervisorSpec struct {
+	Children []SupervisorChildSpec
+	Type     SupervisorType
+	Restart  SupervisorRestart
+
+	// EnableHandleChild enables invoking the HandleChildStart/HandleChildTerminate
+	// callbacks on starting/stopping child processes. These callbacks are invoked
+	// after the restart strategy finishes its work with all children.
+	EnableHandleChild bool
+
+	// DisableAutoShutdown keeps the supervisor running even when no running
+	// child process is left (which happens when all child processes have
+	// terminated normally on their own, or their child specs were disabled).
+	// This option is ignored for SupervisorTypeSimpleOneForOne
+	// or if the SupervisorStrategyPermanent restart strategy is used.
+	DisableAutoShutdown bool
+}
+
+type SupervisorRestart struct {
+	Strategy  SupervisorStrategy
+	Intensity uint16
+	Period    uint16
+	KeepOrder bool // ignored for SupervisorTypeSimpleOneForOne and SupervisorTypeOneForOne
+}
+
+// SupervisorChildSpec
+type SupervisorChildSpec struct {
+	Name        gen.Atom
+	Significant bool // ignored for SupervisorTypeSimpleOneForOne or if the SupervisorStrategyPermanent restart strategy is used
+	Factory     gen.ProcessFactory
+	Options     gen.ProcessOptions
+	Args        []any
+}
+
+type SupervisorChild struct {
+	Spec        gen.Atom
+	Name        gen.Atom
+	PID         gen.PID
+	Significant bool
+	Disabled    bool
+}
+
+// Children returns the list of the supervisor's child processes
+func (s *Supervisor) Children() []SupervisorChild {
+	return s.sup.children()
+}
+
+// StartChild starts a new child process defined in the supervisor spec.
+func (s *Supervisor) StartChild(name gen.Atom, args ...any) error {
+	if s.State() != gen.ProcessStateRunning {
+		return gen.ErrNotAllowed
+	}
+
+	if s.state != supStateNormal {
+		return ErrSupervisorStrategyActive
+	}
+
+	action, err := s.sup.childSpec(name)
+	if err != nil {
+		return err
+	}
+	if len(args) > 0 {
+		action.spec.Args = args
+	}
+	return s.handleAction(action)
+}
+
+// AddChild adds a new child spec to the supervisor. Returns an error if
+// spawning the child process fails.
+func (s *Supervisor) AddChild(child SupervisorChildSpec) error {
+	if s.State() != gen.ProcessStateRunning {
+		return gen.ErrNotAllowed
+	}
+
+	action, err := s.sup.childAddSpec(child)
+	if err != nil {
+		return err
+	}
+	return s.handleAction(action)
+}
+
+// EnableChild enables the child process in the supervisor spec and attempts to
+// start it. Returns an error if spawning the child process fails.
+func (s *Supervisor) EnableChild(name gen.Atom) error {
+	if s.State() != gen.ProcessStateRunning {
+		return gen.ErrNotAllowed
+	}
+
+	if s.state != supStateNormal {
+		return ErrSupervisorStrategyActive
+	}
+
+	action, err := s.sup.childEnable(name)
+	if err != nil {
+		return err
+	}
+	return s.handleAction(action)
+}
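// A sketch of a SupervisorBehavior Init returning the spec types defined
// above. The child factories are illustrative (see the actor sketch earlier),
// and the restart values mirror the defaults.
//
//	type mySup struct {
//		act.Supervisor
//	}
//
//	func factoryMySup() gen.ProcessBehavior {
//		return &mySup{}
//	}
//
//	func (s *mySup) Init(args ...any) (act.SupervisorSpec, error) {
//		return act.SupervisorSpec{
//			Type: act.SupervisorTypeOneForOne,
//			Children: []act.SupervisorChildSpec{
//				{Name: "worker_a", Factory: factoryMyActor},
//				{Name: "worker_b", Factory: factoryMyActor, Significant: true},
//			},
//			Restart: act.SupervisorRestart{
//				Strategy:  act.SupervisorStrategyTransient,
//				Intensity: 5, // allow at most 5 restarts...
//				Period:    5, // ...within a 5-second window
//			},
//		}, nil
//	}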
+// DisableChild stops the child process with gen.TerminateReasonShutdown
+// and disables it in the supervisor spec.
+func (s *Supervisor) DisableChild(name gen.Atom) error {
+	if s.State() != gen.ProcessStateRunning {
+		return gen.ErrNotAllowed
+	}
+
+	action, err := s.sup.childDisable(name)
+	if err != nil {
+		return err
+	}
+	return s.handleAction(action)
+}
+
+//
+// ProcessBehavior implementation
+//
+
+// ProcessInit
+func (s *Supervisor) ProcessInit(process gen.Process, args ...any) (rr error) {
+	var ok bool
+
+	if s.behavior, ok = process.Behavior().(SupervisorBehavior); ok == false {
+		unknown := strings.TrimPrefix(reflect.TypeOf(process.Behavior()).String(), "*")
+		return fmt.Errorf("ProcessInit: not a SupervisorBehavior %s", unknown)
+	}
+
+	s.Process = process
+	s.mailbox = process.Mailbox()
+
+	if lib.Recover() {
+		defer func() {
+			if r := recover(); r != nil {
+				pc, fn, line, _ := runtime.Caller(2)
+				s.Log().Panic("Supervisor initialization failed. Panic reason: %#v at %s[%s:%d]",
+					r, runtime.FuncForPC(pc).Name(), fn, line)
+				rr = gen.TerminateReasonPanic
+			}
+		}()
+	}
+
+	spec, err := s.behavior.Init(args...)
+	if err != nil {
+		return err
+	}
+
+	// validate restart strategy
+	switch spec.Restart.Strategy {
+	case SupervisorStrategyTransient, SupervisorStrategyTemporary,
+		SupervisorStrategyPermanent:
+		break
+	default:
+		return fmt.Errorf("unknown supervisor restart strategy")
+	}
+
+	// validate restart options
+	if spec.Restart.Intensity == 0 {
+		spec.Restart.Intensity = defaultRestartIntensity
+	}
+	if spec.Restart.Period == 0 {
+		spec.Restart.Period = defaultRestartPeriod
+	}
+
+	// validate child spec list
+	if len(spec.Children) == 0 {
+		return fmt.Errorf("children list cannot be empty")
+	}
+
+	s.handleChild = spec.EnableHandleChild
+
+	duplicate := make(map[gen.Atom]bool)
+	for _, c := range spec.Children {
+		if err := validateChildSpec(c); err != nil {
+			return err
+		}
+		if _, dup := duplicate[c.Name]; dup {
+			return ErrSupervisorChildDuplicate
+		}
+		duplicate[c.Name] = true
+	}
+
+	// create supervisor
+	switch spec.Type {
+	case SupervisorTypeOneForOne:
+		s.sup = createSupOneForOne()
+	case SupervisorTypeRestForOne:
+		s.sup = createSupAllRestForOne()
+	case SupervisorTypeAllForOne:
+		s.sup = createSupAllRestForOne()
+	case SupervisorTypeSimpleOneForOne:
+		s.sup = createSupSimpleOneForOne()
+	default:
+		return fmt.Errorf("unknown supervisor type")
+	}
+
+	action, err := s.sup.init(spec)
+	if err != nil {
+		return err
+	}
+
+	s.children = make(map[gen.PID]gen.Atom)
+	err = s.handleAction(action)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *Supervisor) ProcessRun() (rr error) {
+	var message *gen.MailboxMessage
+
+	if lib.Recover() {
+		defer func() {
+			if r := recover(); r != nil {
+				pc, fn, line, _ := runtime.Caller(2)
+
+				s.Log().Panic("Supervisor got panic. Shutting down with reason: %#v at %s[%s:%d]",
+					r, runtime.FuncForPC(pc).Name(), fn, line)
+
+				action := s.sup.childTerminated(s.Name(), s.PID(), gen.TerminateReasonPanic)
+				rr = s.handleAction(action)
+			}
+		}()
+	}
+
+	for {
+		if s.State() != gen.ProcessStateRunning {
+			// process was killed.
+			return gen.TerminateReasonKill
+		}
+
+		if message != nil {
+			gen.ReleaseMailboxMessage(message)
+			message = nil
+		}
+
+		for {
+
+			// check queues. exit-signals are coming into the Urgent queue;
+			// while the supervisor is in the supStateStrategy state, only
+			// this queue is checked and the others are skipped.
+			msg, ok := s.mailbox.Urgent.Pop()
+			if ok {
+				// got new urgent message. handle it
+				message = msg.(*gen.MailboxMessage)
+				break
+			}
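// When EnableHandleChild is set in the spec, the regular-message branch below
// turns the internal child start/stop notifications into callbacks. A sketch
// of overriding them (mySup as in the earlier sketch):
//
//	func (s *mySup) HandleChildStart(name gen.Atom, pid gen.PID) error {
//		s.Log().Info("child %s started as %s", name, pid)
//		return nil
//	}
//
//	func (s *mySup) HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error {
//		s.Log().Info("child %s (%s) terminated with reason: %s", name, pid, reason)
//		return nil
//	}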
+ if s.state != supStateNormal { + return nil + } + + msg, ok = s.mailbox.System.Pop() + if ok { + // got new system message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = s.mailbox.Main.Pop() + if ok { + // got new regular message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + if _, ok := s.mailbox.Log.Pop(); ok { + panic("supervisor process can not be a logger") + } + + // no messages in the mailbox + return nil + } + + switch message.Type { + + case gen.MailboxMessageTypeRegular: + var reason error + if s.handleChild { + switch m := message.Message.(type) { + case supMessageChildStart: + reason = s.behavior.HandleChildStart(m.name, m.pid) + case supMessageChildTerminate: + reason = s.behavior.HandleChildTerminate(m.name, m.pid, m.reason) + default: + reason = s.behavior.HandleMessage(message.From, message.Message) + } + } else { + reason = s.behavior.HandleMessage(message.From, message.Message) + } + + if reason != nil { + action := s.sup.childTerminated(s.Name(), s.PID(), reason) + if err := s.handleAction(action); err != nil { + return err + } + } + + case gen.MailboxMessageTypeRequest: + result, reason := s.behavior.HandleCall(message.From, message.Ref, message.Message) + if result != nil { + s.SendResponse(message.From, message.Ref, result) + } + if reason != nil { + action := s.sup.childTerminated(s.Name(), s.PID(), reason) + if err := s.handleAction(action); err != nil { + return err + } + } + + case gen.MailboxMessageTypeEvent: + if reason := s.behavior.HandleEvent(message.Message.(gen.MessageEvent)); reason != nil { + return reason + } + + case gen.MailboxMessageTypeExit: + switch exit := message.Message.(type) { + case gen.MessageExitPID: + name, found := s.children[exit.PID] + if found { + delete(s.children, exit.PID) + if s.handleChild { + s.Send(s.PID(), supMessageChildTerminate{name, exit.PID, exit.Reason}) + } + } + action := s.sup.childTerminated(name, exit.PID, exit.Reason) + if err := s.handleAction(action); err != nil { + return err + } + + case gen.MessageExitProcessID: + reason := fmt.Errorf("%s: %w", exit.ProcessID, exit.Reason) + action := s.sup.childTerminated(s.Name(), s.PID(), reason) + if err := s.handleAction(action); err != nil { + return err + } + + case gen.MessageExitAlias: + reason := fmt.Errorf("%s: %w", exit.Alias, exit.Reason) + action := s.sup.childTerminated(s.Name(), s.PID(), reason) + if err := s.handleAction(action); err != nil { + return err + } + + case gen.MessageExitEvent: + reason := fmt.Errorf("%s: %w", exit.Event, exit.Reason) + action := s.sup.childTerminated(s.Name(), s.PID(), reason) + if err := s.handleAction(action); err != nil { + return err + } + case gen.MessageExitNode: + reason := fmt.Errorf("%s: %w", exit.Name, gen.ErrNoConnection) + action := s.sup.childTerminated(s.Name(), s.PID(), reason) + if err := s.handleAction(action); err != nil { + return err + } + + default: + panic(fmt.Sprintf("unknown exit message: %#v", exit)) + } + + case gen.MailboxMessageTypeInspect: + result := s.behavior.HandleInspect(message.From, message.Message.([]string)...) 
+ s.SendResponse(message.From, message.Ref, result) + } + } +} + +// +// SupervisorBehavior default callbacks +// + +func (s *Supervisor) HandleChildStart(name gen.Atom, pid gen.PID) error { + s.Log().Warning("Supervisor.HandleChildStart: unhandled message") + return nil +} + +func (s *Supervisor) HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error { + s.Log().Warning("Supervisor.HandleChildTerminate: unhandled message") + return nil +} + +func (s *Supervisor) HandleMessage(from gen.PID, message any) error { + s.Log().Warning("Supervisor.HandleMessage: unhandled message from %s", from) + return nil +} + +func (s *Supervisor) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + s.Log().Warning("Supervisor.HandleCall: unhandled request from %s", from) + return nil, nil +} +func (s *Supervisor) HandleEvent(message gen.MessageEvent) error { + s.Log().Warning("Supervisor.HandleEvent: unhandled event message %#v", message) + return nil +} + +func (s *Supervisor) HandleInspect(from gen.PID, item ...string) map[string]string { + return nil +} + +func (s *Supervisor) Terminate(reason error) {} + +// +// +// + +func (s *Supervisor) handleAction(action supAction) error { + for { + switch action.do { + case supActionDoNothing: + s.state = supStateNormal + break + + case supActionStartChild: + s.state = supStateStrategy + var pid gen.PID + var err error + + action.spec.Options.LinkChild = true + action.spec.Options.LinkParent = true + + if action.spec.register { + pid, err = s.SpawnRegister(action.spec.Name, action.spec.Factory, action.spec.Options, action.spec.Args...) + } else { + pid, err = s.Spawn(action.spec.Factory, action.spec.Options, action.spec.Args...) + } + + if err != nil { + action = s.sup.childTerminated(action.spec.Name, pid, err) + continue + } + + if s.handleChild { + s.Send(s.PID(), supMessageChildStart{action.spec.Name, pid}) + } + + s.children[pid] = action.spec.Name + action = s.sup.childStarted(action.spec, pid) + continue + + case supActionTerminateChildren: + // on disabling child spec + s.state = supStateStrategy + for _, pid := range action.terminate { + s.SendExit(pid, action.reason) + s.Log().Info("Supervisor: terminate children %s", pid) + } + return nil + + case supActionTerminate: + s.behavior.Terminate(action.reason) + return action.reason + + default: + panic("unknown supAction") + } + + break + } + return nil +} + +func (s *Supervisor) ProcessTerminate(reason error) { + s.behavior.Terminate(reason) +} + +// +// internals +// + +type supBehavior interface { + init(spec SupervisorSpec) (supAction, error) + + childAddSpec(spec SupervisorChildSpec) (supAction, error) + + childSpec(name gen.Atom) (supAction, error) + childStarted(spec supChildSpec, pid gen.PID) supAction + childTerminated(name gen.Atom, pid gen.PID, reason error) supAction + + childEnable(name gen.Atom) (supAction, error) + childDisable(name gen.Atom) (supAction, error) + + children() []SupervisorChild +} + +type supState int + +const ( + supStateNormal = 0 + supStateStrategy = 1 +) + +type supActionType int + +const ( + // just relax + supActionDoNothing supActionType = 0 + // start child + supActionStartChild supActionType = 1 + // just stop children (on disabling child spec) + supActionTerminateChildren supActionType = 2 + // stop children due to restart strategy activated + supActionTerminateChildrenStrategy supActionType = 3 + supActionTerminate supActionType = 4 +) + +type supAction struct { + do supActionType + + // for supActionStartChild + spec supChildSpec + + // 
for supActionTerminateChildren + terminate []gen.PID + reason error +} + +type supMessageChildStart struct { + name gen.Atom + pid gen.PID +} + +type supMessageChildTerminate struct { + name gen.Atom + pid gen.PID + reason error +} + +// checkRestartIntensity returns true if exceeded +func supCheckRestartIntensity(restarts []int64, period int, intensity int) ([]int64, bool) { + restarts = append(restarts, time.Now().Unix()) + if len(restarts) < intensity { + return restarts, false + } + + p := int(time.Now().Unix() - restarts[0]) + if p > period { + restarts = restarts[1:] + return restarts, false + } + return restarts, true +} + +func validateChildSpec(s SupervisorChildSpec) error { + if s.Name == "" { + return fmt.Errorf("invalid child spec Name") + } + + if s.Factory == nil { + return fmt.Errorf("child spec Factory is nil") + } + + return nil +} + +type supChild struct { + pid gen.PID + spec supChildSpec +} + +type supChildSpec struct { + SupervisorChildSpec + register bool + disabled bool + i int + pid gen.PID +} + +func sortSupChild(c []supChild) []SupervisorChild { + var children []SupervisorChild + + if len(c) == 0 { + return children + } + + sort.Slice(c, func(i, j int) bool { + if c[i].spec.i == c[j].spec.i { + return c[i].pid.ID < c[j].pid.ID + } + return c[i].spec.i < c[j].spec.i + }) + + for _, v := range c { + child := SupervisorChild{ + Spec: v.spec.Name, + PID: v.pid, + Significant: v.spec.Significant, + Disabled: v.spec.disabled, + } + if v.spec.register { + child.Name = v.spec.Name + } + children = append(children, child) + } + return children +} diff --git a/act/supervisor_arfo.go b/act/supervisor_arfo.go new file mode 100644 index 00000000..786c4298 --- /dev/null +++ b/act/supervisor_arfo.go @@ -0,0 +1,503 @@ +package act + +import ( + "ergo.services/ergo/gen" +) + +// +// All|Rest For One implementation +// + +func createSupAllRestForOne() supBehavior { + return &supARFO{ + wait: make(map[gen.PID]bool), + } +} + +type supARFO struct { + spec []*supChildSpec + rest bool + + restart SupervisorRestart + restarts []int64 + autoshutdown bool + + mode int // 0 - normal, (1 - starting, 2 - stopping) [re]starting, 3 - shutdown + + keeporder bool + shutdownReason error + restartI int + wait map[gen.PID]bool + + i int +} + +func (s *supARFO) init(spec SupervisorSpec) (supAction, error) { + var action supAction + + if spec.Type == SupervisorTypeRestForOne { + s.rest = true + } + + s.restart = spec.Restart + for _, c := range spec.Children { + cs := supChildSpec{ + SupervisorChildSpec: c, + } + cs.register = true + cs.i = s.i + s.i++ + s.spec = append(s.spec, &cs) + } + + action.do = supActionStartChild + action.spec = *s.spec[0] + + s.mode = 1 // starting + s.autoshutdown = spec.DisableAutoShutdown == false + s.keeporder = spec.Restart.KeepOrder + + return action, nil +} + +func (s *supARFO) childAddSpec(spec SupervisorChildSpec) (supAction, error) { + var action supAction + + if s.mode != 0 { + return action, ErrSupervisorStrategyActive + } + + if err := validateChildSpec(spec); err != nil { + return action, err + } + + for _, cs := range s.spec { + if cs.Name == spec.Name { + return action, ErrSupervisorChildDuplicate + } + } + + cs := supChildSpec{ + SupervisorChildSpec: spec, + } + cs.register = true + cs.i = s.i + s.i++ + s.spec = append(s.spec, &cs) + + // start this child + action.do = supActionStartChild + action.spec = cs + + return action, nil +} + +func (s *supARFO) childSpec(name gen.Atom) (supAction, error) { + var action supAction + var empty gen.PID + + // single 
start (if it was terminated normally before) + + if s.mode != 0 { + return action, ErrSupervisorStrategyActive + } + + for _, spec := range s.spec { + if spec.Name != name { + continue + } + + if spec.disabled { + return action, ErrSupervisorChildDisabled + } + + if spec.pid == empty { + action.do = supActionStartChild + action.spec = *spec + return action, nil + } + + // already running + return action, ErrSupervisorChildRunning + } + + return action, ErrSupervisorChildUnknown +} + +func (s *supARFO) childStarted(cs supChildSpec, pid gen.PID) supAction { + var action supAction + var empty gen.PID + + // let panic if got unknown child + spec := s.spec[cs.i] + if cs.Name != spec.Name { + panic(gen.ErrInternal) + } + + // update args, keep the pid and do nothing + spec.Args = cs.Args + spec.pid = pid + + if s.mode != 1 { // is not in starting mode? + // do nothing + return action + } + + // starting mode. start the rest + + if cs.i == len(s.spec)-1 { + // it was the last spec. do nothing more + s.mode = 0 // normal + return action + } + + // check the rest children if they are running + for i := cs.i + 1; i < len(s.spec); i++ { + if s.spec[i].pid != empty { + continue + } + + if s.spec[i].disabled { + continue + } + + action.do = supActionStartChild + action.spec = *s.spec[i] + action.spec.i = i + return action + } + + return action +} + +func (s *supARFO) childTerminated(name gen.Atom, pid gen.PID, reason error) supAction { + var action supAction + var spec *supChildSpec + var empty gen.PID + + delete(s.wait, pid) + + if s.mode == 3 { // shutdown + if len(s.wait) > 0 { + // return action with empty list + action.do = supActionTerminateChildren + return action + } + + action.do = supActionTerminate + action.reason = s.shutdownReason + return action + } + + found := false + runningChildren := []gen.PID{} + // in case we should terminate all children keep the running pids in a map (awaiting termination) + wait := make(map[gen.PID]bool) + specI := 0 + for i, cs := range s.spec { + if cs.Name == name || cs.pid == pid { + cs.pid = empty + found = true + spec = cs + specI = i + continue + } + + if cs.pid == empty { + continue + } + + runningChildren = append(runningChildren, cs.pid) + wait[cs.pid] = true + } + + if found == false { + // seems supervisor got exit-signal from a non-child process. + if len(runningChildren) == 0 { + // no child processes running. just terminate supervisor + action.reason = reason + action.do = supActionTerminate + return action + } + + // start supervisor termination + action.terminate = runningChildren + action.do = supActionTerminateChildren + action.reason = reason + s.wait = wait + s.mode = 3 + s.shutdownReason = reason + return action + } + + if s.mode == 2 { // stopping (restarting) + + if s.keeporder == false { + if len(s.wait) > 0 { + // return action with empty list. just wait for the child processes + // to be terminated + action.do = supActionTerminateChildren + return action + } + + } else { + if len(s.wait) > 0 { + // must be 0 + panic(gen.ErrInternal) + } + + if specI < s.restartI { + // terminated child is not among we are waiting for termination. 
+ // update the position + s.restartI = specI + } + + terminate := s.childrenForTermination() + if len(terminate) > 0 { + action.do = supActionTerminateChildren + action.reason = reason + action.terminate = terminate + return action + } + } + + s.mode = 1 // starting (restarting) + action.do = supActionStartChild + action.spec = s.childForStart() + s.restartI = 0 + return action + } + + if spec.disabled { + // auto shutdown is enabled + if len(runningChildren) == 0 && s.autoshutdown { + // there is no more running child processes. start supervisor termination + action.reason = reason + action.do = supActionTerminate + return action + } + // do nothing + return action + } + + // activate restart strategy + switch s.restart.Strategy { + case SupervisorStrategyTemporary: + if spec.Significant { + // significant child has terminated. + if len(runningChildren) == 0 { + action.reason = reason + action.do = supActionTerminate + return action + } + + action.terminate = runningChildren + action.do = supActionTerminateChildren + action.reason = reason + s.wait = wait + s.mode = 3 // shutdown + s.shutdownReason = reason + return action + } + + // auto shutdown is enabled + if len(runningChildren) == 0 && s.autoshutdown { + // there is no more running child processes. start supervisor termination + action.reason = reason + action.do = supActionTerminate + return action + } + + // do nothing + return action + + case SupervisorStrategyTransient: + if reason == gen.TerminateReasonNormal || reason == gen.TerminateReasonShutdown { + if spec.Significant { + // significant child has terminated + if len(runningChildren) == 0 { + action.reason = reason + action.do = supActionTerminate + return action + } + + action.terminate = runningChildren + action.do = supActionTerminateChildren + action.reason = reason + s.wait = wait + s.mode = 3 // shutdown + s.shutdownReason = reason + return action + } + + // auto shutdown is enabled + if len(runningChildren) == 0 && s.autoshutdown { + // there is no more running child processes. shutting down this supervisor + action.reason = reason + action.do = supActionTerminate + return action + } + + // do nothing + return action + } + } + + // check for restart intensity + restarts, exceeded := supCheckRestartIntensity(s.restarts, + int(s.restart.Period), + int(s.restart.Intensity)) + s.restarts = restarts + + if exceeded { + // exceeded intensity. start supervisor termination + action.terminate = runningChildren + action.do = supActionTerminateChildren + action.reason = ErrSupervisorRestartsExceeded + s.wait = wait + s.mode = 3 // shutdown + s.shutdownReason = reason + return action + } + + // + // activate restart strategy + // + + // set the restarting position + if s.rest { + s.restartI = specI // restart from the last to the i-th + } + + terminate := s.childrenForTermination() + if len(terminate) == 0 { + // nothing to stop. start children + action.do = supActionStartChild + action.spec = s.childForStart() + s.mode = 1 // starting (restarting) + return action + + } + action.do = supActionTerminateChildren + action.reason = reason + action.terminate = terminate + s.mode = 2 // stopping (restarting) + return action +} + +func (s *supARFO) childEnable(name gen.Atom) (supAction, error) { + var action supAction + if s.mode != 0 { + return action, ErrSupervisorStrategyActive + } + + for _, cs := range s.spec { + if cs.Name != name { + continue + } + + if cs.disabled == false { + // do nothing. its already enabled + return action, nil + } + + // it was disabled. 
enable it and start child process with this spec + + action.do = supActionStartChild + action.spec = *cs + + return action, nil + } + + return action, ErrSupervisorChildUnknown +} + +func (s *supARFO) childDisable(name gen.Atom) (supAction, error) { + var action supAction + var empty gen.PID + + if s.mode != 0 { + return action, ErrSupervisorStrategyActive + } + + for _, cs := range s.spec { + if cs.Name != name { + continue + } + + if cs.disabled { + // do nothing. its already disabled + return action, nil + } + + if cs.pid == empty { + return action, nil + } + + cs.disabled = true + action.do = supActionTerminateChildren + action.terminate = []gen.PID{cs.pid} + action.reason = gen.TerminateReasonShutdown + s.wait[cs.pid] = true + return action, nil + } + + return action, ErrSupervisorChildUnknown +} + +func (s *supARFO) children() []SupervisorChild { + var c []supChild + + for _, cs := range s.spec { + c = append(c, supChild{cs.pid, *cs}) + } + return sortSupChild(c) +} + +func (s *supARFO) childrenForTermination() []gen.PID { + var terminate []gen.PID + var empty gen.PID + + for i := range s.spec { + // in reverse order + k := len(s.spec) - 1 - i + if k < s.restartI { + break + } + if s.spec[k].disabled { + continue + } + if s.spec[k].pid == empty { + continue + } + + pid := s.spec[k].pid + s.wait[pid] = true + terminate = append(terminate, pid) + if s.keeporder { + // only the last one + break + } + } + return terminate +} + +func (s *supARFO) childForStart() supChildSpec { + var empty gen.PID + + // get the first enabled spec + for _, cs := range s.spec[s.restartI:] { + if cs.disabled { + continue + } + + if cs.pid != empty { + // shouldn't be running child processes in the range of s.spec[s.restartI:] + panic(gen.ErrInternal) + } + + return *cs + } + + panic(gen.ErrInternal) +} diff --git a/act/supervisor_ofo.go b/act/supervisor_ofo.go new file mode 100644 index 00000000..c6e90ef9 --- /dev/null +++ b/act/supervisor_ofo.go @@ -0,0 +1,383 @@ +package act + +import ( + "ergo.services/ergo/gen" +) + +// +// One For One implementation +// + +func createSupOneForOne() supBehavior { + return &supOFO{} +} + +type supOFO struct { + spec []*supChildSpec + + restart SupervisorRestart + restarts []int64 + autoshutdown bool + + mode int // 0 - normal, (1 - starting, 2 - stopping) [re]starting + + shutdown bool + shutdownReason error + wait map[gen.PID]bool + + i int +} + +func (s *supOFO) init(spec SupervisorSpec) (supAction, error) { + var action supAction + + s.restart = spec.Restart + for _, c := range spec.Children { + cs := supChildSpec{ + SupervisorChildSpec: c, + } + cs.register = true + cs.i = s.i + s.i++ + s.spec = append(s.spec, &cs) + } + + action.do = supActionStartChild + action.spec = *s.spec[0] + + s.mode = 1 // starting + s.autoshutdown = spec.DisableAutoShutdown == false + + return action, nil +} + +func (s *supOFO) childAddSpec(spec SupervisorChildSpec) (supAction, error) { + var action supAction + + if s.mode != 0 { + return action, ErrSupervisorStrategyActive + } + + if err := validateChildSpec(spec); err != nil { + return action, err + } + + for _, cs := range s.spec { + if cs.Name == spec.Name { + return action, ErrSupervisorChildDuplicate + } + } + + cs := supChildSpec{ + SupervisorChildSpec: spec, + } + cs.register = true + cs.i = s.i + s.i++ + s.spec = append(s.spec, &cs) + + // start this child + action.do = supActionStartChild + action.spec = cs + + return action, nil +} + +func (s *supOFO) childSpec(name gen.Atom) (supAction, error) { + var action supAction + var empty 
gen.PID + + // single start (if it was terminated normally before) + + if s.mode != 0 { + return action, ErrSupervisorStrategyActive + } + + for _, spec := range s.spec { + if spec.Name != name { + continue + } + + if spec.disabled { + return action, ErrSupervisorChildDisabled + } + + if spec.pid == empty { + action.do = supActionStartChild + action.spec = *spec + return action, nil + } + + // already running + return action, ErrSupervisorChildRunning + } + + return action, ErrSupervisorChildUnknown +} + +func (s *supOFO) childStarted(cs supChildSpec, pid gen.PID) supAction { + var action supAction + var empty gen.PID + + // let panic if got unknown child + spec := s.spec[cs.i] + if cs.Name != spec.Name { + panic(gen.ErrInternal) + } + + // update args, keep the pid and do nothing + spec.Args = cs.Args + spec.pid = pid + + if s.mode != 1 { // is not in starting mode? + // do nothing + return action + } + + // starting mode. start the rest + + if cs.i == len(s.spec)-1 { + // it was the last spec. do nothing more + s.mode = 0 // normal + return action + } + + // check the rest children if they are running + for i := cs.i + 1; i < len(s.spec); i++ { + if s.spec[i].pid != empty { + continue + } + + if s.spec[i].disabled { + continue + } + + action.do = supActionStartChild + action.spec = *s.spec[i] + action.spec.i = i + return action + } + + return action +} + +func (s *supOFO) childTerminated(name gen.Atom, pid gen.PID, reason error) supAction { + var action supAction + var spec *supChildSpec + var empty gen.PID + + delete(s.wait, pid) + + if s.shutdown { + if len(s.wait) > 0 { + // return action with empty list + action.do = supActionTerminateChildren + return action + } + + action.do = supActionTerminate + action.reason = s.shutdownReason + return action + } + + found := false + runningChildren := []gen.PID{} + wait := make(map[gen.PID]bool) + for _, cs := range s.spec { + if cs.Name == name || cs.pid == pid { + cs.pid = empty + found = true + spec = cs + continue + } + + if cs.pid == empty { + continue + } + + runningChildren = append(runningChildren, cs.pid) + wait[cs.pid] = true + } + + if found == false { + // seems supervisor got exit-signal from a non-child process. + // start supervisor termination + if len(runningChildren) == 0 { + action.reason = reason + action.do = supActionTerminate + return action + } + action.terminate = runningChildren + action.do = supActionTerminateChildren + action.reason = reason + s.wait = wait + s.shutdown = true + s.shutdownReason = reason + return action + } + + if spec.disabled { + // auto shutdown is enabled + if len(runningChildren) == 0 && s.autoshutdown { + // there is no more running child processes. terminate supervisor + action.reason = reason + action.do = supActionTerminate + return action + } + // do nothing + return action + } + + // check strategy + switch s.restart.Strategy { + case SupervisorStrategyTemporary: + if spec.Significant { + // significant child has terminated. + if len(runningChildren) == 0 { + action.reason = reason + action.do = supActionTerminate + return action + } + + action.terminate = runningChildren + action.do = supActionTerminateChildren + action.reason = reason + s.wait = wait + s.shutdown = true + s.shutdownReason = reason + return action + } + + // auto shutdown is enabled + if len(runningChildren) == 0 && s.autoshutdown { + // there is no more running child processes. 
terminate the supervisor
+			action.reason = reason
+			action.do = supActionTerminate
+			return action
+		}
+
+		// do nothing
+		return action
+
+	case SupervisorStrategyTransient:
+		if reason == gen.TerminateReasonNormal || reason == gen.TerminateReasonShutdown {
+			if spec.Significant {
+				// significant child has terminated
+				if len(runningChildren) == 0 {
+					action.reason = reason
+					action.do = supActionTerminate
+					return action
+				}
+
+				action.terminate = runningChildren
+				action.do = supActionTerminateChildren
+				action.reason = reason
+
+				s.wait = wait
+				s.shutdown = true
+				s.shutdownReason = reason
+				return action
+			}
+
+			// auto shutdown is enabled
+			if len(runningChildren) == 0 && s.autoshutdown {
+				// there are no more running child processes. terminate the supervisor
+				action.reason = reason
+				action.do = supActionTerminate
+				return action
+			}
+
+			// do nothing
+			return action
+		}
+	}
+
+	// check for restart intensity
+	restarts, exceeded := supCheckRestartIntensity(s.restarts,
+		int(s.restart.Period),
+		int(s.restart.Intensity))
+	s.restarts = restarts
+
+	if exceeded == false {
+		// do restart
+		action.do = supActionStartChild
+		action.spec = *spec
+
+		return action
+	}
+
+	// exceeded intensity. start termination
+	for _, cs := range s.spec {
+		if cs.pid == empty {
+			continue
+		}
+		action.terminate = append(action.terminate, cs.pid)
+	}
+	action.do = supActionTerminateChildren
+	action.reason = ErrSupervisorRestartsExceeded
+	s.wait = wait
+	s.shutdown = true
+	s.shutdownReason = reason
+
+	return action
+}
+
+func (s *supOFO) childEnable(name gen.Atom) (supAction, error) {
+	var action supAction
+	for _, cs := range s.spec {
+		if cs.Name != name {
+			continue
+		}
+
+		if cs.disabled == false {
+			// do nothing. it's already enabled
+			return action, nil
+		}
+
+		// it was disabled. enable it and start a child process with this spec
+
+		action.do = supActionStartChild
+		action.spec = *cs
+
+		return action, nil
+	}
+
+	return action, ErrSupervisorChildUnknown
+}
+
+func (s *supOFO) childDisable(name gen.Atom) (supAction, error) {
+	var action supAction
+	var empty gen.PID
+
+	for _, cs := range s.spec {
+		if cs.Name != name {
+			continue
+		}
+
+		if cs.disabled {
+			// do nothing.
its already disabled + return action, nil + } + + if cs.pid == empty { + return action, nil + } + + cs.disabled = true + action.do = supActionTerminateChildren + action.terminate = []gen.PID{cs.pid} + action.reason = gen.TerminateReasonShutdown + return action, nil + } + + return action, ErrSupervisorChildUnknown +} + +func (s *supOFO) children() []SupervisorChild { + var c []supChild + + for _, cs := range s.spec { + c = append(c, supChild{cs.pid, *cs}) + } + return sortSupChild(c) +} diff --git a/act/supervisor_sofo.go b/act/supervisor_sofo.go new file mode 100644 index 00000000..5238cd2c --- /dev/null +++ b/act/supervisor_sofo.go @@ -0,0 +1,236 @@ +package act + +import ( + "fmt" + + "ergo.services/ergo/gen" +) + +// +// Simple One For One implementation +// + +func createSupSimpleOneForOne() supBehavior { + return &supSOFO{ + spec: make(map[gen.Atom]*supChildSpec), + pids: make(map[gen.PID]*supChildSpec), + } +} + +type supSOFO struct { + spec map[gen.Atom]*supChildSpec + pids map[gen.PID]*supChildSpec + + restart SupervisorRestart + restarts []int64 + + i int + shutdown bool + shutdownReason error + wait map[gen.PID]bool +} + +func (s *supSOFO) init(spec SupervisorSpec) (supAction, error) { + var action supAction + + s.restart = spec.Restart + for _, c := range spec.Children { + cs := supChildSpec{ + SupervisorChildSpec: c, + } + cs.i = s.i + s.i++ + s.spec[cs.Name] = &cs + } + s.wait = make(map[gen.PID]bool) + return action, nil +} + +func (s *supSOFO) childAddSpec(spec SupervisorChildSpec) (supAction, error) { + var action supAction + + if s.shutdown { + return action, fmt.Errorf("shutting down") + } + + if err := validateChildSpec(spec); err != nil { + return action, err + } + if _, duplicate := s.spec[spec.Name]; duplicate { + return action, ErrSupervisorChildDuplicate + } + + cs := supChildSpec{ + SupervisorChildSpec: spec, + } + cs.i = s.i + s.i++ + s.spec[cs.Name] = &cs + + // SOFO doesn't start it on adding, so do nothing + return action, nil +} + +func (s *supSOFO) childSpec(name gen.Atom) (supAction, error) { + var action supAction + + if s.shutdown { + return action, nil + } + + spec, found := s.spec[name] + if found == false { + return action, ErrSupervisorChildUnknown + } + + if spec.disabled { + return action, ErrSupervisorChildDisabled + } + action.do = supActionStartChild + action.spec = *spec + return action, nil +} + +func (s *supSOFO) childStarted(spec supChildSpec, pid gen.PID) supAction { + var action supAction + + if s.shutdown { + return action + } + + sc, found := s.spec[spec.Name] + if found == false { + // do nothing + return action + } + + // do not overwrite args since it is a dynamic child + // sc.Args = spec.Args + + // keep it and do nothing + s.pids[pid] = sc + return action +} + +func (s *supSOFO) childTerminated(name gen.Atom, pid gen.PID, reason error) supAction { + var action supAction + + delete(s.pids, pid) + + if s.shutdown { + delete(s.wait, pid) + if len(s.wait) > 0 { + // return action with empty process list for termination + action.do = supActionTerminateChildren + return action + } + + // children terminated. 
shutdown the supervisor + action.do = supActionTerminate + action.reason = s.shutdownReason + return action + } + + spec, found := s.spec[name] + if found { + + // check strategy + switch s.restart.Strategy { + case SupervisorStrategyTemporary: + // do nothing + return action + case SupervisorStrategyTransient: + if reason == gen.TerminateReasonNormal || reason == gen.TerminateReasonShutdown { + // do nothing + return action + } + } + + if spec.disabled { + // do nothing + return action + } + + // check for restart intensity + restarts, exceeded := supCheckRestartIntensity(s.restarts, + int(s.restart.Period), + int(s.restart.Intensity)) + s.restarts = restarts + + if exceeded == false { + // do restart + action.do = supActionStartChild + action.spec = *spec + + return action + } + + // exceeded intensity. start termination + action.do = supActionTerminateChildren + action.reason = ErrSupervisorRestartsExceeded + } else { + action.do = supActionTerminateChildren + action.reason = reason + } + + for pid := range s.pids { + action.terminate = append(action.terminate, pid) + s.wait[pid] = true + } + s.shutdown = true + s.shutdownReason = action.reason + return action +} + +func (s *supSOFO) childEnable(name gen.Atom) (supAction, error) { + var action supAction + + if s.shutdown { + return action, fmt.Errorf("shutting down") + } + + spec, found := s.spec[name] + if found == false { + return action, ErrSupervisorChildUnknown + } + spec.disabled = false + return action, nil +} + +func (s *supSOFO) childDisable(name gen.Atom) (supAction, error) { + var action supAction + + if s.shutdown { + return action, fmt.Errorf("shutting down") + } + + spec, found := s.spec[name] + if found == false { + return action, ErrSupervisorChildUnknown + } + spec.disabled = true + + terminate := []gen.PID{} + for pid, spec := range s.pids { + if spec.Name != name { + continue + } + terminate = append(terminate, pid) + s.wait[pid] = true + } + + if len(terminate) > 0 { + action.do = supActionTerminateChildren + action.reason = gen.TerminateReasonShutdown + action.terminate = terminate + } + return action, nil +} + +func (s *supSOFO) children() []SupervisorChild { + var c []supChild + for pid, spec := range s.pids { + c = append(c, supChild{pid, *spec}) + } + return sortSupChild(c) +} diff --git a/act/supervisor_unit_test.go b/act/supervisor_unit_test.go new file mode 100644 index 00000000..d6772af9 --- /dev/null +++ b/act/supervisor_unit_test.go @@ -0,0 +1,65 @@ +package act + +import ( + "reflect" + "testing" + + "ergo.services/ergo/gen" +) + +func Test_sortSupChild(t *testing.T) { + node := gen.Atom("node1@localhost") + spec1 := supChildSpec{ + i: 30, + } + spec1.Name = "s1" + spec2 := supChildSpec{ + i: 20, + } + spec2.Name = "s2" + spec3 := supChildSpec{ + i: 25, + register: true, + } + spec3.Name = "s3" + + data := []supChild{ + { + pid: gen.PID{Node: node, ID: 1018}, + spec: spec1, + }, + { + pid: gen.PID{Node: node, ID: 1014}, + spec: spec2, + }, + { + pid: gen.PID{Node: node, ID: 1044}, + spec: spec3, + }, + { + pid: gen.PID{Node: node, ID: 1013}, + spec: spec1, + }, + { + pid: gen.PID{Node: node, ID: 1024}, + spec: spec2, + }, + { + pid: gen.PID{Node: node, ID: 1019}, + spec: spec2, + }, + } + + children := sortSupChild(data) + expected := []SupervisorChild{ + {"s2", "", gen.PID{Node: node, ID: 1014}, false, false}, + {"s2", "", gen.PID{Node: node, ID: 1019}, false, false}, + {"s2", "", gen.PID{Node: node, ID: 1024}, false, false}, + {"s3", "s3", gen.PID{Node: node, ID: 1044}, false, false}, + {"s1", "", 
gen.PID{Node: node, ID: 1013}, false, false}, + {"s1", "", gen.PID{Node: node, ID: 1018}, false, false}, + } + if reflect.DeepEqual(children, expected) == false { + t.Fatal("mismatch") + } +} diff --git a/act/web_worker.go b/act/web_worker.go new file mode 100644 index 00000000..11637029 --- /dev/null +++ b/act/web_worker.go @@ -0,0 +1,297 @@ +package act + +import ( + "fmt" + "net/http" + "reflect" + "runtime" + "strings" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/meta" +) + +// WebWorker interface + +type WebWorkerBehavior interface { + gen.ProcessBehavior + + // Init invoked on a spawn WebWorker for the initializing. + Init(args ...any) error + + // HandleMessage invoked if WebWorker received a message sent with gen.Process.Send(...). + // Non-nil value of the returning error will cause termination of this process. + // To stop this process normally, return gen.TerminateReasonNormal + // or any other for abnormal termination. + HandleMessage(from gen.PID, message any) error + + // HandleCall invoked if WebWorker got a synchronous request made with gen.Process.Call(...). + // Return nil as a result to handle this request asynchronously and + // to provide the result later using the gen.Process.SendResponse(...) method. + HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) + + // Terminate invoked on a termination process + Terminate(reason error) + + // HandleEvent invoked on an event message if this process got subscribed on + // this event using gen.Process.LinkEvent or gen.Process.MonitorEvent + HandleEvent(message gen.MessageEvent) error + + // HandleInspect invoked on the request made with gen.Process.Inspect(...) + HandleInspect(from gen.PID, item ...string) map[string]string + + // HandleGet invoked on a GET request + HandleGet(from gen.PID, writer http.ResponseWriter, request *http.Request) error + // HandlePOST invoked on a POST request + HandlePost(from gen.PID, writer http.ResponseWriter, request *http.Request) error + // HandlePut invoked on a PUT request + HandlePut(from gen.PID, writer http.ResponseWriter, request *http.Request) error + // HandlePatch invoked on a PATCH request + HandlePatch(from gen.PID, writer http.ResponseWriter, request *http.Request) error + // HandleDelete invoked on a DELETE request + HandleDelete(from gen.PID, writer http.ResponseWriter, request *http.Request) error + // HandleHead invoked on a HEAD request + HandleHead(from gen.PID, writer http.ResponseWriter, request *http.Request) error + // HandleOptions invoked on an OPTIONS request + HandleOptions(from gen.PID, writer http.ResponseWriter, request *http.Request) error +} + +type WebWorker struct { + gen.Process + + behavior WebWorkerBehavior + mailbox gen.ProcessMailbox +} + +func (w *WebWorker) ProcessInit(process gen.Process, args ...any) (rr error) { + var ok bool + if w.behavior, ok = process.Behavior().(WebWorkerBehavior); ok == false { + unknown := strings.TrimPrefix(reflect.TypeOf(process.Behavior()).String(), "*") + return fmt.Errorf("ProcessInit: not a WebWorkerBehavior %s", unknown) + } + w.Process = process + w.mailbox = process.Mailbox() + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + w.Log().Panic("WebWorker initialization failed. Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + rr = gen.TerminateReasonPanic + } + }() + } + + return w.behavior.Init(args...) 
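// A sketch of a WebWorkerBehavior handling GET requests, typically spawned as
// a pool of workers behind the web handler from the meta package. The names
// are illustrative.
//
//	type myWeb struct {
//		act.WebWorker
//	}
//
//	func factoryMyWeb() gen.ProcessBehavior {
//		return &myWeb{}
//	}
//
//	func (w *myWeb) HandleGet(from gen.PID, writer http.ResponseWriter, request *http.Request) error {
//		fmt.Fprintf(writer, "hello from %s\n", w.PID())
//		return nil
//	}
//
// Methods left unimplemented fall through to the defaults below, which reply
// with 501 Not Implemented.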
+} + +func (w *WebWorker) ProcessRun() (rr error) { + var message *gen.MailboxMessage + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + w.Log().Panic("Web terminated. Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + rr = gen.TerminateReasonPanic + } + }() + } + + for { + if w.State() != gen.ProcessStateRunning { + // process was killed by the node. + return gen.TerminateReasonKill + } + + if message != nil { + gen.ReleaseMailboxMessage(message) + message = nil + } + + for { + // check queues + msg, ok := w.mailbox.Urgent.Pop() + if ok { + // got new urgent message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = w.mailbox.System.Pop() + if ok { + // got new system message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + msg, ok = w.mailbox.Main.Pop() + if ok { + // got new regular message. handle it + message = msg.(*gen.MailboxMessage) + break + } + + if _, ok := w.mailbox.Log.Pop(); ok { + panic("web process can not be a logger") + } + + // no messages in the mailbox + return nil + } + + switch message.Type { + case gen.MailboxMessageTypeRegular: + if r, ok := message.Message.(meta.MessageWebRequest); ok { + var reason error + switch r.Request.Method { + case "GET": + reason = w.behavior.HandleGet(message.From, r.Response, r.Request) + case "POST": + reason = w.behavior.HandlePost(message.From, r.Response, r.Request) + case "PUT": + reason = w.behavior.HandlePut(message.From, r.Response, r.Request) + case "PATCH": + reason = w.behavior.HandlePatch(message.From, r.Response, r.Request) + case "DELETE": + reason = w.behavior.HandleDelete(message.From, r.Response, r.Request) + case "HEAD": + reason = w.behavior.HandleHead(message.From, r.Response, r.Request) + case "OPTIONS": + reason = w.behavior.HandleOptions(message.From, r.Response, r.Request) + default: + http.Error(r.Response, + "unknown request type: "+r.Request.Method, + http.StatusNotImplemented) + } + r.Done() + if reason != nil { + return reason + } + continue + } + + if reason := w.behavior.HandleMessage(message.From, message.Message); reason != nil { + return reason + } + + case gen.MailboxMessageTypeRequest: + var reason error + var result any + + result, reason = w.behavior.HandleCall(message.From, message.Ref, message.Message) + + if reason != nil { + // if reason is "normal" and we got response - send it before termination + if reason == gen.TerminateReasonNormal && result != nil { + w.SendResponse(message.From, message.Ref, result) + } + return reason + } + + if result == nil { + // async handling of sync request. 
response could be sent + // later, even by the other process + continue + } + + w.SendResponse(message.From, message.Ref, result) + + case gen.MailboxMessageTypeEvent: + if reason := w.behavior.HandleEvent(message.Message.(gen.MessageEvent)); reason != nil { + return reason + } + + case gen.MailboxMessageTypeExit: + switch exit := message.Message.(type) { + case gen.MessageExitPID: + return fmt.Errorf("%s: %w", exit.PID, exit.Reason) + + case gen.MessageExitProcessID: + return fmt.Errorf("%s: %w", exit.ProcessID, exit.Reason) + + case gen.MessageExitAlias: + return fmt.Errorf("%s: %w", exit.Alias, exit.Reason) + + case gen.MessageExitEvent: + return fmt.Errorf("%s: %w", exit.Event, exit.Reason) + + case gen.MessageExitNode: + return fmt.Errorf("%s: %w", exit.Name, gen.ErrNoConnection) + + default: + panic(fmt.Sprintf("unknown exit message: %#v", exit)) + } + + case gen.MailboxMessageTypeInspect: + result := w.behavior.HandleInspect(message.From, message.Message.([]string)...) + w.SendResponse(message.From, message.Ref, result) + } + + } +} + +func (w *WebWorker) ProcessTerminate(reason error) { + w.behavior.Terminate(reason) +} + +// default callbacks for WebWorkerBehavior interface +func (w *WebWorker) Init(args ...any) error { + return nil +} +func (w *WebWorker) HandleMessage(from gen.PID, message any) error { + w.Log().Warning("WebWorker.HandleMessage: unhandled message from %s", from) + return nil +} +func (w *WebWorker) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + w.Log().Warning("WebWorker.HandleCall: unhandled request from %s", from) + return nil, nil +} + +func (w *WebWorker) HandleEvent(message gen.MessageEvent) error { + w.Log().Warning("WebWorker.HandleEvent: unhandled event message %#v", message) + return nil +} + +func (w *WebWorker) Terminate(reason error) {} +func (w *WebWorker) HandleInspect(from gen.PID, item ...string) map[string]string { + return nil +} +func (w *WebWorker) HandleGet(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + w.Log().Warning("WebWorker.HandleGet: unhandled request from %s with URI: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} +func (w *WebWorker) HandlePost(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + w.Log().Warning("WebWorker.HandlePost: unhandled request from %s with URI: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} +func (w *WebWorker) HandlePut(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + w.Log().Warning("WebWorker.HandlePut: unhandled request from %s with URI: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} +func (w *WebWorker) HandlePatch(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + w.Log().Warning("WebWorker.HandlePatch: unhandled request from %s with URI: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} +func (w *WebWorker) HandleDelete(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + w.Log().Warning("WebWorker.HandleDelete: unhandled request from %s with URI: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} +func (w *WebWorker) HandleHead(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + 
w.Log().Warning("WebWorker.HandleHead: unhandled request from %s with URI: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} +func (w *WebWorker) HandleOptions(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + w.Log().Warning("WebWorker.HandleOptions: unhandled request from %s for: %s", from, request.RequestURI) + http.Error(writer, "unhandled request", http.StatusNotImplemented) + return nil +} diff --git a/app/system/app.go b/app/system/app.go new file mode 100644 index 00000000..9d712d55 --- /dev/null +++ b/app/system/app.go @@ -0,0 +1,32 @@ +package system + +import ( + "ergo.services/ergo/gen" +) + +const Name gen.Atom = "system_app" + +func CreateApp() gen.ApplicationBehavior { + return &systemApp{} +} + +type systemApp struct { + node gen.Node +} + +func (sa *systemApp) Load(node gen.Node, args ...any) (gen.ApplicationSpec, error) { + return gen.ApplicationSpec{ + Name: Name, + Description: "System Application", + Group: []gen.ApplicationMemberSpec{ + { + Factory: factory_sup, + Name: "system_sup", + }, + }, + Mode: gen.ApplicationModePermanent, + }, nil +} + +func (sa *systemApp) Start(mode gen.ApplicationMode) {} +func (sa *systemApp) Terminate(reason error) {} diff --git a/app/system/inspect/connection.go b/app/system/inspect/connection.go new file mode 100644 index 00000000..5668c5a6 --- /dev/null +++ b/app/system/inspect/connection.go @@ -0,0 +1,124 @@ +package inspect + +import ( + "fmt" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_connection() gen.ProcessBehavior { + return &connection{} +} + +type connection struct { + act.Actor + token gen.Ref + + event gen.Atom + generating bool + remote gen.Atom +} + +func (ic *connection) Init(args ...any) error { + ic.remote = args[0].(gen.Atom) + ic.Log().SetLogger("default") + ic.Log().Debug("connection inspector started") + // RegisterEvent is not allowed here + ic.Send(ic.PID(), register{}) + return nil +} + +func (ic *connection) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if ic.generating == false { + ic.Log().Debug("generating canceled") + break // cancelled + } + ic.Log().Debug("generating event") + + ev := MessageInspectConnection{ + Node: ic.Node().Name(), + Disconnected: true, + } + + if remote, err := ic.Node().Network().Node(ic.remote); err == nil { + ev.Disconnected = false + ev.Info = remote.Info() + } + + if err := ic.SendEvent(ic.event, ic.token, ev); err != nil { + ic.Log().Error("unable to send event %q: %s", inspectNetwork, err) + return gen.TerminateReasonNormal + } + + if ev.Disconnected { + return gen.TerminateReasonNormal + } + ic.SendAfter(ic.PID(), generate{}, inspectNetworkPeriod) + + case requestInspect: + response := ResponseInspectConnection{ + Event: gen.Event{ + Name: ic.event, + Node: ic.Node().Name(), + }, + Disconnected: true, + } + if remote, err := ic.Node().Network().Node(ic.remote); err == nil { + response.Disconnected = false + response.Info = remote.Info() + } + ic.SendResponse(m.pid, m.ref, response) + ic.Log().Debug("sent response for the inspect connection request to: %s", m.pid) + if response.Disconnected { + return gen.TerminateReasonNormal + } + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + evname := gen.Atom(fmt.Sprintf("%s_%s", inspectConnection, ic.remote)) + token, err := ic.RegisterEvent(evname, eopts) + if err != nil { + ic.Log().Error("unable to register 
connection event: %s", err) + return err + } + ic.Log().Info("registered event %s", inspectNetwork) + ic.event = evname + + ic.token = token + ic.SendAfter(ic.PID(), shutdown{}, inspectNetworkIdlePeriod) + + case shutdown: + if ic.generating { + ic.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + ic.Log().Debug("got first subscriber. start generating events...") + ic.Send(ic.PID(), generate{}) + ic.generating = true + + case gen.MessageEventStop: // no subscribers + ic.Log().Debug("no subscribers. stop generating") + if ic.generating { + ic.generating = false + ic.SendAfter(ic.PID(), shutdown{}, inspectNetworkIdlePeriod) + } + + default: + ic.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (ic *connection) Terminate(reason error) { + ic.Log().Debug("network inspector terminated: %s", reason) +} diff --git a/app/system/inspect/inspect.go b/app/system/inspect/inspect.go new file mode 100644 index 00000000..ea036ce2 --- /dev/null +++ b/app/system/inspect/inspect.go @@ -0,0 +1,329 @@ +package inspect + +import ( + "errors" + "fmt" + "sort" + "time" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +const ( + Name gen.Atom = "system_inspect" + + inspectNode = "inspect_node" + inspectNodePeriod = time.Second + inspectNodeIdlePeriod = 5 * time.Second + + inspectProcessList = "inspect_process_list" + inspectProcessListPeriod = time.Second + inspectProcessListIdlePeriod = 5 * time.Second + + inspectProcess = "inspect_process" + inspectProcessPeriod = time.Second + inspectProcessIdlePeriod = 5 * time.Second + + inspectProcessState = "inspect_process_state" + inspectProcessStatePeriod = time.Second + inspectProcessStateIdlePeriod = 5 * time.Second + + inspectMeta = "inspect_meta" + inspectMetaPeriod = time.Second + inspectMetaIdlePeriod = 5 * time.Second + + inspectMetaState = "inspect_meta_state" + inspectMetaStatePeriod = time.Second + inspectMetaStateIdlePeriod = 5 * time.Second + + inspectNetwork = "inspect_network" + inspectNetworkPeriod = time.Second + inspectNetworkIdlePeriod = 5 * time.Second + + inspectConnection = "inspect_connection" + inspectConnectionPeriod = time.Second + inspectConnectionIdlePeriod = 5 * time.Second + + inspectLog = "inspect_log" + inspectLogIdlePeriod = 10 * time.Second +) + +var ( + inspectLogFilter = []gen.LogLevel{ + gen.LogLevelDebug, + gen.LogLevelInfo, + gen.LogLevelWarning, + gen.LogLevelError, + gen.LogLevelPanic, + } +) + +func Factory() gen.ProcessBehavior { + return &inspect{} +} + +type inspect struct { + act.Actor +} + +type requestInspect struct { + pid gen.PID + ref gen.Ref +} + +type register struct{} +type shutdown struct{} +type generate struct{} + +func (i *inspect) Init(args ...any) error { + i.Log().SetLogger("default") + i.Log().Debug("%s started", i.Name()) + return nil +} + +func (i *inspect) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + switch r := request.(type) { + case RequestInspectNode: + // try to spawn node inspector process + opts := gen.ProcessOptions{ + LinkParent: true, + } + _, err := i.SpawnRegister(inspectNode, factory_node, opts) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(inspectNode, forward) + return nil, nil // no reply + + case RequestInspectNetwork: + opts := gen.ProcessOptions{ + LinkParent: true, + } + _, err := 
i.SpawnRegister(inspectNetwork, factory_network, opts) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(inspectNetwork, forward) + return nil, nil // no reply + + case RequestInspectConnection: + opts := gen.ProcessOptions{ + LinkParent: true, + } + pname := gen.Atom(fmt.Sprintf("%s_%s", inspectConnection, r.RemoteNode)) + _, err := i.SpawnRegister(pname, factory_connection, opts, r.RemoteNode) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + case RequestInspectProcessList: + opts := gen.ProcessOptions{ + LinkParent: true, + } + if r.Start < 1000 { + r.Start = 1000 + } + if r.Limit < 1 { + r.Limit = 1000 + } + pname := gen.Atom(fmt.Sprintf("%s_%d_%d", inspectProcessList, r.Start, r.Start+r.Limit-1)) + _, err := i.SpawnRegister(pname, factory_process_list, opts, r.Start, r.Limit) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + case RequestInspectProcess: + opts := gen.ProcessOptions{ + LinkParent: true, + } + pname := gen.Atom(fmt.Sprintf("%s_%s", inspectProcess, r.PID)) + _, err := i.SpawnRegister(pname, factory_process, opts, r.PID) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + case RequestInspectProcessState: + if r.PID == i.PID() { + return errors.New("unable to inspect the state of itself"), nil + } + opts := gen.ProcessOptions{ + LinkParent: true, + } + pname := gen.Atom(fmt.Sprintf("%s_%s", inspectProcessState, r.PID)) + _, err := i.SpawnRegister(pname, factory_process_state, opts, r.PID) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + case RequestInspectMeta: + opts := gen.ProcessOptions{ + LinkParent: true, + } + pname := gen.Atom(fmt.Sprintf("%s_%s", inspectMeta, r.Meta)) + _, err := i.SpawnRegister(pname, factory_meta, opts, r.Meta) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + case RequestInspectMetaState: + opts := gen.ProcessOptions{ + LinkParent: true, + } + pname := gen.Atom(fmt.Sprintf("%s_%s", inspectMetaState, r.Meta)) + _, err := i.SpawnRegister(pname, factory_meta_state, opts, r.Meta) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + case RequestInspectLog: + // try to spawn node inspector process + opts := gen.ProcessOptions{ + LinkParent: true, + } + + name := "diwep" + levels := r.Levels + if len(r.Levels) > 0 { + b := []byte{} + sort.Slice(r.Levels, func(i, j int) bool { + return r.Levels[i] < r.Levels[j] + }) + for i := range r.Levels { + switch r.Levels[i] { + case gen.LogLevelDebug: + b = append(b, 'd') + case gen.LogLevelInfo: + b = append(b, 'i') + case gen.LogLevelWarning: + b = append(b, 'w') + case gen.LogLevelError: + b = 
append(b, 'e') + case gen.LogLevelPanic: + b = append(b, 'p') + } + } + name = string(b) + } else { + levels = inspectLogFilter + } + + pname := gen.Atom(fmt.Sprintf("%s_%s", inspectLog, name)) + _, err := i.SpawnRegister(pname, factory_log, opts, levels) + if err != nil && err != gen.ErrTaken { + return err, nil + } + // forward this request + forward := requestInspect{ + pid: from, + ref: ref, + } + i.Send(pname, forward) + return nil, nil // no reply + + // do commands + + case RequestDoSend: + response := ResponseDoSend{ + Error: i.SendWithPriority(r.PID, r.Message, r.Priority), + } + return response, nil + + case RequestDoSendMeta: + response := ResponseDoSendMeta{ + Error: i.SendAlias(r.Meta, r.Message), + } + return response, nil + + case RequestDoSendExit: + response := ResponseDoSendExit{ + Error: i.SendExit(r.PID, r.Reason), + } + return response, nil + + case RequestDoSendExitMeta: + response := ResponseDoSendExit{ + Error: i.SendExitMeta(r.Meta, r.Reason), + } + return response, nil + + case RequestDoKill: + response := ResponseDoKill{ + Error: i.Node().Kill(r.PID), + } + return response, nil + + case RequestDoSetLogLevel: + response := ResponseDoSetLogLevel{ + Error: i.Node().Log().SetLevel(r.Level), + } + return response, nil + + case RequestDoSetLogLevelProcess: + response := ResponseDoSetLogLevel{ + Error: i.Node().SetLogLevelProcess(r.PID, r.Level), + } + return response, nil + + case RequestDoSetLogLevelMeta: + response := ResponseDoSetLogLevel{ + Error: i.Node().SetLogLevelMeta(r.Meta, r.Level), + } + return response, nil + } + + i.Log().Error("unsupported request: %#v", request) + return gen.ErrUnsupported, nil +} diff --git a/app/system/inspect/log.go b/app/system/inspect/log.go new file mode 100644 index 00000000..1ffb873f --- /dev/null +++ b/app/system/inspect/log.go @@ -0,0 +1,149 @@ +package inspect + +import ( + "fmt" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_log() gen.ProcessBehavior { + return &log{} +} + +type log struct { + act.Actor + token gen.Ref + event gen.Atom + + levels []gen.LogLevel + generating bool +} + +func (il *log) Init(args ...any) error { + il.levels = args[0].([]gen.LogLevel) + il.Log().SetLogger("default") + il.Log().Debug("log inspector started") + // RegisterEvent is not allowed here + il.Send(il.PID(), register{}) + return nil +} + +// as soon this process registered as a logger it is not able to use Log() +// method anymore + +func (il *log) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case requestInspect: + response := ResponseInspectLog{ + Event: gen.Event{ + Name: il.event, + Node: il.Node().Name(), + }, + } + il.SendResponse(m.pid, m.ref, response) + + case register: + eopts := gen.EventOptions{ + Notify: true, + } + evname := gen.Atom(fmt.Sprintf("%s_%s", string(il.Name()), il.PID())) + token, err := il.RegisterEvent(evname, eopts) + if err != nil { + return err + } + + il.event = evname + il.token = token + il.SendAfter(il.PID(), shutdown{}, inspectLogIdlePeriod) + + case shutdown: + if il.generating { + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + // register this process as a logger + il.Log().Debug("add this process as a logger") + il.Node().LoggerAddPID(il.PID(), il.PID().String(), il.levels...) 
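The RequestInspectLog handler above encodes the selected levels into a short name suffix ('d', 'i', 'w', 'e', 'p' — hence the "diwep" default when all five are chosen). A standalone sketch of that mapping, using only the gen.LogLevel constants from this patch; the helper name levelsToSuffix is illustrative, not part of the patch:

```go
package main

import (
	"sort"

	"ergo.services/ergo/gen"
)

// levelsToSuffix mirrors the RequestInspectLog case above: sort the levels,
// then map each one to a single letter. Sorting keeps the derived process
// and event names stable regardless of the order the caller listed them in.
func levelsToSuffix(levels []gen.LogLevel) string {
	sort.Slice(levels, func(i, j int) bool { return levels[i] < levels[j] })
	b := []byte{}
	for _, l := range levels {
		switch l {
		case gen.LogLevelDebug:
			b = append(b, 'd')
		case gen.LogLevelInfo:
			b = append(b, 'i')
		case gen.LogLevelWarning:
			b = append(b, 'w')
		case gen.LogLevelError:
			b = append(b, 'e')
		case gen.LogLevelPanic:
			b = append(b, 'p')
		}
	}
	return string(b)
}
```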
+		// we can't use the Log() method while this process is registered as a logger
+		il.generating = true
+
+	case gen.MessageEventStop: // no subscribers
+		// unregister this process as a logger
+		il.Node().LoggerDeletePID(il.PID())
+		// now we can use the Log() method again
+		il.Log().Debug("removed this process as a logger")
+		il.generating = false
+		il.SendAfter(il.PID(), shutdown{}, inspectLogIdlePeriod)
+	}
+
+	return nil
+}
+
+func (il *log) HandleLog(message gen.MessageLog) error {
+	switch m := message.Source.(type) {
+	case gen.MessageLogNode:
+		// log message emitted by the node core
+		ev := MessageInspectLogNode{
+			Node:      m.Node,
+			Creation:  m.Creation,
+			Timestamp: message.Time.UnixNano(),
+			Level:     message.Level,
+			Message:   fmt.Sprintf(message.Format, message.Args...),
+		}
+		if err := il.SendEvent(il.event, il.token, ev); err != nil {
+			return gen.TerminateReasonNormal
+		}
+	case gen.MessageLogProcess:
+		// log message emitted by a process
+		ev := MessageInspectLogProcess{
+			Node:      m.Node,
+			Name:      m.Name,
+			PID:       m.PID,
+			Timestamp: message.Time.UnixNano(),
+			Level:     message.Level,
+			Message:   fmt.Sprintf(message.Format, message.Args...),
+		}
+		if err := il.SendEvent(il.event, il.token, ev); err != nil {
+			return gen.TerminateReasonNormal
+		}
+
+	case gen.MessageLogMeta:
+		// log message emitted by a meta-process
+		ev := MessageInspectLogMeta{
+			Node:      m.Node,
+			Parent:    m.Parent,
+			Meta:      m.Meta,
+			Timestamp: message.Time.UnixNano(),
+			Level:     message.Level,
+			Message:   fmt.Sprintf(message.Format, message.Args...),
+		}
+
+		if err := il.SendEvent(il.event, il.token, ev); err != nil {
+			return gen.TerminateReasonNormal
+		}
+	case gen.MessageLogNetwork:
+		// log message emitted by a network connection
+		ev := MessageInspectLogNetwork{
+			Node:      m.Node,
+			Peer:      m.Peer,
+			Timestamp: message.Time.UnixNano(),
+			Level:     message.Level,
+			Message:   fmt.Sprintf(message.Format, message.Args...),
+		}
+		if err := il.SendEvent(il.event, il.token, ev); err != nil {
+			return gen.TerminateReasonNormal
+		}
+	}
+	// ignore any other log messages
+	// TODO should we handle them?
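On the consumer side, a subscriber to this event receives the MessageInspectLog* values defined in message.go below. A sketch of such a handler, assuming the payload arrives in the Message field of gen.MessageEvent; the logViewer actor is hypothetical:

```go
package main

import (
	"fmt"

	"ergo.services/ergo/act"
	"ergo.services/ergo/app/system/inspect"
	"ergo.services/ergo/gen"
)

// logViewer is a hypothetical subscriber to the log inspector's event.
type logViewer struct {
	act.Actor
}

func (v *logViewer) HandleEvent(event gen.MessageEvent) error {
	// assumption: the event payload is delivered in event.Message
	switch m := event.Message.(type) {
	case inspect.MessageInspectLogNode:
		fmt.Printf("[%s] node: %s\n", m.Node, m.Message)
	case inspect.MessageInspectLogProcess:
		fmt.Printf("[%s] %s: %s\n", m.Node, m.PID, m.Message)
	case inspect.MessageInspectLogMeta:
		fmt.Printf("[%s] meta %s: %s\n", m.Node, m.Meta, m.Message)
	case inspect.MessageInspectLogNetwork:
		fmt.Printf("[%s] peer %s: %s\n", m.Node, m.Peer, m.Message)
	}
	return nil
}
```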
+ return nil +} + +func (il *log) Terminate(reason error) { + // since this process is already unregistered + // it is also unregistered as a logger + // so we can use Log() here + il.Log().Debug("log inspector terminated: %s", reason) +} diff --git a/app/system/inspect/message.go b/app/system/inspect/message.go new file mode 100644 index 00000000..79af075a --- /dev/null +++ b/app/system/inspect/message.go @@ -0,0 +1,234 @@ +package inspect + +import "ergo.services/ergo/gen" + +type RequestInspectNode struct{} +type ResponseInspectNode struct { + CRC32 string + Event gen.Event + OS string + Arch string + Cores int + Version gen.Version + Creation int64 +} + +type MessageInspectNode struct { + Node gen.Atom + Info gen.NodeInfo +} + +// network + +type RequestInspectNetwork struct{} +type ResponseInspectNetwork struct { + Event gen.Event + Stopped bool + Info gen.NetworkInfo +} + +type MessageInspectNetwork struct { + Node gen.Atom + Stopped bool + Info gen.NetworkInfo +} + +type RequestInspectConnection struct { + RemoteNode gen.Atom +} +type ResponseInspectConnection struct { + Event gen.Event + Disconnected bool + Info gen.RemoteNodeInfo +} + +type MessageInspectConnection struct { + Node gen.Atom + Disconnected bool + Info gen.RemoteNodeInfo +} + +// process list + +type RequestInspectProcessList struct { + Start int + Limit int +} +type ResponseInspectProcessList struct { + Event gen.Event +} + +type MessageInspectProcessList struct { + Node gen.Atom + Processes []gen.ProcessShortInfo +} + +// node logs + +type RequestInspectLog struct { + Levels []gen.LogLevel +} +type ResponseInspectLog struct { + Event gen.Event +} + +type MessageInspectLogNode struct { + Node gen.Atom + Creation int64 + Timestamp int64 + Level gen.LogLevel + Message string +} + +type MessageInspectLogProcess struct { + Node gen.Atom + Name gen.Atom + PID gen.PID + Timestamp int64 + Level gen.LogLevel + Message string +} + +type MessageInspectLogNetwork struct { + Node gen.Atom + Peer gen.Atom + Timestamp int64 + Level gen.LogLevel + Message string +} + +type MessageInspectLogMeta struct { + Node gen.Atom + Parent gen.PID + Meta gen.Alias + Timestamp int64 + Level gen.LogLevel + Message string +} + +// process + +type RequestInspectProcess struct { + PID gen.PID +} +type ResponseInspectProcess struct { + Event gen.Event +} + +type MessageInspectProcess struct { + Node gen.Atom + Info gen.ProcessInfo + Terminated bool +} + +// process state +type RequestInspectProcessState struct { + PID gen.PID +} +type ResponseInspectProcessState struct { + Event gen.Event +} + +type MessageInspectProcessState struct { + Node gen.Atom + PID gen.PID + State map[string]string +} + +// meta + +type RequestInspectMeta struct { + Meta gen.Alias +} +type ResponseInspectMeta struct { + Event gen.Event +} + +type MessageInspectMeta struct { + Node gen.Atom + Info gen.MetaInfo + Terminated bool +} + +// meta state +type RequestInspectMetaState struct { + Meta gen.Alias +} +type ResponseInspectMetaState struct { + Event gen.Event +} + +type MessageInspectMetaState struct { + Node gen.Atom + Meta gen.Alias + State map[string]string +} + +// do send + +type RequestDoSend struct { + PID gen.PID + Priority gen.MessagePriority + Message any +} + +type ResponseDoSend struct { + Error error +} + +type RequestDoSendMeta struct { + Meta gen.Alias + Message any +} +type ResponseDoSendMeta struct { + Error error +} + +// do send exit + +type RequestDoSendExit struct { + PID gen.PID + Reason error +} +type ResponseDoSendExit struct { + Error error +} + 
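These Request*/Response* pairs double as a small command protocol for the system_inspect process. A caller-side sketch, assuming gen.Process.Call(target, request) returns (any, error) as the HandleCall code above implies; killRemote is an illustrative helper, not part of the patch:

```go
package main

import (
	"fmt"

	"ergo.services/ergo/app/system/inspect"
	"ergo.services/ergo/gen"
)

// killRemote asks the system_inspect process to kill the target process.
func killRemote(p gen.Process, target gen.PID) error {
	r, err := p.Call(inspect.Name, inspect.RequestDoKill{PID: target})
	if err != nil {
		return err
	}
	resp, ok := r.(inspect.ResponseDoKill)
	if !ok {
		return fmt.Errorf("unexpected response: %#v", r)
	}
	return resp.Error
}
```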
+type RequestDoSendExitMeta struct { + Meta gen.Alias + Reason error +} +type ResponseDoSendExitMeta struct { + Error error +} + +// do kill + +type RequestDoKill struct { + PID gen.PID +} +type ResponseDoKill struct { + Error error +} + +// do set log level + +// node +type RequestDoSetLogLevel struct { + Level gen.LogLevel +} +type ResponseDoSetLogLevel struct { + Error error +} + +// process +type RequestDoSetLogLevelProcess struct { + PID gen.PID + Level gen.LogLevel +} + +// meta +type RequestDoSetLogLevelMeta struct { + Meta gen.Alias + Level gen.LogLevel +} diff --git a/app/system/inspect/meta.go b/app/system/inspect/meta.go new file mode 100644 index 00000000..12f3a638 --- /dev/null +++ b/app/system/inspect/meta.go @@ -0,0 +1,119 @@ +package inspect + +import ( + "fmt" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_meta() gen.ProcessBehavior { + return &meta{} +} + +type meta struct { + act.Actor + token gen.Ref + + event gen.Atom + generating bool + meta gen.Alias +} + +func (im *meta) Init(args ...any) error { + im.meta = args[0].(gen.Alias) + im.Log().SetLogger("default") + im.Log().Debug("meta process inspector started. pid %s", im.meta) + // RegisterEvent is not allowed here + im.Send(im.PID(), register{}) + return nil +} + +func (im *meta) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if im.generating == false { + im.Log().Debug("generating canceled") + break // cancelled + } + im.Log().Debug("generating event") + + info, err := im.MetaInfo(im.meta) + if err != nil { + if err == gen.ErrMetaUnknown || err == gen.ErrProcessTerminated { + return gen.TerminateReasonNormal + } + im.Log().Error("unable to inspect meta process %s: %s", im.meta, err) + // will try next time + im.SendAfter(im.PID(), generate{}, inspectMetaPeriod) + return nil + } + + ev := MessageInspectMeta{ + Node: im.Node().Name(), + Info: info, + } + + if err := im.SendEvent(im.event, im.token, ev); err != nil { + im.Log().Error("unable to send event %q: %s", im.event, err) + return gen.TerminateReasonNormal + } + + im.SendAfter(im.PID(), generate{}, inspectMetaPeriod) + + case requestInspect: + response := ResponseInspectMeta{ + Event: gen.Event{ + Name: im.event, + Node: im.Node().Name(), + }, + } + im.SendResponse(m.pid, m.ref, response) + im.Log().Debug("sent response for the inspect meta request to: %s", m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + evname := gen.Atom(fmt.Sprintf("%s_%s", inspectMeta, im.meta)) + token, err := im.RegisterEvent(evname, eopts) + if err != nil { + im.Log().Error("unable to register meta process event: %s", err) + return err + } + im.Log().Info("registered event %s", evname) + im.event = evname + + im.token = token + im.SendAfter(im.PID(), shutdown{}, inspectMetaIdlePeriod) + + case shutdown: + if im.generating { + im.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + im.Log().Debug("got first subscriber. start generating events...") + im.Send(im.PID(), generate{}) + im.generating = true + + case gen.MessageEventStop: // no subscribers + im.Log().Debug("no subscribers. 
stop generating") + if im.generating { + im.generating = false + im.SendAfter(im.PID(), shutdown{}, inspectMetaIdlePeriod) + } + + default: + im.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (im *meta) Terminate(reason error) { + im.Log().Debug("meta %s inspector terminated: %s", im.meta, reason) +} diff --git a/app/system/inspect/meta_state.go b/app/system/inspect/meta_state.go new file mode 100644 index 00000000..c27c8e6c --- /dev/null +++ b/app/system/inspect/meta_state.go @@ -0,0 +1,122 @@ +package inspect + +import ( + "fmt" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_meta_state() gen.ProcessBehavior { + return &meta_state{} +} + +type meta_state struct { + act.Actor + token gen.Ref + + event gen.Atom + generating bool + meta gen.Alias +} + +func (ims *meta_state) Init(args ...any) error { + ims.meta = args[0].(gen.Alias) + ims.Log().SetLogger("default") + ims.Log().Debug("meta state inspector started. id %s", ims.meta) + // RegisterEvent is not allowed here + ims.Send(ims.PID(), register{}) + return nil +} + +func (ims *meta_state) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if ims.generating == false { + ims.Log().Debug("generating canceled") + break // cancelled + } + ims.Log().Debug("generating event") + state, err := ims.InspectMeta(ims.meta) + if err != nil { + if err == gen.ErrMetaUnknown { + return gen.TerminateReasonNormal + } + ims.Log().Error("unable to inspect meta state %s: %s", ims.meta, err) + // will try next time + ims.SendAfter(ims.PID(), generate{}, inspectMetaStatePeriod) + return nil + } + if state == nil { + state = map[string]string{} + } + + ev := MessageInspectMetaState{ + Node: ims.Node().Name(), + Meta: ims.meta, + State: state, + } + + if err := ims.SendEvent(ims.event, ims.token, ev); err != nil { + ims.Log().Error("unable to send event %q: %s", ims.event, err) + return gen.TerminateReasonNormal + } + + ims.SendAfter(ims.PID(), generate{}, inspectMetaStatePeriod) + + case requestInspect: + response := ResponseInspectMetaState{ + Event: gen.Event{ + Name: ims.event, + Node: ims.Node().Name(), + }, + } + ims.SendResponse(m.pid, m.ref, response) + ims.Log().Debug("sent response for the inspect meta state %s request to: %s", ims.meta, m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + evname := gen.Atom(fmt.Sprintf("%s_%s", inspectMetaState, ims.meta)) + token, err := ims.RegisterEvent(evname, eopts) + if err != nil { + ims.Log().Error("unable to register meta state event: %s", err) + return err + } + ims.Log().Info("registered event %s", evname) + ims.event = evname + + ims.token = token + ims.SendAfter(ims.PID(), shutdown{}, inspectMetaStateIdlePeriod) + + case shutdown: + if ims.generating { + ims.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + ims.Log().Debug("got first subscriber. start generating events...") + ims.Send(ims.PID(), generate{}) + ims.generating = true + + case gen.MessageEventStop: // no subscribers + ims.Log().Debug("no subscribers. 
stop generating") + if ims.generating { + ims.generating = false + ims.SendAfter(ims.PID(), shutdown{}, inspectMetaStateIdlePeriod) + } + + default: + ims.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (ims *meta_state) Terminate(reason error) { + ims.Log().Debug("meta state %s inspector terminated: %s", ims.meta, reason) +} diff --git a/app/system/inspect/network.go b/app/system/inspect/network.go new file mode 100644 index 00000000..4a054662 --- /dev/null +++ b/app/system/inspect/network.go @@ -0,0 +1,108 @@ +package inspect + +import ( + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "slices" +) + +func factory_network() gen.ProcessBehavior { + return &network{} +} + +type network struct { + act.Actor + token gen.Ref + + generating bool +} + +func (in *network) Init(args ...any) error { + in.Log().SetLogger("default") + in.Log().Debug("network inspector started") + // RegisterEvent is not allowed here + in.Send(in.PID(), register{}) + return nil +} + +func (in *network) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if in.generating == false { + in.Log().Debug("generating canceled") + break // cancelled + } + in.Log().Debug("generating event") + + info, err := in.Node().Network().Info() + slices.Sort(info.Nodes) + ev := MessageInspectNetwork{ + Node: in.Node().Name(), + Stopped: err == gen.ErrNetworkStopped, + Info: info, + } + + if err := in.SendEvent(inspectNetwork, in.token, ev); err != nil { + in.Log().Error("unable to send event %q: %s", inspectNetwork, err) + return gen.TerminateReasonNormal + } + + in.SendAfter(in.PID(), generate{}, inspectNetworkPeriod) + + case requestInspect: + info, err := in.Node().Network().Info() + response := ResponseInspectNetwork{ + Event: gen.Event{ + Name: inspectNetwork, + Node: in.Node().Name(), + }, + Stopped: err == gen.ErrNetworkStopped, + Info: info, + } + in.SendResponse(m.pid, m.ref, response) + in.Log().Debug("sent response for the inspect network request to: %s", m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + token, err := in.RegisterEvent(inspectNetwork, eopts) + if err != nil { + in.Log().Error("unable to register network event: %s", err) + return err + } + in.Log().Info("registered event %s", inspectNetwork) + + in.token = token + in.SendAfter(in.PID(), shutdown{}, inspectNetworkIdlePeriod) + + case shutdown: + if in.generating { + in.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + in.Log().Debug("got first subscriber. start generating events...") + in.Send(in.PID(), generate{}) + in.generating = true + + case gen.MessageEventStop: // no subscribers + in.Log().Debug("no subscribers. 
stop generating") + if in.generating { + in.generating = false + in.SendAfter(in.PID(), shutdown{}, inspectNetworkIdlePeriod) + } + + default: + in.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (in *network) Terminate(reason error) { + in.Log().Debug("network inspector terminated: %s", reason) +} diff --git a/app/system/inspect/node.go b/app/system/inspect/node.go new file mode 100644 index 00000000..e7c275fc --- /dev/null +++ b/app/system/inspect/node.go @@ -0,0 +1,126 @@ +package inspect + +import ( + "cmp" + "fmt" + "runtime" + "slices" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_node() gen.ProcessBehavior { + return &node{} +} + +type node struct { + act.Actor + token gen.Ref + + generating bool +} + +func (in *node) Init(args ...any) error { + in.Log().SetLogger("default") + in.Log().Debug("node inspector started") + // RegisterEvent is not allowed here + in.Send(in.PID(), register{}) + return nil +} + +func (in *node) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if in.generating == false { + in.Log().Debug("generating canceled") + break // cancelled + } + in.Log().Debug("generating event") + + info, err := in.Node().Info() + if err != nil { + return err + } + + for k, v := range info.Env { + info.Env[k] = fmt.Sprintf("%#v", v) + } + slices.SortStableFunc(info.Loggers, func(a, b gen.LoggerInfo) int { + return cmp.Compare(a.Name, b.Name) + }) + + ev := MessageInspectNode{ + Node: in.Node().Name(), + Info: info, + } + + if err := in.SendEvent(inspectNode, in.token, ev); err != nil { + in.Log().Error("unable to send event %q: %s", inspectNode, err) + return gen.TerminateReasonNormal + } + + in.SendAfter(in.PID(), generate{}, inspectNodePeriod) + + case requestInspect: + response := ResponseInspectNode{ + Event: gen.Event{ + Name: inspectNode, + Node: in.Node().Name(), + }, + + Arch: runtime.GOARCH, + OS: runtime.GOOS, + Cores: runtime.NumCPU(), + Version: in.Node().Version(), + Creation: in.Node().Creation(), + CRC32: in.Node().Name().CRC32(), + } + in.SendResponse(m.pid, m.ref, response) + in.Log().Debug("sent response for the inspect node request to: %s", m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + token, err := in.RegisterEvent(inspectNode, eopts) + if err != nil { + in.Log().Error("unable to register event: %s", err) + return err + } + in.Log().Info("registered event %s", inspectNode) + + in.token = token + in.SendAfter(in.PID(), shutdown{}, inspectNodeIdlePeriod) + + case shutdown: + if in.generating { + in.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + in.Log().Debug("got first subscriber. start generating events...") + in.Send(in.PID(), generate{}) + in.generating = true + + case gen.MessageEventStop: // no subscribers + in.Log().Debug("no subscribers. 
stop generating") + if in.generating { + in.generating = false + // wait 10 seconds and terminate this process + in.SendAfter(in.PID(), shutdown{}, inspectNodeIdlePeriod) + } + + default: + in.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (in *node) Terminate(reason error) { + in.Log().Debug("node inspector terminated: %s", reason) +} diff --git a/app/system/inspect/process.go b/app/system/inspect/process.go new file mode 100644 index 00000000..62778551 --- /dev/null +++ b/app/system/inspect/process.go @@ -0,0 +1,130 @@ +package inspect + +import ( + "fmt" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_process() gen.ProcessBehavior { + return &process{} +} + +type process struct { + act.Actor + token gen.Ref + + event gen.Atom + pid gen.PID + generating bool +} + +func (ip *process) Init(args ...any) error { + ip.pid = args[0].(gen.PID) + ip.Log().SetLogger("default") + ip.Log().Debug("process inspector started. pid %s", ip.pid) + // RegisterEvent is not allowed here + ip.Send(ip.PID(), register{}) + return nil +} + +func (ip *process) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if ip.generating == false { + ip.Log().Debug("generating canceled") + break // cancelled + } + ip.Log().Debug("generating event") + + ev := MessageInspectProcess{ + Node: ip.Node().Name(), + Terminated: true, + } + + info, err := ip.Node().ProcessInfo(ip.pid) + if err == gen.ErrProcessUnknown || err == gen.ErrProcessTerminated { + if err := ip.SendEvent(ip.event, ip.token, ev); err != nil { + ip.Log().Error("unable to send event %q: %s", ip.event, err) + } + return gen.TerminateReasonNormal + } + + for k, v := range info.Env { + info.Env[k] = fmt.Sprintf("%#v", v) + } + + if err != nil { + ip.Log().Error("unable to inspect process %s: %s", ip.pid, err) + // will try next time (seems to be busy) + ip.SendAfter(ip.PID(), generate{}, inspectProcessPeriod) + return nil + } + + ev.Terminated = false + ev.Info = info + + if err := ip.SendEvent(ip.event, ip.token, ev); err != nil { + ip.Log().Error("unable to send event %q: %s", ip.event, err) + return gen.TerminateReasonNormal + } + + ip.SendAfter(ip.PID(), generate{}, inspectProcessPeriod) + + case requestInspect: + response := ResponseInspectProcess{ + Event: gen.Event{ + Name: ip.event, + Node: ip.Node().Name(), + }, + } + ip.SendResponse(m.pid, m.ref, response) + ip.Log().Debug("sent response for the inspect process request to: %s", m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + evname := gen.Atom(fmt.Sprintf("%s_%s", inspectProcess, ip.pid)) + token, err := ip.RegisterEvent(evname, eopts) + if err != nil { + ip.Log().Error("unable to register event: %s", err) + return err + } + ip.Log().Info("registered event %s", evname) + ip.event = evname + + ip.token = token + ip.SendAfter(ip.PID(), shutdown{}, inspectProcessIdlePeriod) + + case shutdown: + if ip.generating { + ip.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + ip.Log().Debug("got first subscriber. start generating events...") + ip.Send(ip.PID(), generate{}) + ip.generating = true + + case gen.MessageEventStop: // no subscribers + ip.Log().Debug("no subscribers. 
stop generating") + if ip.generating { + ip.generating = false + ip.SendAfter(ip.PID(), shutdown{}, inspectProcessIdlePeriod) + } + + default: + ip.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (ip *process) Terminate(reason error) { + ip.Log().Debug("process inspector terminated: %s", reason) +} diff --git a/app/system/inspect/process_list.go b/app/system/inspect/process_list.go new file mode 100644 index 00000000..9380337b --- /dev/null +++ b/app/system/inspect/process_list.go @@ -0,0 +1,119 @@ +package inspect + +import ( + "fmt" + "slices" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_process_list() gen.ProcessBehavior { + return &process_list{} +} + +type process_list struct { + act.Actor + token gen.Ref + + start int + limit int + generating bool + event gen.Atom +} + +func (ipl *process_list) Init(args ...any) error { + ipl.start = args[0].(int) + ipl.limit = args[1].(int) + ipl.Log().SetLogger("default") + ipl.Log().Debug("process list inspector started. %d...%d", ipl.start, ipl.start+ipl.limit-1) + // RegisterEvent is not allowed here + ipl.Send(ipl.PID(), register{}) + return nil +} +func (ipl *process_list) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if ipl.generating == false { + ipl.Log().Debug("generating canceled") + break // cancelled + } + ipl.Log().Debug("generating event") + + list, err := ipl.Node().ProcessListShortInfo(ipl.start, ipl.limit) + if err != nil { + return err + } + + slices.SortStableFunc(list, func(a, b gen.ProcessShortInfo) int { + return int(a.PID.ID - b.PID.ID) + }) + + ev := MessageInspectProcessList{ + Node: ipl.Node().Name(), + Processes: list, + } + + if err := ipl.SendEvent(ipl.event, ipl.token, ev); err != nil { + ipl.Log().Error("unable to send event %q: %s", ipl.event, err) + return gen.TerminateReasonNormal + } + + ipl.SendAfter(ipl.PID(), generate{}, inspectProcessListPeriod) + + case requestInspect: + response := ResponseInspectProcessList{ + Event: gen.Event{ + Name: ipl.event, + Node: ipl.Node().Name(), + }, + } + ipl.SendResponse(m.pid, m.ref, response) + ipl.Log().Debug("sent response for the inspect process list request to: %s", m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + evname := gen.Atom(fmt.Sprintf("%s_%d_%d", inspectProcessList, ipl.start, ipl.start+ipl.limit-1)) + token, err := ipl.RegisterEvent(evname, eopts) + if err != nil { + ipl.Log().Error("unable to register event: %s", err) + return err + } + ipl.Log().Info("registered event %s", evname) + ipl.event = evname + + ipl.token = token + ipl.SendAfter(ipl.PID(), shutdown{}, inspectProcessListIdlePeriod) + + case shutdown: + if ipl.generating { + ipl.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + ipl.Log().Debug("got first subscriber. start generating events...") + ipl.Send(ipl.PID(), generate{}) + ipl.generating = true + + case gen.MessageEventStop: // no subscribers + ipl.Log().Debug("no subscribers. 
stop generating") + if ipl.generating { + ipl.generating = false + ipl.SendAfter(ipl.PID(), shutdown{}, inspectProcessListIdlePeriod) + } + + default: + ipl.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (ipl *process_list) Terminate(reason error) { + ipl.Log().Debug("process list inspector terminated: %s", reason) +} diff --git a/app/system/inspect/process_state.go b/app/system/inspect/process_state.go new file mode 100644 index 00000000..4c3badad --- /dev/null +++ b/app/system/inspect/process_state.go @@ -0,0 +1,119 @@ +package inspect + +import ( + "fmt" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func factory_process_state() gen.ProcessBehavior { + return &process_state{} +} + +type process_state struct { + act.Actor + token gen.Ref + + event gen.Atom + generating bool + pid gen.PID +} + +func (ips *process_state) Init(args ...any) error { + ips.pid = args[0].(gen.PID) + ips.Log().SetLogger("default") + ips.Log().Debug("process state inspector started. pid %s", ips.pid) + // RegisterEvent is not allowed here + ips.Send(ips.PID(), register{}) + return nil +} + +func (ips *process_state) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case generate: + if ips.generating == false { + ips.Log().Debug("generating canceled") + break // cancelled + } + ips.Log().Debug("generating event") + state, err := ips.Inspect(ips.pid) + if err != nil { + if err == gen.ErrProcessUnknown { + return gen.TerminateReasonNormal + } + ips.Log().Error("unable to inspect process state %s: %s", ips.pid, err) + // will try next time + ips.SendAfter(ips.PID(), generate{}, inspectProcessStatePeriod) + return nil + } + + ev := MessageInspectProcessState{ + Node: ips.Node().Name(), + PID: ips.pid, + State: state, + } + + if err := ips.SendEvent(ips.event, ips.token, ev); err != nil { + ips.Log().Error("unable to send event %q: %s", ips.event, err) + return gen.TerminateReasonNormal + } + + ips.SendAfter(ips.PID(), generate{}, inspectProcessStatePeriod) + + case requestInspect: + response := ResponseInspectProcessState{ + Event: gen.Event{ + Name: ips.event, + Node: ips.Node().Name(), + }, + } + ips.SendResponse(m.pid, m.ref, response) + ips.Log().Debug("sent response for the inspect process state %s request to: %s", ips.pid, m.pid) + + case register: + eopts := gen.EventOptions{ + Notify: true, + Buffer: 1, // keep the last event + } + evname := gen.Atom(fmt.Sprintf("%s_%s", inspectProcessState, ips.pid)) + token, err := ips.RegisterEvent(evname, eopts) + if err != nil { + ips.Log().Error("unable to register process state event: %s", err) + return err + } + ips.Log().Info("registered event %s", evname) + ips.event = evname + + ips.token = token + ips.SendAfter(ips.PID(), shutdown{}, inspectProcessStateIdlePeriod) + + case shutdown: + if ips.generating { + ips.Log().Debug("ignore shutdown. generating is active") + break // ignore. + } + return gen.TerminateReasonNormal + + case gen.MessageEventStart: // got first subscriber + ips.Log().Debug("got first subscriber. start generating events...") + ips.Send(ips.PID(), generate{}) + ips.generating = true + + case gen.MessageEventStop: // no subscribers + ips.Log().Debug("no subscribers. 
stop generating") + if ips.generating { + ips.generating = false + ips.SendAfter(ips.PID(), shutdown{}, inspectProcessStateIdlePeriod) + } + + default: + ips.Log().Error("unknown message (ignored) %#v", message) + } + + return nil +} + +func (ips *process_state) Terminate(reason error) { + ips.Log().Debug("process state %s inspector terminated: %s", ips.pid, reason) +} diff --git a/app/system/metrics.go b/app/system/metrics.go new file mode 100644 index 00000000..b5148ffd --- /dev/null +++ b/app/system/metrics.go @@ -0,0 +1,199 @@ +package system + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "net" + "runtime" + "strconv" + "strings" + "time" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" +) + +const ( + period time.Duration = time.Second * 300 + + DISABLE_METRICS gen.Env = "disable_metrics" +) + +type MessageMetrics struct { + Name gen.Atom + Creation int64 + Uptime int64 + Arch string + OS string + NumCPU int + GoVersion string + Version string + ErgoVersion string + Commercial string +} + +func factory_metrics() gen.ProcessBehavior { + return &metrics{} +} + +type doSendMetrics struct{} + +type metrics struct { + act.Actor + cancelSend gen.CancelFunc + key []byte + block cipher.Block +} + +func (m *metrics) Init(args ...any) error { + if err := edf.RegisterTypeOf(MessageMetrics{}); err != nil { + if err != gen.ErrTaken { + return err + } + } + + if _, disable := m.Env(DISABLE_METRICS); disable { + m.Log().Trace("metrics disabled") + return nil + } + + m.key = []byte(lib.RandomString(32)) + b, err := aes.NewCipher(m.key) + if err != nil { + return nil + } + m.block = b + + m.Log().Trace("scheduled sending metrics in %v", period) + m.cancelSend, _ = m.SendAfter(m.PID(), doSendMetrics{}, period) + return nil +} + +func (m *metrics) HandleMessage(from gen.PID, message any) error { + + switch message.(type) { + case doSendMetrics: + m.send() + m.Log().Trace("scheduled sending metrics in %v", period) + m.cancelSend, _ = m.SendAfter(m.PID(), doSendMetrics{}, period) + + default: + m.Log().Trace("received unknown message: %#v", message) + } + return nil +} + +func (m *metrics) Terminate(reason error) { + m.cancelSend() +} + +func (m *metrics) send() { + var msrv = "metrics.ergo.services" + + values, err := net.LookupTXT(msrv) + if err != nil || len(values) == 0 { + m.Log().Trace("lookup TXT record in %s failed or returned empty result", msrv) + return + } + v, err := base64.StdEncoding.DecodeString(values[0]) + if err != nil { + return + } + + pk, err := x509.ParsePKCS1PublicKey([]byte(v)) + if err != nil { + m.Log().Trace("unable to parse public key (TXT record in %s)", msrv) + return + } + + _, srv, err := net.LookupSRV("data", "mt1", msrv) + if err != nil || len(srv) == 0 { + m.Log().Trace("unable to resolve SRV record: %s", err) + return + } + + dsn := net.JoinHostPort(strings.TrimSuffix(srv[0].Target, "."), + strconv.Itoa(int(srv[0].Port))) + c, err := net.Dial("udp", dsn) + if err != nil { + m.Log().Trace("unable to dial the host %s: %s", dsn, err) + return + } + defer c.Close() + + msg := MessageMetrics{ + Name: m.Node().Name(), + Creation: m.Node().Creation(), + Uptime: m.Node().Uptime(), + Arch: runtime.GOARCH, + OS: runtime.GOOS, + NumCPU: runtime.NumCPU(), + GoVersion: runtime.Version(), + Version: m.Node().Version().String(), + ErgoVersion: m.Node().FrameworkVersion().String(), + 
Commercial: fmt.Sprintf("%v", m.Node().Commercial()), + } + + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + hash := sha256.New() + cipher, err := rsa.EncryptOAEP(hash, rand.Reader, pk, m.key, nil) + if err != nil { + m.Log().Trace("unable to encrypt metrics message: %s (len: %d)", err, buf.Len()) + return + } + + // 2 (magic: 1144) + 2 (length) + len(cipher) + buf.Allocate(4) + buf.Append(cipher) + binary.BigEndian.PutUint16(buf.B[0:2], uint16(1144)) + binary.BigEndian.PutUint16(buf.B[2:4], uint16(len(cipher))) + + // encrypt payload and append to the buf + payload := lib.TakeBuffer() + defer lib.ReleaseBuffer(payload) + if err := edf.Encode(msg, payload, edf.Options{}); err != nil { + m.Log().Trace("unable to encode metrics message: %s", err) + return + } + + x := encrypt(payload.B, m.block) + if x == nil { + return + } + buf.Append(x) + + if _, err := c.Write(buf.B); err != nil { + m.Log().Trace("unable to send metrics: %s", err) + } + m.Log().Trace("sent metrics to %s", dsn) +} + +func encrypt(data []byte, block cipher.Block) []byte { + l := len(data) + padding := aes.BlockSize - l%aes.BlockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + data = append(data, padtext...) + l = len(data) + + x := make([]byte, aes.BlockSize+l) + iv := x[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil + } + cfb := cipher.NewCFBEncrypter(block, iv) + cfb.XORKeyStream(x[aes.BlockSize:], data) + return x +} diff --git a/app/system/sup.go b/app/system/sup.go new file mode 100644 index 00000000..7254d190 --- /dev/null +++ b/app/system/sup.go @@ -0,0 +1,34 @@ +package system + +import ( + "ergo.services/ergo/act" + "ergo.services/ergo/app/system/inspect" + "ergo.services/ergo/gen" +) + +func factory_sup() gen.ProcessBehavior { + return &sup{} +} + +type sup struct { + act.Supervisor +} + +func (s *sup) Init(args ...any) (act.SupervisorSpec, error) { + + spec := act.SupervisorSpec{ + Type: act.SupervisorTypeOneForOne, + Children: []act.SupervisorChildSpec{ + { + Factory: factory_metrics, + Name: "system_metrics", + }, + { + Factory: inspect.Factory, + Name: inspect.Name, + }, + }, + } + spec.Restart.Strategy = act.SupervisorStrategyPermanent + return spec, nil +} diff --git a/apps/cloud/app.go b/apps/cloud/app.go deleted file mode 100644 index 9f103fa4..00000000 --- a/apps/cloud/app.go +++ /dev/null @@ -1,42 +0,0 @@ -package cloud - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type CloudApp struct { - gen.Application - options node.Cloud -} - -func CreateApp(options node.Cloud) gen.ApplicationBehavior { - if options.Flags.Enable == false { - options.Flags = node.DefaultCloudFlags() - } - return &CloudApp{ - options: options, - } -} - -func (ca *CloudApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) { - lib.Log("CLOUD_CLIENT: Application load") - return gen.ApplicationSpec{ - Name: "cloud_app", - Description: "Ergo Cloud Support Application", - Version: "v.1.0", - Children: []gen.ApplicationChildSpec{ - gen.ApplicationChildSpec{ - Child: &cloudAppSup{}, - Name: "cloud_app_sup", - Args: []etf.Term{ca.options}, - }, - }, - }, nil -} - -func (ca *CloudApp) Start(p gen.Process, args ...etf.Term) { - lib.Log("[%s] CLOUD_CLIENT: Application started", p.NodeName()) -} diff --git a/apps/cloud/client.go b/apps/cloud/client.go deleted file mode 100644 index 82696781..00000000 --- a/apps/cloud/client.go +++ /dev/null @@ -1,229 
+0,0 @@ -package cloud - -import ( - "crypto/tls" - "fmt" - "net" - "os" - "regexp" - "strconv" - "strings" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type CloudNode struct { - Node string - Port uint16 - SkipVerify bool -} - -type cloudClient struct { - gen.Server -} - -type cloudClientState struct { - options node.Cloud - handshake node.HandshakeInterface - monitor etf.Ref - node string -} - -type messageCloudClientConnect struct{} - -func (cc *cloudClient) Init(process *gen.ServerProcess, args ...etf.Term) error { - lib.Log("[%s] CLOUD_CLIENT: Init: %#v", process.NodeName(), args) - if len(args) == 0 { - return fmt.Errorf("no args to start cloud client") - } - - cloudOptions, ok := args[0].(node.Cloud) - if ok == false { - return fmt.Errorf("wrong args for the cloud client") - } - - handshake, err := createHandshake(cloudOptions) - if err != nil { - return fmt.Errorf("can not create HandshakeInterface for the cloud client: %s", err) - } - - process.State = &cloudClientState{ - options: cloudOptions, - handshake: handshake, - } - - if err := process.RegisterEvent(EventCloud, MessageEventCloud{}); err != nil { - lib.Warning("can't register event %q: %s", EventCloud, err) - } - - process.Cast(process.Self(), messageCloudClientConnect{}) - - return nil -} - -func (cc *cloudClient) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - lib.Log("[%s] CLOUD_CLIENT: HandleCast: %#v", process.NodeName(), message) - switch message.(type) { - case messageCloudClientConnect: - state := process.State.(*cloudClientState) - - // initiate connection with the cloud - cloudNodes, err := getCloudNodes() - if err != nil { - lib.Warning("can't resolve cloud nodes: %s", err) - } - - // add static route with custom handshake - thisNode := process.Env(node.EnvKeyNode).(node.Node) - - for _, cloud := range cloudNodes { - routeOptions := node.RouteOptions{ - Cookie: state.options.Cookie, - IsErgo: true, - Handshake: state.handshake, - } - routeOptions.TLS = &tls.Config{ - InsecureSkipVerify: cloud.SkipVerify, - } - if err := thisNode.AddStaticRoutePort(cloud.Node, cloud.Port, routeOptions); err != nil { - if err != lib.ErrTaken { - continue - } - } - - lib.Log("[%s] CLOUD_CLIENT: trying to connect with: %s", process.NodeName(), cloud.Node) - if err := thisNode.Connect(cloud.Node); err != nil { - lib.Log("[%s] CLOUD_CLIENT: failed with reason: ", err) - continue - } - - // add proxy domain route - proxyRoute := node.ProxyRoute{ - Name: "@" + state.options.Cluster, - Proxy: cloud.Node, - Cookie: state.options.Cookie, - } - thisNode.AddProxyRoute(proxyRoute) - - state.monitor = process.MonitorNode(cloud.Node) - state.node = cloud.Node - event := MessageEventCloud{ - Cluster: proxyRoute.Name, - Online: true, - Proxy: cloud.Node, - } - if err := process.SendEventMessage(EventCloud, event); err != nil { - lib.Log("[%s] CLOUD_CLIENT: failed to send event (%s) %#v: %s", - process.NodeName(), EventCloud, event, err) - } - return gen.ServerStatusOK - } - - // cloud nodes aren't available. 
make another attempt in 3 seconds - process.CastAfter(process.Self(), messageCloudClientConnect{}, 5*time.Second) - } - return gen.ServerStatusOK -} - -func (cc *cloudClient) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - lib.Log("[%s] CLOUD_CLIENT: HandleInfo: %#v", process.NodeName(), message) - state := process.State.(*cloudClientState) - - switch m := message.(type) { - case gen.MessageNodeDown: - if m.Ref != state.monitor { - return gen.ServerStatusOK - } - thisNode := process.Env(node.EnvKeyNode).(node.Node) - state.cleanup(thisNode) - - event := MessageEventCloud{ - Online: false, - } - process.SendEventMessage(EventCloud, event) - // lost connection with the cloud node. try to connect again - process.Cast(process.Self(), messageCloudClientConnect{}) - } - return gen.ServerStatusOK -} - -func (cc *cloudClient) Terminate(process *gen.ServerProcess, reason string) { - state := process.State.(*cloudClientState) - thisNode := process.Env(node.EnvKeyNode).(node.Node) - thisNode.RemoveProxyRoute("@" + state.options.Cluster) - thisNode.Disconnect(state.node) - state.cleanup(thisNode) -} - -func (ccs *cloudClientState) cleanup(node node.Node) { - node.RemoveStaticRoute(ccs.node) - node.RemoveProxyRoute("@" + ccs.options.Cluster) - ccs.node = "" -} - -func getCloudNodes() ([]CloudNode, error) { - // check if custom cloud entries have been defined via env - if entries := strings.Fields(os.Getenv("ERGO_SERVICES_CLOUD")); len(entries) > 0 { - nodes := []CloudNode{} - for _, entry := range entries { - re := regexp.MustCompile("[@:]+") - nameHostPort := re.Split(entry, -1) - name := "dist" - host := "localhost" - port := 4411 - switch len(nameHostPort) { - case 2: - // either abc@def or abc:def - if p, err := strconv.Atoi(nameHostPort[1]); err == nil { - port = p - } else { - name = nameHostPort[0] - host = nameHostPort[1] - } - case 3: - if p, err := strconv.Atoi(nameHostPort[2]); err == nil { - port = p - } else { - continue - } - name = nameHostPort[0] - host = nameHostPort[1] - - default: - continue - } - - node := CloudNode{ - Node: name + "@" + host, - Port: uint16(port), - SkipVerify: true, - } - nodes = append(nodes, node) - - } - - if len(nodes) > 0 { - return nodes, nil - } - } - _, srv, err := net.LookupSRV("cloud", "dist", "ergo.services") - if err != nil { - return nil, err - } - - nodes := make([]CloudNode, len(srv)) - for i := range srv { - nodes[i].Node = "dist@" + strings.TrimSuffix(srv[i].Target, ".") - nodes[i].Port = srv[i].Port - } - - // return only 3 of them - if len(nodes) > 3 { - return nodes[:3], nil - } - return nodes, nil -} diff --git a/apps/cloud/handshake.go b/apps/cloud/handshake.go deleted file mode 100644 index 771012a9..00000000 --- a/apps/cloud/handshake.go +++ /dev/null @@ -1,282 +0,0 @@ -package cloud - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - "fmt" - "hash" - "io" - "net" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -const ( - defaultHandshakeTimeout = 5 * time.Second - clusterNameLengthMax = 128 -) - -type Handshake struct { - node.Handshake - nodename string - creation uint32 - options node.Cloud - flags node.Flags -} - -type handshakeDetails struct { - cookieHash []byte - digestRemote []byte - details node.HandshakeDetails - mapName string - hash hash.Hash -} - -func createHandshake(options node.Cloud) (node.HandshakeInterface, error) { - if options.Timeout == 0 { - options.Timeout = defaultHandshakeTimeout - } - - if err 
:= RegisterTypes(); err != nil { - return nil, err - } - - return &Handshake{ - options: options, - }, nil -} - -func (ch *Handshake) Init(nodename string, creation uint32, flags node.Flags) error { - if flags.EnableProxy == false { - s := "proxy feature must be enabled for the cloud connection" - lib.Warning(s) - return fmt.Errorf(s) - } - if ch.options.Cluster == "" { - s := "option Cloud.Cluster can not be empty" - lib.Warning(s) - return fmt.Errorf(s) - } - if len(ch.options.Cluster) > clusterNameLengthMax { - s := "option Cloud.Cluster has too long name" - lib.Warning(s) - return fmt.Errorf(s) - } - ch.nodename = nodename - ch.creation = creation - ch.flags = flags - if ch.options.Flags.Enable == false { - return nil - } - - ch.flags.EnableRemoteSpawn = ch.options.Flags.EnableRemoteSpawn - return nil -} - -func (ch *Handshake) Start(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (node.HandshakeDetails, error) { - hash := sha256.New() - handshake := &handshakeDetails{ - cookieHash: hash.Sum([]byte(cookie)), - hash: hash, - } - handshake.details.Flags = ch.flags - - ch.sendV1Auth(conn) - - // define timeout for the handshaking - timer := time.NewTimer(ch.options.Timeout) - defer timer.Stop() - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - asyncReadChannel := make(chan error, 2) - asyncRead := func() { - _, err := b.ReadDataFrom(conn, 1024) - asyncReadChannel <- err - } - - expectingBytes := 4 - await := []byte{ProtoHandshakeV1AuthReply, ProtoHandshakeV1Error} - rest := []byte{} - - for { - go asyncRead() - select { - case <-timer.C: - return handshake.details, fmt.Errorf("timeout") - case err := <-asyncReadChannel: - if err != nil { - return handshake.details, err - } - - if b.Len() < expectingBytes { - continue - } - - if b.B[0] != ProtoHandshakeV1 { - return handshake.details, fmt.Errorf("malformed handshake proto") - } - - l := int(binary.BigEndian.Uint16(b.B[2:4])) - buffer := b.B[4 : l+4] - - if len(buffer) != l { - return handshake.details, fmt.Errorf("malformed handshake (wrong packet length)") - } - - // check if we got correct message type regarding to 'await' value - if bytes.Count(await, b.B[1:2]) == 0 { - return handshake.details, fmt.Errorf("malformed handshake sequence") - } - - await, rest, err = ch.handle(conn, b.B[1], buffer, handshake) - if err != nil { - return handshake.details, err - } - - if await == nil && rest != nil { - // handshaked with some extra data. 
keep them for the Proto handler - handshake.details.Buffer = lib.TakeBuffer() - handshake.details.Buffer.Set(rest) - } - - b.Reset() - } - - if await == nil { - // handshaked - break - } - } - - return handshake.details, nil -} - -func (ch *Handshake) handle(socket io.Writer, messageType byte, buffer []byte, details *handshakeDetails) ([]byte, []byte, error) { - switch messageType { - case ProtoHandshakeV1AuthReply: - if err := ch.handleV1AuthReply(buffer, details); err != nil { - return nil, nil, err - } - if err := ch.sendV1Challenge(socket, details); err != nil { - return nil, nil, err - } - return []byte{ProtoHandshakeV1ChallengeAccept, ProtoHandshakeV1Error}, nil, nil - - case ProtoHandshakeV1ChallengeAccept: - rest, err := ch.handleV1ChallegeAccept(buffer, details) - if err != nil { - return nil, nil, err - } - return nil, rest, err - - case ProtoHandshakeV1Error: - return nil, nil, ch.handleV1Error(buffer) - - default: - return nil, nil, fmt.Errorf("unknown message type") - } -} - -func (ch *Handshake) sendV1Auth(socket io.Writer) error { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - message := MessageHandshakeV1Auth{ - Node: ch.nodename, - Cluster: ch.options.Cluster, - Creation: ch.creation, - Flags: ch.options.Flags, - } - b.Allocate(1 + 1 + 2) - b.B[0] = ProtoHandshakeV1 - b.B[1] = ProtoHandshakeV1Auth - if err := etf.Encode(message, b, etf.EncodeOptions{}); err != nil { - return err - } - binary.BigEndian.PutUint16(b.B[2:4], uint16(b.Len()-4)) - if err := b.WriteDataTo(socket); err != nil { - return err - } - - return nil -} - -func (ch *Handshake) sendV1Challenge(socket io.Writer, handshake *handshakeDetails) error { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - digest := GenDigest(handshake.hash, []byte(ch.nodename), handshake.digestRemote, handshake.cookieHash) - message := MessageHandshakeV1Challenge{ - Digest: digest, - } - b.Allocate(1 + 1 + 2) - b.B[0] = ProtoHandshakeV1 - b.B[1] = ProtoHandshakeV1Challenge - if err := etf.Encode(message, b, etf.EncodeOptions{}); err != nil { - return err - } - binary.BigEndian.PutUint16(b.B[2:4], uint16(b.Len()-4)) - if err := b.WriteDataTo(socket); err != nil { - return err - } - - return nil - -} - -func (ch *Handshake) handleV1AuthReply(buffer []byte, handshake *handshakeDetails) error { - m, _, err := etf.Decode(buffer, nil, etf.DecodeOptions{}) - if err != nil { - return fmt.Errorf("malformed MessageHandshakeV1AuthReply message: %s", err) - } - message, ok := m.(MessageHandshakeV1AuthReply) - if ok == false { - return fmt.Errorf("malformed MessageHandshakeV1AuthReply message: %#v", m) - } - - digest := GenDigest(handshake.hash, []byte(message.Node), []byte(ch.options.Cluster), handshake.cookieHash) - if bytes.Compare(message.Digest, digest) != 0 { - return fmt.Errorf("authorization failed") - } - handshake.digestRemote = digest - handshake.details.Name = message.Node - handshake.details.Creation = message.Creation - - return nil -} - -func (ch *Handshake) handleV1ChallegeAccept(buffer []byte, handshake *handshakeDetails) ([]byte, error) { - m, rest, err := etf.Decode(buffer, nil, etf.DecodeOptions{}) - if err != nil { - return nil, fmt.Errorf("malformed MessageHandshakeV1ChallengeAccept message: %s", err) - } - message, ok := m.(MessageHandshakeV1ChallengeAccept) - if ok == false { - return nil, fmt.Errorf("malformed MessageHandshakeV1ChallengeAccept message: %#v", m) - } - - mapping := etf.NewAtomMapping() - mapping.In[etf.Atom(message.Node)] = etf.Atom(ch.nodename) - mapping.Out[etf.Atom(ch.nodename)] = 
etf.Atom(message.Node) - handshake.details.AtomMapping = mapping - handshake.mapName = message.Node - return rest, nil -} - -func (ch *Handshake) handleV1Error(buffer []byte) error { - m, _, err := etf.Decode(buffer, nil, etf.DecodeOptions{}) - if err != nil { - return fmt.Errorf("malformed MessageHandshakeV1Error message: %s", err) - } - message, ok := m.(MessageHandshakeV1Error) - if ok == false { - return fmt.Errorf("malformed MessageHandshakeV1Error message: %#v", m) - } - return fmt.Errorf(message.Reason) -} diff --git a/apps/cloud/sup.go b/apps/cloud/sup.go deleted file mode 100644 index 847c6a4d..00000000 --- a/apps/cloud/sup.go +++ /dev/null @@ -1,28 +0,0 @@ -package cloud - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" -) - -type cloudAppSup struct { - gen.Supervisor -} - -func (cas *cloudAppSup) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - gen.SupervisorChildSpec{ - Name: "cloud_client", - Child: &cloudClient{}, - Args: args, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategyOneForOne, - Intensity: 10, - Period: 5, - Restart: gen.SupervisorStrategyRestartPermanent, - }, - }, nil -} diff --git a/apps/cloud/types.go b/apps/cloud/types.go deleted file mode 100644 index 57eee193..00000000 --- a/apps/cloud/types.go +++ /dev/null @@ -1,84 +0,0 @@ -package cloud - -import ( - "hash" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -const ( - EventCloud gen.Event = "cloud" - - ProtoHandshakeV1 = 41 - ProtoHandshakeV1Auth = 100 - ProtoHandshakeV1AuthReply = 101 - ProtoHandshakeV1Challenge = 102 - ProtoHandshakeV1ChallengeAccept = 103 - ProtoHandshakeV1Error = 200 -) - -type MessageEventCloud struct { - Cluster string - Online bool - Proxy string -} - -func RegisterTypes() error { - types := []interface{}{ - node.CloudFlags{}, - MessageHandshakeV1Auth{}, - MessageHandshakeV1AuthReply{}, - MessageHandshakeV1Challenge{}, - MessageHandshakeV1ChallengeAccept{}, - MessageHandshakeV1Error{}, - } - rtOpts := etf.RegisterTypeOptions{Strict: true} - - for _, t := range types { - if _, err := etf.RegisterType(t, rtOpts); err != nil && err != lib.ErrTaken { - return err - } - } - return nil -} - -func GenDigest(h hash.Hash, items ...[]byte) []byte { - x := []byte{} - for _, i := range items { - x = append(x, i...) 
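	// (editorial note, not part of the original file: hash.Hash.Sum(b)
	// appends the digest of whatever has been written to h so far to b;
	// nothing is ever written to h here, so GenDigest effectively returns
	// the concatenated items followed by the digest of the empty input.
	// It still works as a shared token because both ends of the cloud
	// handshake build it identically before comparing.)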
- } - return h.Sum(x) -} - -// client -> cloud -type MessageHandshakeV1Auth struct { - Node string - Cluster string - Creation uint32 - Flags node.CloudFlags -} - -// cloud -> client -type MessageHandshakeV1AuthReply struct { - Node string - Creation uint32 - Digest []byte -} - -// client -> cloud -type MessageHandshakeV1Challenge struct { - Digest []byte -} - -// cloud -> client -type MessageHandshakeV1ChallengeAccept struct { - Node string // mapped node name -} - -// cloud -> client -type MessageHandshakeV1Error struct { - Reason string -} diff --git a/apps/erlang/appmon.go b/apps/erlang/appmon.go deleted file mode 100644 index 4d912500..00000000 --- a/apps/erlang/appmon.go +++ /dev/null @@ -1,190 +0,0 @@ -package erlang - -// TODO: https://github.com/erlang/otp/blob/master/lib/runtime_tools-1.13.1/src/appmon_info.erl - -import ( - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type appMon struct { - gen.Server -} - -type appMonState struct { - jobs map[etf.Atom][]jobDetails -} - -type jobDetails struct { - name etf.Atom - args etf.List - sendTo etf.Pid -} - -// Init initializes process state using arbitrary arguments -func (am *appMon) Init(process *gen.ServerProcess, args ...etf.Term) error { - lib.Log("APP_MON: Init %#v", args) - from := args[0] - process.Link(from.(etf.Pid)) - process.State = &appMonState{ - jobs: make(map[etf.Atom][]jobDetails), - } - return nil -} - -// HandleCast -func (am *appMon) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - var appState *appMonState = process.State.(*appMonState) - lib.Log("APP_MON: HandleCast: %#v", message) - node := process.Env(node.EnvKeyNode).(node.Node) - switch message { - case "sendStat": - - for cmd, jobs := range appState.jobs { - switch cmd { - case "app_ctrl": - // From ! 
{delivery, self(), Cmd, Aux, Result} - apps := node.WhichApplications() - for i := range jobs { - appList := make(etf.List, len(apps)) - for ai, a := range apps { - appList[ai] = etf.Tuple{a.PID, etf.Atom(a.Name), - etf.Tuple{etf.Atom(a.Name), a.Description, a.Version}, - } - } - delivery := etf.Tuple{etf.Atom("delivery"), process.Self(), cmd, jobs[i].name, appList} - process.Send(jobs[i].sendTo, delivery) - } - - case "app": - for i := range jobs { - appTree := am.makeAppTree(process, jobs[i].name) - if appTree == nil { - continue - } - delivery := etf.Tuple{etf.Atom("delivery"), process.Self(), cmd, jobs[i].name, appTree} - process.Send(jobs[i].sendTo, delivery) - } - - } - } - - process.CastAfter(process.Self(), "sendStat", 2*time.Second) - return gen.ServerStatusOK - - default: - switch m := message.(type) { - case etf.Tuple: - if len(m) == 5 { - // etf.Tuple{etf.Pid{Node:"erl-demo@127.0.0.1", Id:0x7c, Serial:0x0, Creation:0x1}, "app_ctrl", "demo@127.0.0.1", "true", etf.List{}} - job := jobDetails{ - name: m.Element(3).(etf.Atom), - args: m.Element(5).(etf.List), - sendTo: m.Element(1).(etf.Pid), - } - - if m.Element(4) == etf.Atom("true") { - // add new job - if len(appState.jobs) == 0 { - process.Cast(process.Self(), "sendStat") - } - - if jobList, ok := appState.jobs[m.Element(2).(etf.Atom)]; ok { - for i := range jobList { - if jobList[i].name == job.name { - return gen.ServerStatusOK - } - } - jobList = append(jobList, job) - appState.jobs[m.Element(2).(etf.Atom)] = jobList - } else { - appState.jobs[m.Element(2).(etf.Atom)] = []jobDetails{job} - } - - } else { - // remove a job - if jobList, ok := appState.jobs[m.Element(2).(etf.Atom)]; ok { - for i := range jobList { - if jobList[i].name == job.name { - jobList[i] = jobList[0] - jobList = jobList[1:] - - if len(jobList) > 0 { - appState.jobs[m.Element(2).(etf.Atom)] = jobList - } else { - delete(appState.jobs, m.Element(2).(etf.Atom)) - } - break - } - } - } - - if len(appState.jobs) == 0 { - return gen.ServerStatusStop - } - - } - return gen.ServerStatusOK - } - } - } - - return gen.ServerStatusStop -} - -func (am *appMon) makeAppTree(process gen.Process, app etf.Atom) etf.Tuple { - node := process.Env(node.EnvKeyNode).(node.Node) - appInfo, err := node.ApplicationInfo(string(app)) - if err != nil { - return nil - } - - resolver := make(map[etf.Pid]interface{}) - - tree := makeTree(process, resolver, appInfo.PID) - children := etf.List{etf.Tuple{appInfo.PID, appInfo.PID.String()}} - for p, n := range resolver { - children = append(children, etf.Tuple{p, n}) - } - - appTree := etf.Tuple{ - appInfo.PID.String(), // pid or registered name - children, - tree, - etf.List{}, // TODO: links - } - - return appTree -} - -func makeTree(process gen.Process, resolver map[etf.Pid]interface{}, pid etf.Pid) etf.List { - - pidProcess := process.ProcessByPid(pid) - if pidProcess == nil { - return etf.List{} - } - if name := pidProcess.Name(); name != "" { - resolver[pid] = name - } else { - resolver[pid] = pid.String() - } - - tree := etf.List{} - - pchildren, err := pidProcess.Children() - if err != nil { - return tree - } - for _, cp := range pchildren { - children := makeTree(process, resolver, cp) - child := etf.Tuple{resolver[pid], resolver[cp]} - tree = append(tree, child) - tree = append(tree, children...) 
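	// (editorial note, not part of the original file: the result is a
	// flat list of {parent, child} name pairs built depth-first, with each
	// child's pair appended first and its whole subtree right after,
	// which matches the tree layout appmon_info delivers to the appmon UI.)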
- } - - return tree -} diff --git a/apps/erlang/erlang.go b/apps/erlang/erlang.go deleted file mode 100644 index d445bb2b..00000000 --- a/apps/erlang/erlang.go +++ /dev/null @@ -1,136 +0,0 @@ -package erlang - -// TODO: https://github.com/erlang/otp/blob/master/lib/runtime_tools-1.13.1/src/erlang_info.erl - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" -) - -type erlang struct { - gen.Server -} - -// Init -func (e *erlang) Init(process *gen.ServerProcess, args ...etf.Term) error { - lib.Log("ERLANG: Init: %#v", args) - return nil -} - -// HandleCall -func (e *erlang) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - lib.Log("ERLANG: HandleCall: %#v, From: %#v", message, from) - - switch m := message.(type) { - case etf.Tuple: - switch m.Element(1) { - case etf.Atom("process_info"): - args := m.Element(2).(etf.List) - reply := processInfo(process, args[0].(etf.Pid), args[1]) - return reply, gen.ServerStatusOK - case etf.Atom("system_info"): - args := m.Element(2).(etf.List) - reply := systemInfo(process, args[0].(etf.Atom)) - return reply, gen.ServerStatusOK - - case etf.Atom("function_exported"): - return true, gen.ServerStatusOK - } - - } - return etf.Atom("ok"), gen.ServerStatusOK -} - -func processInfo(p gen.Process, pid etf.Pid, property etf.Term) etf.Term { - process := p.ProcessByPid(pid) - if process == nil { - return etf.Atom("undefined") - } - - switch property { - case etf.Atom("registered_name"): - name := process.Name() - if name == "" { - return etf.List{} - } - - return etf.Tuple{property, etf.Atom(name)} - case etf.Atom("messages"): - return etf.Tuple{property, etf.List{}} - case etf.Atom("dictionary"): - return etf.Tuple{property, etf.List{}} - case etf.Atom("current_stacktrace"): - return etf.Tuple{property, etf.List{}} - } - - switch p := property.(type) { - case etf.List: - values := etf.List{} - info := process.Info() - for i := range p { - switch p[i] { - case etf.Atom("binary"): - values = append(values, etf.Tuple{p[i], etf.List{}}) - case etf.Atom("catchlevel"): - // values = append(values, etf.Tuple{p[i], 0}) - case etf.Atom("current_function"): - values = append(values, etf.Tuple{p[i], info.CurrentFunction}) - case etf.Atom("error_handler"): - // values = append(values, etf.Tuple{p[i], }) - case etf.Atom("garbage_collection"): - values = append(values, etf.Tuple{p[i], etf.List{}}) - case etf.Atom("group_leader"): - values = append(values, etf.Tuple{p[i], info.GroupLeader}) - case etf.Atom("heap_size"): - // values = append(values, etf.Tuple{p[i], etf.Tuple{etf.Atom("words"), 0}}) - case etf.Atom("initial_call"): - values = append(values, etf.Tuple{p[i], "object:loop"}) - case etf.Atom("last_calls"): - // values = append(values, etf.Tuple{p[i], }) - case etf.Atom("links"): - values = append(values, etf.Tuple{p[i], info.Links}) - case etf.Atom("memory"): - values = append(values, etf.Tuple{p[i], 0}) - case etf.Atom("message_queue_len"): - values = append(values, etf.Tuple{p[i], info.MessageQueueLen}) - case etf.Atom("monitored_by"): - values = append(values, etf.Tuple{p[i], info.MonitoredBy}) - case etf.Atom("monitors"): - values = append(values, etf.Tuple{p[i], info.Monitors}) - case etf.Atom("priority"): - // values = append(values, etf.Tuple{p[i], 0}) - case etf.Atom("reductions"): - values = append(values, etf.Tuple{p[i], 0}) - case etf.Atom("registered_name"): - values = append(values, etf.Tuple{p[i], process.Name()}) - case 
etf.Atom("sequential_trace_token"): - // values = append(values, etf.Tuple{p[i], }) - case etf.Atom("stack_size"): - // values = append(values, etf.Tuple{p[i], etf.Tuple{etf.Atom("words"), 0}}) - case etf.Atom("status"): - values = append(values, etf.Tuple{p[i], info.Status}) - case etf.Atom("suspending"): - // values = append(values, etf.Tuple{p[i], }) - case etf.Atom("total_heap_size"): - // values = append(values, etf.Tuple{p[i], etf.Tuple{etf.Atom("words"), 0}}) - case etf.Atom("trace"): - // values = append(values, etf.Tuple{p[i], 0}) - case etf.Atom("trap_exit"): - values = append(values, etf.Tuple{p[i], info.TrapExit}) - - } - - } - return values - } - return nil -} - -func systemInfo(p gen.Process, name etf.Atom) etf.Term { - switch name { - case etf.Atom("dirty_cpu_schedulers"): - return 1 - } - return etf.Atom("unknown") -} diff --git a/apps/erlang/global_name_server.go b/apps/erlang/global_name_server.go deleted file mode 100644 index e1099654..00000000 --- a/apps/erlang/global_name_server.go +++ /dev/null @@ -1,17 +0,0 @@ -package erlang - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" -) - -// TODO: https://github.com/erlang/otp/blob/master/lib/kernel/src/global.erl - -type globalNameServer struct { - gen.Server -} - -// HandleCast -func (gns *globalNameServer) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - return gen.ServerStatusOK -} diff --git a/apps/erlang/net_kernel.go b/apps/erlang/net_kernel.go deleted file mode 100644 index 18e4e6aa..00000000 --- a/apps/erlang/net_kernel.go +++ /dev/null @@ -1,92 +0,0 @@ -package erlang - -// https://github.com/erlang/otp/blob/master/lib/kernel/src/net_kernel.erl - -import ( - "context" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" -) - -func CreateApp() gen.ApplicationBehavior { - return &kernelApp{} -} - -// KernelApp -type kernelApp struct { - gen.Application -} - -// Load -func (nka *kernelApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) { - return gen.ApplicationSpec{ - Name: "erlang", - Description: "Erlang support app", - Version: "v.1.0", - Children: []gen.ApplicationChildSpec{ - { - Child: &netKernelSup{}, - Name: "net_kernel_sup", - }, - }, - }, nil -} - -// Start -func (nka *kernelApp) Start(p gen.Process, args ...etf.Term) {} - -type netKernelSup struct { - gen.Supervisor -} - -// Init -func (nks *netKernelSup) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - { - Name: "net_kernel", - Child: &netKernel{}, - }, - { - Name: "global_name_server", - Child: &globalNameServer{}, - }, - { - Name: "erlang", - Child: &erlang{}, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategyOneForOne, - Intensity: 10, - Period: 5, - Restart: gen.SupervisorStrategyRestartPermanent, - }, - }, nil -} - -type netKernel struct { - gen.Server - routinesCtx map[etf.Pid]context.CancelFunc -} - -// Init -func (nk *netKernel) Init(process *gen.ServerProcess, args ...etf.Term) error { - lib.Log("NET_KERNEL: Init: %#v", args) - nk.routinesCtx = make(map[etf.Pid]context.CancelFunc) - return nil -} - -// HandleCall -func (nk *netKernel) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (reply etf.Term, status gen.ServerStatus) { - lib.Log("NET_KERNEL: HandleCall: %#v, From: %#v", message, from) - return -} - -// HandleInfo -func (nk *netKernel) HandleInfo(process *gen.ServerProcess, 
message etf.Term) gen.ServerStatus { - lib.Log("NET_KERNEL: HandleInfo: %#v", message) - return gen.ServerStatusOK -} diff --git a/apps/system/app.go b/apps/system/app.go deleted file mode 100644 index 46ac7340..00000000 --- a/apps/system/app.go +++ /dev/null @@ -1,39 +0,0 @@ -package system - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -func CreateApp(options node.System) gen.ApplicationBehavior { - return &systemApp{ - options: options, - } -} - -type systemApp struct { - gen.Application - options node.System -} - -func (sa *systemApp) Load(args ...etf.Term) (gen.ApplicationSpec, error) { - lib.Log("SYSTEM: Application load") - return gen.ApplicationSpec{ - Name: "system_app", - Description: "System Application", - Version: "v.1.0", - Children: []gen.ApplicationChildSpec{ - gen.ApplicationChildSpec{ - Child: &systemAppSup{}, - Name: "system_app_sup", - Args: []etf.Term{sa.options}, - }, - }, - }, nil -} - -func (sa *systemApp) Start(p gen.Process, args ...etf.Term) { - lib.Log("[%s] SYSTEM: Application started", p.NodeName()) -} diff --git a/apps/system/metrics.go b/apps/system/metrics.go deleted file mode 100644 index 0c66e228..00000000 --- a/apps/system/metrics.go +++ /dev/null @@ -1,178 +0,0 @@ -package system - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/binary" - "net" - "runtime" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/lib/osdep" - "github.com/ergo-services/ergo/node" -) - -var ( - defaultMetricsPeriod = time.Minute -) - -type systemMetrics struct { - gen.Server -} - -type systemMetricsState struct { - // gather last 10 stats - stats [10]nodeFullStats - i int -} -type messageSystemAnonInfo struct{} -type messageSystemGatherStats struct{} - -type nodeFullStats struct { - timestamp int64 - utime int64 - stime int64 - - memAlloc uint64 - memTotalAlloc uint64 - memFrees uint64 - memSys uint64 - memNumGC uint32 - - node node.NodeStats - network []node.NetworkStats -} - -func (sb *systemMetrics) Init(process *gen.ServerProcess, args ...etf.Term) error { - lib.Log("[%s] SYSTEM_METRICS: Init: %#v", process.NodeName(), args) - if err := RegisterTypes(); err != nil { - return err - } - options := args[0].(node.System) - process.State = &systemMetricsState{} - if options.DisableAnonMetrics == false { - process.CastAfter(process.Self(), messageSystemAnonInfo{}, defaultMetricsPeriod) - } - process.CastAfter(process.Self(), messageSystemGatherStats{}, defaultMetricsPeriod) - return nil -} - -func (sb *systemMetrics) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - lib.Log("[%s] SYSTEM_METRICS: HandleCast: %#v", process.NodeName(), message) - state := process.State.(*systemMetricsState) - switch message.(type) { - case messageSystemAnonInfo: - ver := process.Env(node.EnvKeyVersion).(node.Version) - sendAnonInfo(process.NodeName(), ver) - - case messageSystemGatherStats: - stats := gatherStats(process) - if state.i > len(state.stats)-1 { - state.i = 0 - } - state.stats[state.i] = stats - state.i++ - process.CastAfter(process.Self(), messageSystemGatherStats{}, defaultMetricsPeriod) - } - return gen.ServerStatusOK -} - -func (sb *systemMetrics) Terminate(process *gen.ServerProcess, reason string) { - lib.Log("[%s] SYSTEM_METRICS: Terminate with reason %q", 
process.NodeName(), reason) -} - -// private routines - -func sendAnonInfo(name string, ver node.Version) { - metricsHost := "metrics.ergo.services" - - values, err := net.LookupTXT(metricsHost) - if err != nil || len(values) == 0 { - return - } - - v, err := base64.StdEncoding.DecodeString(values[0]) - if err != nil { - return - } - - pk, err := x509.ParsePKCS1PublicKey([]byte(v)) - if err != nil { - return - } - - c, err := net.Dial("udp", metricsHost+":4411") - if err != nil { - return - } - defer c.Close() - - // FIXME get it back before the release - // nameHash := crc32.Checksum([]byte(name), lib.CRC32Q) - nameHash := name - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - message := MessageSystemAnonMetrics{ - Name: nameHash, - Arch: runtime.GOARCH, - OS: runtime.GOOS, - NumCPU: runtime.NumCPU(), - GoVersion: runtime.Version(), - ErgoVersion: ver.Release, - } - if err := etf.Encode(message, b, etf.EncodeOptions{}); err != nil { - return - } - - hash := sha256.New() - cipher, err := rsa.EncryptOAEP(hash, rand.Reader, pk, b.B, nil) - if err != nil { - return - } - - // 2 (magic: 1144) + 2 (length) + len(cipher) - b.Reset() - b.Allocate(4) - b.Append(cipher) - binary.BigEndian.PutUint16(b.B[0:2], uint16(1144)) - binary.BigEndian.PutUint16(b.B[2:4], uint16(len(cipher))) - c.Write(b.B) -} - -func gatherStats(process *gen.ServerProcess) nodeFullStats { - fullStats := nodeFullStats{} - - // CPU (windows doesn't support this feature) - fullStats.utime, fullStats.stime = osdep.ResourceUsage() - - // Memory - mem := runtime.MemStats{} - runtime.ReadMemStats(&mem) - fullStats.memAlloc = mem.Alloc - fullStats.memTotalAlloc = mem.TotalAlloc - fullStats.memSys = mem.Sys - fullStats.memFrees = mem.Frees - fullStats.memNumGC = mem.NumGC - - // Network - node := process.Env(node.EnvKeyNode).(node.Node) - for _, name := range node.Nodes() { - ns, err := node.NetworkStats(name) - if err != nil { - continue - } - fullStats.network = append(fullStats.network, ns) - } - - fullStats.node = node.Stats() - fullStats.timestamp = time.Now().Unix() - return fullStats -} diff --git a/apps/system/sup.go b/apps/system/sup.go deleted file mode 100644 index a36d5622..00000000 --- a/apps/system/sup.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" -) - -type systemAppSup struct { - gen.Supervisor -} - -func (sas *systemAppSup) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - gen.SupervisorChildSpec{ - Name: "system_metrics", - Child: &systemMetrics{}, - Args: args, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategyOneForOne, - Intensity: 10, - Period: 5, - Restart: gen.SupervisorStrategyRestartPermanent, - }, - }, nil -} diff --git a/apps/system/types.go b/apps/system/types.go deleted file mode 100644 index 7711e337..00000000 --- a/apps/system/types.go +++ /dev/null @@ -1,29 +0,0 @@ -package system - -import ( - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type MessageSystemAnonMetrics struct { - Name string - Arch string - OS string - NumCPU int - GoVersion string - ErgoVersion string -} - -func RegisterTypes() error { - types := []interface{}{ - MessageSystemAnonMetrics{}, - } - rtOpts := etf.RegisterTypeOptions{Strict: true} - - for _, t := range types { - if _, err := etf.RegisterType(t, rtOpts); err != nil && err != lib.ErrTaken { - return err - } - } - return nil -} diff --git 
a/debug.go b/debug.go index 89b4e8a6..f2b3ac1b 100644 --- a/debug.go +++ b/debug.go @@ -1,5 +1,4 @@ //go:build debug -// +build debug package ergo @@ -9,5 +8,7 @@ import ( ) func init() { - go http.ListenAndServe("0.0.0.0:9009", nil) + // start profiler + dsn := "localhost:9009" + go http.ListenAndServe(dsn, nil) } diff --git a/ergo.go b/ergo.go index f4145d38..4c7d5c39 100644 --- a/ergo.go +++ b/ergo.go @@ -1,70 +1,41 @@ package ergo import ( - "context" + "runtime/debug" - "github.com/ergo-services/ergo/apps/cloud" - "github.com/ergo-services/ergo/apps/erlang" - "github.com/ergo-services/ergo/apps/system" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" - "github.com/ergo-services/ergo/proto/dist" + "ergo.services/ergo/app/system" + "ergo.services/ergo/gen" + "ergo.services/ergo/node" ) -// StartNode create new node with name and cookie string -func StartNode(name string, cookie string, opts node.Options) (node.Node, error) { - return StartNodeWithContext(context.Background(), name, cookie, opts) -} - -// StartNodeWithContext create new node with specified context, name and cookie string -func StartNodeWithContext(ctx context.Context, name string, cookie string, opts node.Options) (node.Node, error) { - version := node.Version{ - Release: Version, - Prefix: VersionPrefix, - OTP: VersionOTP, - } - if opts.Env == nil { - opts.Env = make(map[gen.EnvKey]interface{}) +// StartNode starts a new node with given name +func StartNode(name gen.Atom, options gen.NodeOptions) (gen.Node, error) { + var empty gen.Version + + if options.Version == empty { + if info, ok := debug.ReadBuildInfo(); ok { + options.Version.Name = info.Main.Path + options.Version.Release = info.Main.Version + for _, setting := range info.Settings { + if setting.Key == "vcs.revision" { + options.Version.Commit = setting.Value + break + } + } + } } - opts.Env[node.EnvKeyVersion] = version // add default applications: defaultApps := []gen.ApplicationBehavior{ - system.CreateApp(opts.System), // system application (bus, metrics etc.) - erlang.CreateApp(), // erlang support - } - - // add cloud support if it's enabled - if opts.Cloud.Enable { - cloudApp := cloud.CreateApp(opts.Cloud) - defaultApps = append(defaultApps, cloudApp) - if opts.Proxy.Accept == false { - lib.Warning("Disabled option Proxy.Accept makes this node inaccessible to the other nodes within your cloud cluster, but it still allows initiate connection to the others with this option enabled.") - } - } - opts.Applications = append(defaultApps, opts.Applications...) - - if opts.Handshake == nil { - // create default handshake for the node (Erlang Dist Handshake) - opts.Handshake = dist.CreateHandshake(dist.HandshakeOptions{}) + system.CreateApp(), } - if opts.Proto == nil { - // create default proto handler (Erlang Dist Proto) - protoOptions := node.DefaultProtoOptions() - opts.Proto = dist.CreateProto(protoOptions) - } - - if opts.StaticRoutesOnly == false && opts.Registrar == nil { - // create default registrar (with enabled Erlang EPMD server) - opts.Registrar = dist.CreateRegistrarWithLocalEPMD("", dist.DefaultEPMDPort) - } + options.Applications = append(defaultApps, options.Applications...) 
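Given the rewritten StartNode above, which takes a gen.Atom name plus gen.NodeOptions and fills Version from debug.ReadBuildInfo, a minimal v3 entry point looks like the sketch below. The node name is an illustrative assumption, and the blocking Wait call is labeled as such in the comment.

package main

import (
	"ergo.services/ergo"
	"ergo.services/ergo/gen"
)

func main() {
	// with default NodeOptions the version comes from the build info
	// and the system application is added automatically (see above)
	n, err := ergo.StartNode("demo@localhost", gen.NodeOptions{})
	if err != nil {
		panic(err)
	}
	n.Wait() // assumption: gen.Node exposes Wait() to block until shutdown
}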
- if len(opts.Listeners) == 0 { - listener := node.DefaultListener() - opts.Listeners = append(opts.Listeners, listener) + n, err := node.Start(name, options, FrameworkVersion) + if err != nil { + return nil, err } - return node.StartWithContext(ctx, name, cookie, opts) + return n, nil } diff --git a/etf/cache.go b/etf/cache.go deleted file mode 100644 index 2d1fef32..00000000 --- a/etf/cache.go +++ /dev/null @@ -1,176 +0,0 @@ -package etf - -import ( - "sync" -) - -const ( - maxCacheItems = int16(2048) -) - -type AtomCache struct { - In *AtomCacheIn - Out *AtomCacheOut -} - -type AtomCacheIn struct { - Atoms [maxCacheItems]*Atom -} - -// AtomCache -type AtomCacheOut struct { - sync.RWMutex - cacheMap map[Atom]int16 - id int16 - cacheList [maxCacheItems]Atom -} - -// CacheItem -type CacheItem struct { - ID int16 - Encoded bool - Name Atom -} - -var ( - encodingAtomCachePool = &sync.Pool{ - New: func() interface{} { - l := &EncodingAtomCache{ - L: make([]CacheItem, 0, 255), - added: make(map[Atom]uint8), - } - l.original = l.L - return l - }, - } -) - -// NewAtomCache -func NewAtomCache() AtomCache { - return AtomCache{ - In: &AtomCacheIn{}, - Out: &AtomCacheOut{ - cacheMap: make(map[Atom]int16), - id: -1, - }, - } -} - -type AtomMapping struct { - MutexIn sync.RWMutex - In map[Atom]Atom - MutexOut sync.RWMutex - Out map[Atom]Atom -} - -// NewAtomMapping -func NewAtomMapping() *AtomMapping { - return &AtomMapping{ - In: make(map[Atom]Atom), - Out: make(map[Atom]Atom), - } -} - -// Append -func (a *AtomCacheOut) Append(atom Atom) (int16, bool) { - a.Lock() - defer a.Unlock() - - if a.id > maxCacheItems-2 { - return 0, false - } - - if id, exist := a.cacheMap[atom]; exist { - return id, false - } - - a.id++ - a.cacheList[a.id] = atom - a.cacheMap[atom] = a.id - - return a.id, true -} - -// LastID -func (a *AtomCacheOut) LastAdded() (Atom, int16) { - a.RLock() - defer a.RUnlock() - l := len(a.cacheList) - if l == 0 { - return "", -1 - } - return a.cacheList[l-1], int16(l - 1) -} - -// ListSince -func (a *AtomCacheOut) ListSince(id int16) []Atom { - if id < 0 { - id = 0 - } - if int(id) > len(a.cacheList)-1 { - return nil - } - return a.cacheList[id:] -} - -// EncodingAtomCache -type EncodingAtomCache struct { - L []CacheItem - original []CacheItem - added map[Atom]uint8 - HasLongAtom bool -} - -// TakeEncodingAtomCache -func TakeEncodingAtomCache() *EncodingAtomCache { - return encodingAtomCachePool.Get().(*EncodingAtomCache) -} - -// ReleaseEncodingAtomCache -func ReleaseEncodingAtomCache(l *EncodingAtomCache) { - l.L = l.original[:0] - if len(l.added) > 0 { - for k, _ := range l.added { - delete(l.added, k) - } - } - encodingAtomCachePool.Put(l) -} - -// Reset -func (l *EncodingAtomCache) Reset() { - l.L = l.original[:0] - l.HasLongAtom = false - if len(l.added) > 0 { - for k, _ := range l.added { - delete(l.added, k) - } - } -} - -// Append -func (l *EncodingAtomCache) Append(a CacheItem) uint8 { - id, added := l.added[a.Name] - if added { - return id - } - - l.L = append(l.L, a) - if !a.Encoded && len(a.Name) > 255 { - l.HasLongAtom = true - } - id = uint8(len(l.L) - 1) - l.added[a.Name] = id - return id -} - -// Delete -func (l *EncodingAtomCache) Delete(atom Atom) { - // clean up in order to get rid of map reallocation which is pretty expensive - delete(l.added, atom) -} - -// Len -func (l *EncodingAtomCache) Len() int { - return len(l.L) -} diff --git a/etf/decode.go b/etf/decode.go deleted file mode 100644 index 3289bba5..00000000 --- a/etf/decode.go +++ /dev/null @@ -1,1680 +0,0 @@ 
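Before the 1,680-line ETF decoder deleted below, note what replaces it in this patch: payloads are serialized with the new EDF codec, as seen in the metrics code earlier (edf.Encode into a lib.Buffer). A minimal encoding sketch, where myTerm is a hypothetical value of any registered type:

	buf := lib.TakeBuffer()
	defer lib.ReleaseBuffer(buf)
	// edf.Encode serializes myTerm into buf; edf.Options{} uses defaults
	if err := edf.Encode(myTerm, buf, edf.Options{}); err != nil {
		// handle the encoding error
	}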
-package etf - -import ( - "encoding/binary" - "fmt" - "math" - "math/big" - "reflect" - - "github.com/ergo-services/ergo/lib" -) - -// linked list for decoding complex types like list/map/tuple -type stackElement struct { - parent *stackElement - reg *reflect.Value // used for registered types decoding - term Term //value - tmp Term // temporary value. used as a temporary storage for a key of map - i int // current - children int - termType byte - - regKeyKind reflect.Kind // we should align key and value types for maps (int*, float*) - regValueKind reflect.Kind // in case of decoding into value with registered type - - strict bool // if encoding/decoding registered types must be strict -} - -var ( - biggestInt = big.NewInt(0xfffffffffffffff) - lowestInt = big.NewInt(-0xfffffffffffffff) - - termNil = make(List, 0) - - errMalformedAtomUTF8 = fmt.Errorf("Malformed ETF. ettAtomUTF8") - errMalformedSmallAtomUTF8 = fmt.Errorf("Malformed ETF. ettSmallAtomUTF8") - errMalformedString = fmt.Errorf("Malformed ETF. ettString") - errMalformedCacheRef = fmt.Errorf("Malformed ETF. ettCacheRef") - errMalformedNewFloat = fmt.Errorf("Malformed ETF. ettNewFloat") - errMalformedFloat = fmt.Errorf("Malformed ETF. ettFloat") - errMalformedSmallInteger = fmt.Errorf("Malformed ETF. ettSmallInteger") - errMalformedInteger = fmt.Errorf("Malformed ETF. ettInteger") - errMalformedSmallBig = fmt.Errorf("Malformed ETF. ettSmallBig") - errMalformedLargeBig = fmt.Errorf("Malformed ETF. ettLargeBig") - errMalformedList = fmt.Errorf("Malformed ETF. ettList") - errMalformedSmallTuple = fmt.Errorf("Malformed ETF. ettSmallTuple") - errMalformedLargeTuple = fmt.Errorf("Malformed ETF. ettLargeTuple") - errMalformedMap = fmt.Errorf("Malformed ETF. ettMap") - errMalformedBinary = fmt.Errorf("Malformed ETF. ettBinary") - errMalformedBitBinary = fmt.Errorf("Malformed ETF. ettBitBinary") - errMalformedPid = fmt.Errorf("Malformed ETF. ettPid") - errMalformedNewPid = fmt.Errorf("Malformed ETF. ettNewPid") - errMalformedRef = fmt.Errorf("Malformed ETF. ettNewRef") - errMalformedNewRef = fmt.Errorf("Malformed ETF. ettNewerRef") - errMalformedPort = fmt.Errorf("Malformed ETF. ettPort") - errMalformedNewPort = fmt.Errorf("Malformed ETF. ettNewPort") - errMalformedFun = fmt.Errorf("Malformed ETF. ettNewFun") - errMalformedExport = fmt.Errorf("Malformed ETF. ettExport") - errMalformedUnknownType = fmt.Errorf("Malformed ETF. unknown type") - - errMalformed = fmt.Errorf("Malformed ETF") - errInternal = fmt.Errorf("Internal error") -) - -// DecodeOptions -type DecodeOptions struct { - AtomMapping *AtomMapping - FlagBigPidRef bool -} - -// stackless implementation is speeding up decoding function up to x25 times - -// it might looks hard to understand the logic, but -// there are only two stages -// 1) Stage1: decoding basic types (atoms, strings, numbers...) -// 2) Stage2: decoding list/tuples/maps and complex types like Port/Pid/Ref using linked list named 'stack' -// -// see comments within this function - -// Decode -func Decode(packet []byte, cache []Atom, options DecodeOptions) (retTerm Term, retByte []byte, retErr error) { - var term Term - var stack *stackElement - var child *stackElement - var t byte - if lib.CatchPanic() { - defer func() { - // We should catch any panic happened during decoding the raw data. - // Some of the Erlang' types can not be supported in Golang. 
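	// [editorial aside, not in the original file: the same limitation is
	// reproducible in plain Go, since etf.Tuple is backed by a slice and
	// slices are not hashable map keys:
	//
	//	m := map[interface{}]interface{}{}
	//	m[etf.Tuple{1}] = true // panic: hash of unhashable type etf.Tuple
	//
	// this is exactly the panic the recover() below turns into an error.]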
- // As an example: Erlang map with tuple as a key cause a panic - // in Golang runtime with message: - // 'panic: runtime error: hash of unhashable type etf.Tuple' - // The problem is in etf.Tuple type - it is interface type. At the same - // time Golang does support hashable key in map (like struct as a key), - // but it should be known implicitly. It means we can encode such kind - // of data, but can not to decode it back. - if r := recover(); r != nil { - retTerm = nil - retByte = nil - retErr = fmt.Errorf("%v", r) - } - }() - } - - for { - child = nil - if len(packet) == 0 { - return nil, nil, errMalformed - } - - t = packet[0] - packet = packet[1:] - - // Stage 1: decoding base type. if have encountered List/Map/Tuple - // or complex type like Pid/Ref/Port: - // save the state in stackElement and push it to the stack (basically, - // we just append the new item to the linked list) - // - - switch t { - case ettAtomUTF8, ettAtom: - if len(packet) < 2 { - return nil, nil, errMalformedAtomUTF8 - } - - n := binary.BigEndian.Uint16(packet) - if len(packet) < int(n+2) { - return nil, nil, errMalformedAtomUTF8 - } - - atom := Atom(packet[2 : n+2]) - if len([]rune(atom)) > 255 { - return nil, nil, errMalformedAtomUTF8 - } - - // replace atom value if we have mapped value for it - if options.AtomMapping != nil { - options.AtomMapping.MutexIn.RLock() - if mapped, ok := options.AtomMapping.In[atom]; ok { - atom = mapped - } - options.AtomMapping.MutexIn.RUnlock() - } - - term = atom - packet = packet[n+2:] - - case ettSmallAtomUTF8, ettSmallAtom: - if len(packet) == 0 { - return nil, nil, errMalformedSmallAtomUTF8 - } - - n := int(packet[0]) - if len(packet) < n+1 { - return nil, nil, errMalformedSmallAtomUTF8 - } - - switch Atom(packet[1 : n+1]) { - case "true": - term = true - case "false": - term = false - default: - atom := Atom(packet[1 : n+1]) - // replace atom value if we have mapped value for it - if options.AtomMapping != nil { - options.AtomMapping.MutexIn.RLock() - if mapped, ok := options.AtomMapping.In[atom]; ok { - atom = mapped - } - options.AtomMapping.MutexIn.RUnlock() - } - term = atom - } - packet = packet[n+1:] - - case ettString: - if len(packet) < 2 { - return nil, nil, errMalformedString - } - - n := binary.BigEndian.Uint16(packet) - if len(packet) < int(n+2) { - return nil, nil, errMalformedString - } - - term = string(packet[2 : n+2]) - packet = packet[n+2:] - - case ettCacheRef: - if len(packet) == 0 { - return nil, nil, errMalformedCacheRef - } - - switch cache[int(packet[0])] { - case "true": - term = true - case "false": - term = false - default: - atom := cache[int(packet[0])] - // replace atom value if we have mapped value for it - if options.AtomMapping != nil { - options.AtomMapping.MutexIn.RLock() - if mapped, ok := options.AtomMapping.In[atom]; ok { - atom = mapped - } - options.AtomMapping.MutexIn.RUnlock() - } - term = atom - } - packet = packet[1:] - - case ettSmallInteger: - if len(packet) == 0 { - return nil, nil, errMalformedSmallInteger - } - - i := int(packet[0]) - term = int(i) - packet = packet[1:] - - if stack == nil { - break - } - - t := stack.regKeyKind - if stack.i&0x01 == 1 { - t = stack.regValueKind - } - switch t { - case reflect.Invalid: - // not registered type - break - case reflect.Int64: - term = int64(i) - case reflect.Int: - term = i - case reflect.Int8: - if i > math.MaxInt8 || i < math.MinInt8 { - panic("overflows int8") - } - term = int8(i) - case reflect.Int16: - term = int16(i) - case reflect.Int32: - term = int32(i) - case 
reflect.Uint: - term = uint(i) - case reflect.Uint8: - term = uint8(i) - case reflect.Uint16: - term = uint16(i) - case reflect.Uint32: - term = uint32(i) - default: - panic("destination value is not an int* or overflows") - } - - case ettInteger: - if len(packet) < 4 { - return nil, nil, errMalformedInteger - } - - // negatives are encoded as ettSmallBig so the value shouldn't be - // greater int32 - i := int32(binary.BigEndian.Uint32(packet[:4])) - term = int64(i) - packet = packet[4:] - - if stack == nil { - break - } - - t := stack.regKeyKind - if stack.i&0x01 == 1 { - t = stack.regValueKind - } - switch t { - case reflect.Invalid: - // not registered type - break - case reflect.Int64: - term = int64(i) - case reflect.Int: - term = int(i) - case reflect.Int8: - if i > math.MaxInt8 || i < math.MinInt8 { - panic("overflows int8") - } - term = int8(i) - case reflect.Int16: - if i > math.MaxInt16 || i < math.MinInt16 { - panic("overflows int16") - } - term = int16(i) - case reflect.Int32: - term = i - case reflect.Uint: - term = uint(i) - case reflect.Uint8: - if i > math.MaxUint8 { - panic("overflows uint") - } - term = uint8(i) - case reflect.Uint16: - if i > math.MaxUint16 { - panic("overflows uint") - } - term = uint16(i) - case reflect.Uint32: - term = uint32(i) - default: - panic("destination value is not an int* or overflows") - } - - case ettSmallBig: - if len(packet) == 0 { - return nil, nil, errMalformedSmallBig - } - - n := packet[0] - negative := packet[1] == 1 // sign - - ///// this block improves performance at least 4 times - if n < 9 { // treat as an int64/uint64 - le8 := make([]byte, 8) - copy(le8, packet[2:n+2]) - smallBig := binary.LittleEndian.Uint64(le8) - if negative { - term = int64(-smallBig) - } else { - if smallBig > math.MaxInt64 { - term = uint64(smallBig) - } else { - term = int64(smallBig) - } - } - packet = packet[n+2:] - - if stack == nil { - break - } - - t := stack.regKeyKind - if stack.i&0x01 == 1 { - t = stack.regValueKind - } - switch t { - case reflect.Invalid: - // not registered type - break - case reflect.Int64: - if negative { - if smallBig > -math.MinInt64 { - panic("overflows int64") - } - term = int64(-smallBig) - } else { - if smallBig > math.MaxInt64 { - panic("overflows int64") - } - term = int64(smallBig) - } - case reflect.Int: - if negative { - if smallBig > -math.MinInt { - panic("overflows int") - } - term = int(-smallBig) - } else { - if smallBig > math.MaxInt { - panic("overflows int") - } - term = int(smallBig) - } - case reflect.Int8: - if negative { - if smallBig > -math.MinInt8 { - panic("overflows int8") - } - term = int8(-smallBig) - } else { - if smallBig > math.MaxInt8 { - panic("overflows int8") - } - term = int8(smallBig) - } - case reflect.Int16: - if negative { - if smallBig > -math.MinInt16 { - panic("overflows int16") - } - term = int16(-smallBig) - } else { - if smallBig > math.MaxInt16 { - panic("overflows int16") - } - term = int16(smallBig) - } - case reflect.Int32: - if negative { - if smallBig > -math.MinInt32 { - panic("overflows int32") - } - term = int32(-smallBig) - } else { - if smallBig > math.MaxInt32 { - panic("overflows int32") - } - term = int32(smallBig) - } - case reflect.Uint: - if negative { - panic("signed integer value") - } - if smallBig > math.MaxUint { - panic("overflows uint") - } - term = uint(smallBig) - case reflect.Uint8: - if negative { - panic("signed integer value") - } - if smallBig > math.MaxUint8 { - panic("overflows uint8") - } - term = uint8(smallBig) - case reflect.Uint16: - if 
negative { - panic("signed integer value") - } - if smallBig > math.MaxUint16 { - panic("overflows uint16") - } - term = uint16(smallBig) - case reflect.Uint32: - if negative { - panic("signed integer value") - } - if smallBig > math.MaxUint32 { - panic("overflows uint32") - } - term = uint32(smallBig) - default: - panic("destination value is not an int* or overflows") - } - - break - } - ///// - - if len(packet) < int(n+2) { - return nil, nil, errMalformedSmallBig - } - bytes := packet[2 : n+2] - - // encoded as a little endian. convert it to the big endian order - l := len(bytes) - for i := 0; i < l/2; i++ { - bytes[i], bytes[l-1-i] = bytes[l-1-i], bytes[i] - } - - bigInt := &big.Int{} - bigInt.SetBytes(bytes) - if negative { - bigInt = bigInt.Neg(bigInt) - } - - // try int and int64 - if bigInt.Cmp(biggestInt) < 0 && bigInt.Cmp(lowestInt) > 0 { - term = bigInt.Int64() - packet = packet[n+2:] - break - } - - term = bigInt - packet = packet[n+2:] - - case ettLargeBig: - if len(packet) < 256 { // must be longer than ettSmallBig - return nil, nil, errMalformedLargeBig - } - - n := binary.BigEndian.Uint32(packet[:4]) - negative := packet[4] == 1 // sign - - if len(packet) < int(n+5) { - return nil, nil, errMalformedLargeBig - } - bytes := packet[5 : n+5] - - // encoded as a little endian. convert it to the big endian order - l := len(bytes) - for i := 0; i < l/2; i++ { - bytes[i], bytes[l-1-i] = bytes[l-1-i], bytes[i] - } - - bigInt := &big.Int{} - bigInt.SetBytes(bytes) - if negative { - bigInt = bigInt.Neg(bigInt) - } - - term = bigInt - packet = packet[n+5:] - - case ettList: - if len(packet) < 4 { - return nil, nil, errMalformedList - } - - n := binary.BigEndian.Uint32(packet[:4]) - if n == 0 { - // must be encoded as ettNil - return nil, nil, errMalformedList - } - - term = make(List, n+1) - packet = packet[4:] - child = &stackElement{ - parent: stack, - termType: ettList, - term: term, - children: int(n + 1), - } - - case ettSmallTuple: - if len(packet) == 0 { - return nil, nil, errMalformedSmallTuple - } - - n := packet[0] - packet = packet[1:] - term = make(Tuple, n) - - if n == 0 { - break - } - - child = &stackElement{ - parent: stack, - termType: ettSmallTuple, - term: term, - children: int(n), - } - - case ettLargeTuple: - if len(packet) < 4 { - return nil, nil, errMalformedLargeTuple - } - - n := binary.BigEndian.Uint32(packet[:4]) - packet = packet[4:] - term = make(Tuple, n) - - if n == 0 { - break - } - - child = &stackElement{ - parent: stack, - termType: ettLargeTuple, - term: term, - children: int(n), - } - - case ettMap: - if len(packet) < 4 { - return nil, nil, errMalformedMap - } - - n := binary.BigEndian.Uint32(packet[:4]) - packet = packet[4:] - term = make(Map) - - if n == 0 { - break - } - - child = &stackElement{ - parent: stack, - termType: ettMap, - term: term, - children: int(n) * 2, - } - - case ettBinary: - if len(packet) < 4 { - return nil, nil, errMalformedBinary - } - - n := binary.BigEndian.Uint32(packet) - if len(packet) < int(n+4) { - return nil, nil, errMalformedBinary - } - - b := make([]byte, n) - copy(b, packet[4:n+4]) - - term = b - packet = packet[n+4:] - - case ettNil: - // for registered types we should use a nil value - // otherwise - treat it as an empty list - if stack.reg != nil { - term = nil - } else { - term = termNil - } - - case ettPid, ettNewPid: - child = &stackElement{ - parent: stack, - termType: t, - children: 1, - } - - case ettNewRef, ettNewerRef: - if len(packet) < 2 { - return nil, nil, errMalformedRef - } - - l := 
binary.BigEndian.Uint16(packet[:2]) - packet = packet[2:] - - child = &stackElement{ - parent: stack, - termType: t, - children: 1, - tmp: l, // save length in temporary place of the stack element - } - - case ettExport: - child = &stackElement{ - parent: stack, - termType: t, - term: Export{}, - children: 3, - } - - case ettNewFun: - var unique [16]byte - - if len(packet) < 32 { - return nil, nil, errMalformedFun - } - - copy(unique[:], packet[5:21]) - l := binary.BigEndian.Uint32(packet[25:29]) - - fun := Function{ - Arity: packet[4], - Unique: unique, - Index: binary.BigEndian.Uint32(packet[21:25]), - FreeVars: make([]Term, l), - } - - child = &stackElement{ - parent: stack, - termType: t, - term: fun, - children: 4 + int(l), - } - packet = packet[29:] - - case ettPort, ettNewPort: - child = &stackElement{ - parent: stack, - termType: t, - children: 1, - } - - case ettBitBinary: - if len(packet) < 6 { - return nil, nil, errMalformedBitBinary - } - - n := binary.BigEndian.Uint32(packet) - bits := uint(packet[4]) - - b := make([]byte, n) - copy(b, packet[5:n+5]) - b[n-1] = b[n-1] >> (8 - bits) - - term = b - packet = packet[n+5:] - - case ettNewFloat: - if len(packet) < 8 { - return nil, nil, errMalformedNewFloat - } - bits := binary.BigEndian.Uint64(packet[:8]) - - f := math.Float64frombits(bits) - term = f - packet = packet[8:] - - if stack == nil { - break - } - - t := stack.regKeyKind - if stack.i&0x01 == 1 { - t = stack.regValueKind - } - switch t { - case reflect.Invalid: - // not registered type - break - case reflect.Float64: - break - case reflect.Float32: - if f > math.MaxFloat32 { - panic("overflows float32") - } - term = float32(f) - default: - panic("destination value is not an float* or overflows") - } - - case ettFloat: - if len(packet) < 31 { - return nil, nil, errMalformedFloat - } - - var f float64 - if r, err := fmt.Sscanf(string(packet[:31]), "%f", &f); err != nil || r != 1 { - return nil, nil, errMalformedFloat - } - term = f - packet = packet[31:] - - if stack == nil { - break - } - - t := stack.regKeyKind - if stack.i&0x01 == 1 { - t = stack.regValueKind - } - switch t { - case reflect.Invalid: - // not registered type - break - case reflect.Float64: - break - case reflect.Float32: - if f > math.MaxFloat32 { - panic("overflows float32") - } - term = float32(f) - default: - panic("destination value is not an float* or overflows") - } - - default: - term = nil - return nil, nil, errMalformedUnknownType - } - - // it was a single element - if stack == nil && child == nil { - break - } - - // decoding child item of List/Map/Tuple/Pid/Ref/Port/... 
going deeper - if child != nil { - stack = child - continue - } - - // Stage 2 - processStack: - if stack != nil { - var field reflect.Value - var set_field bool - - switch stack.termType { - case ettList: - if stack.i == 0 { - // if the first value is atom, check for the registered type - if typeName, isAtom := term.(Atom); isAtom == true { - registered.RLock() - r, found := registered.typesDec[typeName] - registered.RUnlock() - if found == true { - switch r.rtype.Kind() { - case reflect.Slice: - reg := reflect.MakeSlice(r.rtype, stack.children-2, stack.children-1) - stack.reg = ® - case reflect.Array: - reg := reflect.Indirect(reflect.New(r.rtype)) - stack.reg = ® - default: - if r.strict { - panic("destination value of registered type is not a slice/array") - } - } - stack.strict = r.strict - if r.strict == false { - stack.term.(List)[stack.i] = term - } - stack.i++ - break - } - } - } - - if stack.reg != nil { - if stack.i+1 == stack.children { - if t != ettNil { - x := reflect.Append(*stack.reg, reflect.ValueOf(term)) - stack.reg = &x - } - } else { - set_field = true - field = stack.reg.Index(stack.i - 1) - } - - if stack.strict == true { - stack.i++ - break - } - } - - stack.term.(List)[stack.i] = term - stack.i++ - // remove the last element for proper list (its ettNil) - if stack.i == stack.children && t == ettNil { - stack.term = stack.term.(List)[:stack.i-1] - } - - case ettSmallTuple, ettLargeTuple: - - if stack.i == 0 { - // if the first value is atom, check for the registered type - if typeName, isAtom := term.(Atom); isAtom == true { - registered.RLock() - r, found := registered.typesDec[typeName] - registered.RUnlock() - if found == true { - reg := reflect.Indirect(reflect.New(r.rtype)) - stack.reg = ® - stack.strict = r.strict - if r.strict == false { - stack.term.(Tuple)[stack.i] = term - } - stack.i++ - break - } - } - } - - if stack.reg != nil { - set_field = true - field = stack.reg.Field(stack.i - 1) - if stack.strict == true { - stack.i++ - break - } - } - stack.term.(Tuple)[stack.i] = term - stack.i++ - - case ettMap: - if stack.i == 0 { - // if the first key is atom, check for the registered type - if typeName, isAtom := term.(Atom); isAtom == true { - registered.RLock() - r, found := registered.typesDec[typeName] - registered.RUnlock() - if found == true { - if r.rtype.Kind() == reflect.Map { - reg := reflect.MakeMapWithSize(r.rtype, stack.children/2) - if r.rtype.Key().Kind() != reflect.Interface { - stack.regKeyKind = r.rtype.Key().Kind() - } - if r.rtype.Elem().Kind() != reflect.Interface { - stack.regValueKind = r.rtype.Elem().Kind() - } - stack.reg = ® - } else { - if r.strict { - panic("destination value of registered type is not a map") - } - } - stack.strict = r.strict - if r.strict == false { - if stack.i&0x01 == 0x01 { // a value - stack.term.(Map)[stack.tmp] = term - } else { - stack.tmp = term - } - } - stack.i++ - break - } - } - } - if stack.i == 1 && stack.reg != nil && stack.strict == true { - // skip it. the value of the key which is the registered type - stack.i++ - break - - } - if stack.i&0x01 == 0x01 { // a value - if stack.i > 1 && stack.reg != nil { - set_field = true - field = *stack.reg - } - // Erlang can use any value as a key in the map. - // OTP 25 sends a message to the 'global_name_server' process - // with etf.Tuple as a key in the map, so it caused a panic - // and this connection is dropping. - switch stack.tmp.(type) { - case Tuple: - lib.Warning("Erlang sent a etf.Tuple as a map key: %#v => %#v. Ignored this item. 
Ergo doesn't support it.", stack.tmp, term) - default: - stack.term.(Map)[stack.tmp] = term - } - stack.i++ - break - } - - // a key - stack.tmp = term - stack.i++ - - case ettPid: - if len(packet) < 9 { - return nil, nil, errMalformedPid - } - - name, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedPid - } - pid := Pid{ - Node: name, - // Same as NEW_PID_EXT except the Creation field is - // only one byte and only two bits are significant, - // the rest are to be 0. - Creation: uint32(packet[8]) & 3, - } - - id := uint64(binary.BigEndian.Uint32(packet[:4])) - serial := uint64(binary.BigEndian.Uint32(packet[4:8])) - if options.FlagBigPidRef { - id = id | (serial << 32) - } else { - // id 15 bits only 2**15 - 1 = 32767 - // serial 13 bits only 2**13 - 1 = 8191 - id = (id & 32767) | ((serial & 8191) << 15) - } - pid.ID = id - - packet = packet[9:] - stack.term = pid - stack.i++ - - case ettNewPid: - if len(packet) < 12 { - return nil, nil, errMalformedNewPid - } - - name, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedPid - } - - id := uint64(binary.BigEndian.Uint32(packet[:4])) - serial := uint64(binary.BigEndian.Uint32(packet[4:8])) - pid := Pid{ - Node: name, - Creation: binary.BigEndian.Uint32(packet[8:12]), - } - if options.FlagBigPidRef { - id = id | (serial << 32) - } else { - // id 15 bits only 2**15 - 1 = 32767 - // serial 13 bits only 2**13 - 1 = 8191 - id = (id & 32767) | ((serial & 8191) << 15) - } - pid.ID = id - - packet = packet[12:] - stack.term = pid - stack.i++ - - case ettNewRef: - var id uint32 - name, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedRef - } - - l := stack.tmp.(uint16) - if l > 5 { - return nil, nil, errMalformedRef - } - if l > 3 && !options.FlagBigPidRef { - return nil, nil, errMalformedRef - } - stack.tmp = nil - expectedLength := int(1 + l*4) - - if len(packet) < expectedLength { - return nil, nil, errMalformedRef - } - - ref := Ref{ - Node: name, - Creation: uint32(packet[0]), - } - packet = packet[1:] - - for i := 0; i < int(l); i++ { - // In the first word (4 bytes) of ID, only 18 bits - // are significant, the rest must be 0. - if i == 0 { - // 2**18 - 1 = 262143 - id = binary.BigEndian.Uint32(packet[:4]) & 262143 - } else { - id = binary.BigEndian.Uint32(packet[:4]) - } - ref.ID[i] = id - packet = packet[4:] - } - - stack.term = ref - stack.i++ - - case ettNewerRef: - var id uint32 - name, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedRef - } - - l := stack.tmp.(uint16) - if l > 5 { - return nil, nil, errMalformedRef - } - if l > 3 && !options.FlagBigPidRef { - return nil, nil, errMalformedRef - } - stack.tmp = nil - expectedLength := int(4 + l*4) - - if len(packet) < expectedLength { - return nil, nil, errMalformedRef - } - - ref := Ref{ - Node: name, - Creation: binary.BigEndian.Uint32(packet[:4]), - } - packet = packet[4:] - - for i := 0; i < int(l); i++ { - // In the first word (4 bytes) of ID, only 18 bits - // are significant, the rest must be 0. 
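The ettNewRef and ettNewerRef branches share this ID-word loop. A minimal, self-contained sketch of the masking rule it applies (readRefWords is an illustrative name, not part of this package):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // readRefWords reads l big-endian 4-byte ID words of a reference.
    // Only the low 18 bits of the first word are significant; the rest
    // must be zero (2**18 - 1 = 262143), which is why word 0 is masked.
    func readRefWords(packet []byte, l int) ([]uint32, []byte) {
    	ids := make([]uint32, l)
    	for i := 0; i < l; i++ {
    		id := binary.BigEndian.Uint32(packet[:4])
    		if i == 0 {
    			id &= 262143
    		}
    		ids[i] = id
    		packet = packet[4:]
    	}
    	return ids, packet
    }

    func main() {
    	packet := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0, 1}
    	ids, rest := readRefWords(packet, 2)
    	fmt.Println(ids, len(rest)) // [262143 1] 0
    }
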
- if i == 0 { - // 2**18 - 1 = 262143 - id = binary.BigEndian.Uint32(packet[:4]) & 262143 - } else { - id = binary.BigEndian.Uint32(packet[:4]) - } - ref.ID[i] = id - packet = packet[4:] - } - - stack.term = ref - stack.i++ - - case ettPort: - if len(packet) < 5 { - return nil, nil, errMalformedPort - } - - name, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedPort - } - - port := Port{ - Node: name, - ID: binary.BigEndian.Uint32(packet[:4]), - Creation: uint32(packet[4]), - } - - packet = packet[5:] - stack.term = port - stack.i++ - - case ettNewPort: - if len(packet) < 8 { - return nil, nil, errMalformedNewPort - } - - name, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedNewPort - } - - port := Port{ - Node: name, - ID: binary.BigEndian.Uint32(packet[:4]), - Creation: binary.BigEndian.Uint32(packet[4:8]), - } - - packet = packet[8:] - stack.term = port - stack.i++ - - case ettNewFun: - fun := stack.term.(Function) - switch stack.i { - case 0: - // Module - module, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedFun - } - fun.Module = module - - case 1: - // OldIndex - oldindex, ok := term.(int) - if !ok { - return nil, nil, errMalformedFun - } - fun.OldIndex = uint32(oldindex) - - case 2: - // OldUnique - olduniq, ok := term.(int64) - if !ok { - return nil, nil, errMalformedFun - } - fun.OldUnique = uint32(olduniq) - - case 3: - // Pid - pid, ok := term.(Pid) - if !ok { - return nil, nil, errMalformedFun - } - fun.Pid = pid - - default: - if len(fun.FreeVars) < (stack.i-4)+1 { - return nil, nil, errMalformedFun - } - fun.FreeVars[stack.i-4] = term - } - - stack.term = fun - stack.i++ - - case ettExport: - exp := stack.term.(Export) - switch stack.i { - case 0: - module, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedExport - } - exp.Module = module - - case 1: - function, ok := term.(Atom) - if !ok { - return nil, nil, errMalformedExport - } - exp.Function = function - - case 2: - arity, ok := term.(int) - if !ok { - return nil, nil, errMalformedExport - } - exp.Arity = arity - - default: - return nil, nil, errMalformedExport - - } - - default: - return nil, nil, errInternal - } - - if field.Kind() == reflect.Ptr { - pfield := reflect.New(field.Type().Elem()) - field.Set(pfield) - field = pfield.Elem() - } - - if set_field && term != nil { - switch field.Kind() { - case reflect.Int8: - switch v := term.(type) { - case int: - if v > math.MaxInt8 || v < math.MinInt8 { - // overflows - if stack.strict { - panic("overflows int8") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - case int64: - if v > math.MaxInt8 || v < math.MinInt8 { - // overflows - if stack.strict { - panic("overflows int8") - } - stack.reg = nil - break - } - field.SetInt(v) - case uint64: - if v > math.MaxInt8 { - // overflows - if stack.strict { - panic("overflows int8") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - default: - if stack.strict { - panic("wrong int8 value") - } - stack.reg = nil - } - - case reflect.Int16: - switch v := term.(type) { - case int: - if v > math.MaxInt16 || v < math.MinInt16 { - // overflows - if stack.strict { - panic("overflows int16") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - case int64: - if v > math.MaxInt16 || v < math.MinInt16 { - // overflows - if stack.strict { - panic("overflows int16") - } - stack.reg = nil - break - } - field.SetInt(v) - case uint64: - if v > math.MaxInt16 { - // overflows - if stack.strict { - panic("overflows int16") - } - stack.reg = nil - break - } - 
field.SetInt(int64(v)) - default: - if stack.strict { - panic("wrong int16 value") - } - stack.reg = nil - } - - case reflect.Int32: - switch v := term.(type) { - case int: - if v > math.MaxInt32 || v < math.MinInt32 { - // overflows - if stack.strict { - panic("overflows int32") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - case int64: - if v > math.MaxInt32 || v < math.MinInt32 { - // overflows - if stack.strict { - panic("overflows int32") - } - stack.reg = nil - break - } - field.SetInt(v) - case uint64: - if v > math.MaxInt32 { - // overflows - if stack.strict { - panic("overflows int32") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - default: - if stack.strict { - panic("wrong int32 value") - } - stack.reg = nil - } - case reflect.Int64: - switch v := term.(type) { - case int: - field.SetInt(int64(v)) - case int64: - field.SetInt(v) - case uint64: - if v > math.MaxInt64 { - // overflows - if stack.strict { - panic("overflows int64") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - default: - if stack.strict { - panic("wrong int64 value") - } - stack.reg = nil - } - case reflect.Int: - switch v := term.(type) { - case int: - field.SetInt(int64(v)) - case int64: - if v > math.MaxInt { - // overflows - if stack.strict { - panic("overflows int") - } - stack.reg = nil - break - } - field.SetInt(v) - case uint64: - if v > math.MaxInt { - // overflows - if stack.strict { - panic("overflows int") - } - stack.reg = nil - break - } - field.SetInt(int64(v)) - default: - if stack.strict { - panic("wrong int value") - } - stack.reg = nil - } - - case reflect.Uint8: - switch v := term.(type) { - case int: - if int64(v) > math.MaxUint8 || v < 0 { - // overflows - if stack.strict { - panic("overflows uint8") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case int64: - if v > math.MaxUint8 || v < 0 { - // overflows - if stack.strict { - panic("overflows uint8") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case uint64: - if v > math.MaxUint8 { - // overflows - if stack.strict { - panic("overflows uint8") - } - stack.reg = nil - break - } - field.SetUint(v) - default: - if stack.strict { - panic("wrong uint8 value") - } - stack.reg = nil - } - - case reflect.Uint16: - switch v := term.(type) { - case int: - if int64(v) > math.MaxUint16 || v < 0 { - // overflows - if stack.strict { - panic("overflows uint16") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case int64: - if v > math.MaxUint16 || v < 0 { - // overflows - if stack.strict { - panic("overflows uint16") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case uint64: - if v > math.MaxUint16 { - // overflows - if stack.strict { - panic("overflows uint16") - } - stack.reg = nil - break - } - field.SetUint(v) - default: - if stack.strict { - panic("wrong uint16 value") - } - stack.reg = nil - } - case reflect.Uint32: - switch v := term.(type) { - case int: - if int64(v) > math.MaxUint32 || v < 0 { - // overflows - if stack.strict { - panic("overflows uint32") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case int64: - if v > math.MaxUint32 || v < 0 { - // overflows - if stack.strict { - panic("overflows uint32") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case uint64: - if v > math.MaxUint32 { - // overflows - if stack.strict { - panic("overflows uint32") - } - stack.reg = nil - break - } - field.SetUint(v) - default: - if stack.strict { - panic("wrong uint32 value") - } - stack.reg = nil - } - - case 
reflect.Uint64: - switch v := term.(type) { - case int: - if v < 0 { - // overflows - if stack.strict { - panic("overflows uint64") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case int64: - if v < 0 { - // overflows - if stack.strict { - panic("overflows uint64") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case uint64: - field.SetUint(v) - default: - if stack.strict { - panic("wrong uint64 value") - } - stack.reg = nil - } - - case reflect.Uint: - switch v := term.(type) { - case int: - if v < 0 { - // overflows - if stack.strict { - panic("overflows uint") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case int64: - if v > math.MaxInt || v < 0 { - // overflows - if stack.strict { - panic("overflows uint") - } - stack.reg = nil - break - } - field.SetUint(uint64(v)) - case uint64: - if v > math.MaxUint { - // overflows - if stack.strict { - panic("overflows uint") - } - stack.reg = nil - break - } - field.SetUint(v) - default: - if stack.strict { - panic("wrong uint value") - } - stack.reg = nil - } - case reflect.Float32: - f, ok := term.(float64) - if ok == false { - if stack.strict { - panic("wrong float value") - } - stack.reg = nil - break - } - field.SetFloat(f) - - case reflect.Float64: - f64, ok := term.(float64) - if ok == false { - if stack.strict { - panic("wrong float64 value") - } - stack.reg = nil - break - } - field.SetFloat(f64) - - case reflect.String: - switch v := term.(type) { - case List: - s, err := convertCharlistToString(v) - if err != nil { - if stack.strict { - panic("can't convert charlist into string") - } - stack.reg = nil - break - } - field.SetString(s) - case []byte: - field.SetString(string(v)) - case string: - field.SetString(v) - case Atom: - field.SetString(string(v)) - default: - if stack.strict { - panic("wrong string value") - } - stack.reg = nil - } - case reflect.Bool: - b, ok := term.(bool) - if !ok { - if stack.strict { - panic("wrong bool value") - } - stack.reg = nil - break - } - field.SetBool(b) - - case reflect.Map: - if stack.tmp == nil { - field.Set(reflect.ValueOf(term)) - break - } - destkey := reflect.ValueOf(stack.tmp) - destval := reflect.ValueOf(term) - stack.reg.SetMapIndex(destkey, destval) - - default: - if stack.strict { - if field.Type().Name() == "Alias" { - term = Alias(term.(Ref)) - } - field.Set(reflect.ValueOf(term)) - } else { - // wrap it to catch the panic - setValue := func(f reflect.Value, v interface{}) (ok bool) { - if lib.CatchPanic() { - defer func() { - if r := recover(); r != nil { - ok = false - } - }() - } - if field.Type().Name() == "Alias" { - v = Alias(v.(Ref)) - } - f.Set(reflect.ValueOf(v)) - return true - } - if setValue(field, term) == false { - stack.reg = nil - } - - } - } - - } - - } - - // we are still decoding children of List/Map/Tuple/... - if stack.i < stack.children { - continue - } - - if stack.reg != nil { - term = (*stack.reg).Interface() - } else { - term = stack.term - } - - // this term was the last element of List/Map/Tuple/... 
- // pop from the stack, but if it's the root just finish - if stack.parent == nil { - break - } - - stack, stack.parent = stack.parent, nil // nil here is just a little help for GC - goto processStack - - } - - return term, packet, nil -} diff --git a/etf/decode_test.go b/etf/decode_test.go deleted file mode 100644 index 0aa2c64f..00000000 --- a/etf/decode_test.go +++ /dev/null @@ -1,707 +0,0 @@ -package etf - -import ( - "math/big" - "reflect" - "testing" -) - -func TestDecodeAtom(t *testing.T) { - expected := Atom("abc") - packet := []byte{ettAtomUTF8, 0, 3, 97, 98, 99} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expected { - t.Fatal(err) - } - - packet = []byte{ettSmallAtomUTF8, 3, 97, 98, 99} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expected { - t.Fatal(err) - } - - packet = []byte{ettSmallAtomUTF8, 4, 97, 98, 99} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != errMalformedSmallAtomUTF8 { - t.Fatal(err) - } - - packet = []byte{119} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != errMalformedSmallAtomUTF8 { - t.Fatal(err) - } - -} - -func TestDecodeString(t *testing.T) { - expected := "abc" - packet := []byte{ettString, 0, 3, 97, 98, 99} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expected { - t.Fatal(err) - } - - packet = []byte{ettString, 3, 97, 98, 99} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != errMalformedString { - t.Fatal(err) - } -} - -func TestDecodeNewFloat(t *testing.T) { - expected := float64(2.1) - packet := []byte{ettNewFloat, 64, 0, 204, 204, 204, 204, 204, 205} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expected { - t.Fatal(err) - } - - packet = []byte{ettNewFloat, 64, 0, 204, 204, 204, 204, 204} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != errMalformedNewFloat { - t.Fatal(err) - } -} - -func TestDecodeInteger(t *testing.T) { - expected := int(88) - packet := []byte{ettSmallInteger, 88} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expected { - t.Fatal(err) - } - - packet = []byte{ettSmallInteger} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != errMalformedSmallInteger { - t.Fatal(err) - } - - expectedInteger := int64(-1234567890) - packet = []byte{ettInteger, 182, 105, 253, 46} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expectedInteger { - t.Fatal(err, expectedInteger, term) - } - - packet = []byte{ettInteger, 182, 105, 253} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != errMalformedInteger { - t.Fatal(err) - } - - //-9223372036854775808 - expectedBigInt64 := int64(-9223372036854775808) - packet = []byte{ettSmallBig, 8, 1, 0, 0, 0, 0, 0, 0, 0, 128} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expectedBigInt64 { - t.Fatal(err, term, expectedBigInt64) - } - - largeBigString := 
"-12345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890987654321012345678909876543210123456789098765432101234567890" - - bigInt := new(big.Int) - bigInt.SetString(largeBigString, 10) - packet = []byte{ettLargeBig, 0, 0, 1, 139, 1, 210, 106, 44, 197, 54, 176, 151, 83, 243, - 228, 193, 133, 194, 193, 21, 90, 196, 4, 252, 150, 226, 188, 79, 11, 8, - 190, 106, 189, 21, 73, 176, 196, 54, 221, 118, 232, 212, 126, 141, - 118, 154, 78, 238, 143, 34, 118, 245, 135, 17, 231, 224, 86, 71, 12, - 175, 207, 224, 19, 206, 5, 241, 241, 207, 125, 243, 87, 18, 14, 162, - 71, 3, 244, 85, 240, 211, 12, 141, 5, 38, 124, 232, 122, 104, 228, 36, - 40, 124, 109, 196, 20, 94, 46, 167, 215, 107, 53, 51, 28, 45, 249, 146, - 151, 18, 11, 246, 151, 220, 138, 139, 97, 63, 166, 255, 101, 12, 153, - 247, 201, 62, 9, 131, 235, 67, 85, 13, 151, 200, 233, 239, 35, 224, 10, - 101, 144, 107, 82, 206, 71, 226, 67, 212, 254, 15, 29, 122, 128, 38, - 230, 60, 97, 146, 52, 241, 216, 220, 114, 82, 90, 166, 207, 31, 63, - 112, 254, 19, 111, 225, 104, 159, 133, 186, 15, 5, 93, 220, 56, 6, 4, - 197, 4, 196, 204, 94, 34, 144, 141, 31, 165, 188, 241, 105, 197, 82, - 69, 77, 136, 207, 152, 76, 112, 79, 57, 159, 232, 165, 215, 0, 164, - 231, 132, 124, 252, 90, 91, 71, 198, 254, 203, 83, 96, 42, 35, 240, - 218, 174, 37, 112, 86, 218, 203, 135, 7, 88, 24, 245, 50, 173, 72, 133, - 70, 2, 160, 235, 61, 151, 28, 124, 173, 254, 244, 37, 96, 19, 40, 192, - 194, 51, 75, 51, 186, 229, 93, 142, 165, 50, 43, 129, 0, 78, 253, 159, - 105, 151, 150, 253, 24, 109, 22, 123, 95, 55, 143, 126, 122, 109, 57, - 73, 240, 191, 25, 140, 131, 27, 64, 252, 238, 174, 211, 89, 167, 38, - 137, 32, 176, 174, 122, 64, 66, 171, 175, 113, 174, 247, 236, 67, 180, - 179, 23, 58, 17, 117, 223, 18, 184, 223, 156, 151, 179, 18, 84, 145, - 16, 18, 194, 121, 19, 186, 170, 49, 21, 7, 108, 174, 89, 59, 53, 62, - 247, 232, 9, 184, 242, 60, 137, 96, 54, 183, 89, 206, 219, 81, 208, - 214, 197, 254, 207, 3, 41, 224, 169, 181, 56, 132, 18, 116, 141, 89, - 185, 133, 186, 46, 81, 244, 139, 188, 171, 206, 52, 225, 160, 232, - 246, 254, 193, 1} - - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || bigInt.Cmp(term.(*big.Int)) != 0 { - t.Fatal(err, term, bigInt) - } - - //-123456789098 should be treated as int64 - expectedInt64 := int64(-123456789098) - packet = []byte{ettSmallBig, 5, 1, 106, 26, 153, 190, 28} - term, _, err = Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil || term != expectedInt64 { - t.Fatal(err, term, expectedInt64) - } - - // 18446744073709551615 - expectedUint64 := uint64(18446744073709551615) - packet = []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 255} - term, _, err = Decode(packet, []Atom{}, 
DecodeOptions{}) - if err != nil || term != expectedUint64 { - t.Fatal(err, term, expectedUint64) - } -} - -func TestDecodeList(t *testing.T) { - expected := List{3.14, Atom("abc"), int64(987654321)} - packet := []byte{ettList, 0, 0, 0, 3, 70, 64, 9, 30, 184, 81, 235, 133, 31, 100, 0, 3, 97, - 98, 99, 98, 58, 222, 104, 177, 106} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - result := term.(List) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } - -} - -func TestDecodeListNested(t *testing.T) { - // [1,[2,3,[4,5],6]] - expected := List{1, List{2, 3, List{4, 5}, 6}} - packet := []byte{108, 0, 0, 0, 2, 97, 1, 108, 0, 0, 0, 4, 97, 2, 97, 3, 108, 0, 0, 0, 2, 97, 4, 97, 5, 106, - 97, 6, 106, 106} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(List) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeTuple(t *testing.T) { - expected := Tuple{3.14, Atom("abc"), int64(987654321)} - packet := []byte{ettSmallTuple, 3, 70, 64, 9, 30, 184, 81, 235, 133, 31, 100, 0, 3, 97, 98, 99, - 98, 58, 222, 104, 177} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - result := term.(Tuple) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeMap(t *testing.T) { - expected := Map{ - Atom("abc"): 123, - "abc": 4.56, - } - packet := []byte{116, 0, 0, 0, 2, 100, 0, 3, 97, 98, 99, 97, 123, 107, 0, 3, 97, 98, - 99, 70, 64, 18, 61, 112, 163, 215, 10, 61} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Map) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } - -} - -func TestDecodeBinary(t *testing.T) { - expected := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0} - packet := []byte{ettBinary, 0, 0, 0, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.([]byte) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeBitBinary(t *testing.T) { - expected := []byte{1, 2, 3, 4, 5} - packet := []byte{77, 0, 0, 0, 5, 3, 1, 2, 3, 4, 160} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.([]byte) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } - -} - -func TestDecodePid(t *testing.T) { - expected := Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 142, - Creation: 2, - } - packet := []byte{103, 100, 0, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, - 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 142, 0, 0, 0, 0, 2} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Pid) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodePidWithCacheAtom(t *testing.T) { - expected := Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 142, - Creation: 2, - } - packet := []byte{103, ettCacheRef, 0, 0, 0, 0, 142, 0, 0, 0, 0, 2} - cache := []Atom{Atom("erl-demo@127.0.0.1")} - term, _, err := Decode(packet, cache, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Pid) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeRef(t *testing.T) { - expected 
:= Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{73444, 3082813441, 2373634851}, - } - packet := []byte{114, 0, 3, 100, 0, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, - 49, 50, 55, 46, 48, 46, 48, 46, 49, 2, 0, 1, 30, 228, 183, 192, 0, 1, 141, - 122, 203, 35} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Ref) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeRefWithAtomCache(t *testing.T) { - expected := Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{73444, 3082813441, 2373634851}, - } - packet := []byte{114, 0, 3, ettCacheRef, 0, 2, 0, 1, 30, 228, 183, 192, 0, 1, 141, - 122, 203, 35} - cache := []Atom{Atom("erl-demo@127.0.0.1")} - - term, _, err := Decode(packet, cache, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Ref) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} -func TestDecodeTupleRefPid(t *testing.T) { - expected := Tuple{ - Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{0x11f1c, 0xb7c00001, 0x8d7acb23}}, - Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 0x8e, - Creation: 0x2}} - packet := []byte{ettSmallTuple, 2, ettNewRef, 0, 3, ettAtom, 0, 18, 101, 114, 108, 45, 100, 101, 109, - 111, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 2, 0, 1, 31, 28, 183, 192, 0, - 1, 141, 122, 203, 35, 103, 100, 0, 18, 101, 114, 108, 45, 100, 101, - 109, 111, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 142, 0, 0, 0, 0, - 2} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Tuple) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeTupleRefPidWithAtomCache(t *testing.T) { - expected := Tuple{ - Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{0x11f1c, 0xb7c00001, 0x8d7acb23}}, - Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 0x8e, - Creation: 0x2}} - packet := []byte{ettSmallTuple, 2, ettNewRef, 0, 3, ettCacheRef, 0, - 2, 0, 1, 31, 28, 183, 192, 0, 1, 141, 122, 203, 35, 103, ettCacheRef, 0, - 0, 0, 0, 142, 0, 0, 0, 0, 2} - cache := []Atom{Atom("erl-demo@127.0.0.1")} - term, _, err := Decode(packet, cache, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Tuple) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} -func TestDecodePort(t *testing.T) { - expected := Port{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: 32, - } - packet := []byte{102, 100, 0, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, - 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 32, 2} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Port) - if !reflect.DeepEqual(expected, result) { - t.Fatal("result != expected") - } -} - -func TestDecodeComplex(t *testing.T) { - //{"hello",[], #{v1 => [{3,13,3.13}, {abc, "abc"}], v2 => 12345}}. 
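One remark on the cache-based tests above, before the complex-term case spelled out in the comment: the receiver-side atom cache is passed to Decode as a plain []Atom slice, and each ettCacheRef entry in the packet selects an atom by its index into that slice. A condensed usage sketch along the lines of TestDecodePidWithCacheAtom (assumed to run inside this package, since the ett* tags are unexported):

    // The node atom of this PID_EXT is taken from cache entry 0.
    packet := []byte{103, ettCacheRef, 0, 0, 0, 0, 142, 0, 0, 0, 0, 2}
    cache := []Atom{Atom("erl-demo@127.0.0.1")}

    term, rest, err := Decode(packet, cache, DecodeOptions{})
    if err != nil {
    	panic(err) // malformed input yields one of the errMalformed* values
    }
    pid := term.(Pid) // Pid{Node: "erl-demo@127.0.0.1", ID: 142, Creation: 2}
    _, _ = pid, rest  // rest holds any unconsumed trailing bytes
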
- expected := Tuple{"hello", List{}, - Map{Atom("v1"): List{Tuple{3, 13, 3.13}, Tuple{Atom("abc"), "abc"}}, - Atom("v2"): int64(12345)}} - packet := []byte{104, 3, 107, 0, 5, 104, 101, 108, 108, 111, 106, 116, 0, 0, 0, 2, - 100, 0, 2, 118, 49, 108, 0, 0, 0, 2, 104, 3, 97, 3, 97, 13, 70, 64, 9, 10, - 61, 112, 163, 215, 10, 104, 2, 100, 0, 3, 97, 98, 99, 107, 0, 3, 97, 98, - 99, 106, 100, 0, 2, 118, 50, 98, 0, 0, 48, 57} - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - result := term.(Tuple) - if !reflect.DeepEqual(expected, result) { - t.Errorf("got %#v, want %#v", result, expected) - t.Fatal("result != expected") - } -} - -var packetFunction []byte - -func TestDecodeFunction(t *testing.T) { - // A = fun(X) -> X*2 end. - packet := []byte{112, 0, 0, 3, 76, 1, 245, 82, 198, 227, 120, 209, 152, 67, 80, 234, - 138, 144, 123, 165, 151, 196, 0, 0, 0, 6, 0, 0, 0, 1, 100, 0, 8, 101, 114, - 108, 95, 101, 118, 97, 108, 97, 6, 98, 7, 170, 150, 55, 103, 100, 0, 20, - 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, 50, 55, 46, 48, - 46, 48, 46, 49, 0, 0, 0, 83, 0, 0, 0, 0, 1, 104, 4, 106, 104, 2, 100, 0, 4, - 101, 118, 97, 108, 112, 0, 0, 2, 29, 3, 196, 150, 93, 173, 104, 167, - 134, 253, 184, 200, 203, 147, 166, 63, 88, 201, 0, 0, 0, 21, 0, 0, 0, 4, - 100, 0, 5, 115, 104, 101, 108, 108, 97, 21, 98, 6, 36, 178, 237, 103, - 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, 50, - 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 83, 0, 0, 0, 0, 1, 104, 2, 100, 0, 5, - 118, 97, 108, 117, 101, 112, 0, 0, 0, 110, 2, 196, 150, 93, 173, 104, - 167, 134, 253, 184, 200, 203, 147, 166, 63, 88, 201, 0, 0, 0, 5, 0, 0, 0, - 1, 100, 0, 5, 115, 104, 101, 108, 108, 97, 5, 98, 6, 36, 178, 237, 103, - 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, 50, - 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 83, 0, 0, 0, 0, 1, 103, 100, 0, 20, - 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, 50, 55, 46, 48, - 46, 48, 46, 49, 0, 0, 0, 77, 0, 0, 0, 0, 1, 114, 0, 3, 100, 0, 20, 101, 114, - 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, 50, 55, 46, 48, 46, 48, 46, - 49, 1, 0, 3, 219, 136, 225, 146, 0, 9, 253, 168, 114, 208, 103, 100, 0, - 20, 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, 50, 55, 46, - 48, 46, 48, 46, 49, 0, 0, 0, 77, 0, 0, 0, 0, 1, 112, 0, 0, 1, 14, 1, 196, - 150, 93, 173, 104, 167, 134, 253, 184, 200, 203, 147, 166, 63, 88, - 201, 0, 0, 0, 12, 0, 0, 0, 3, 100, 0, 5, 115, 104, 101, 108, 108, 97, 12, - 98, 6, 36, 178, 237, 103, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, - 111, 50, 50, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 83, 0, 0, 0, - 0, 1, 104, 2, 100, 0, 5, 118, 97, 108, 117, 101, 112, 0, 0, 0, 110, 2, - 196, 150, 93, 173, 104, 167, 134, 253, 184, 200, 203, 147, 166, 63, - 88, 201, 0, 0, 0, 5, 0, 0, 0, 1, 100, 0, 5, 115, 104, 101, 108, 108, 97, 5, - 98, 6, 36, 178, 237, 103, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, - 111, 50, 50, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 83, 0, 0, 0, - 0, 1, 103, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, - 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 77, 0, 0, 0, 0, 1, 114, 0, - 3, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, 64, 49, - 50, 55, 46, 48, 46, 48, 46, 49, 1, 0, 3, 219, 136, 225, 146, 0, 9, 253, - 168, 114, 208, 103, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, 111, - 50, 50, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 77, 0, 0, 0, 0, 1, - 104, 2, 100, 0, 5, 118, 97, 108, 117, 101, 112, 0, 0, 0, 110, 2, 196, - 150, 
93, 173, 104, 167, 134, 253, 184, 200, 203, 147, 166, 63, 88, - 201, 0, 0, 0, 5, 0, 0, 0, 1, 100, 0, 5, 115, 104, 101, 108, 108, 97, 5, 98, - 6, 36, 178, 237, 103, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, - 111, 50, 50, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 83, 0, 0, 0, - 0, 1, 103, 100, 0, 20, 101, 114, 108, 45, 100, 101, 109, 111, 50, 50, - 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 77, 0, 0, 0, 0, 1, 108, 0, - 0, 0, 1, 104, 5, 100, 0, 6, 99, 108, 97, 117, 115, 101, 97, 1, 108, 0, 0, - 0, 1, 104, 3, 100, 0, 3, 118, 97, 114, 97, 1, 100, 0, 1, 78, 106, 106, - 108, 0, 0, 0, 1, 104, 5, 100, 0, 2, 111, 112, 97, 1, 100, 0, 1, 42, 104, 3, - 100, 0, 3, 118, 97, 114, 97, 1, 100, 0, 1, 78, 104, 3, 100, 0, 7, 105, - 110, 116, 101, 103, 101, 114, 97, 1, 97, 2, 106, 106} - - packetFunction = packet // save for benchmark - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - -} - -func TestDecodeRegisteredType(t *testing.T) { - type regTypeStruct3 struct { - C string - } - type regTypeStruct4 struct { - A uint8 - B *regTypeStruct3 - } - if a, err := RegisterType(regTypeStruct3{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - - if a, err := RegisterType(regTypeStruct4{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - - expected := regTypeStruct4{} - expected.A = 123 - expected.B = ®TypeStruct3{ - C: "hello", - } - - packet := []byte{ettSmallTuple, 3, ettSmallAtomUTF8, 49, 35, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 101, 114, 103, 111, 45, 115, 101, 114, 118, 105, 99, 101, 115, 47, 101, 114, 103, 111, 47, 101, 116, 102, 47, 114, 101, 103, 84, 121, 112, 101, 83, 116, 114, 117, 99, 116, 52, ettSmallInteger, 123, ettSmallTuple, 2, ettSmallAtomUTF8, 49, 35, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 101, 114, 103, 111, 45, 115, 101, 114, 118, 105, 99, 101, 115, 47, 101, 114, 103, 111, 47, 101, 116, 102, 47, 114, 101, 103, 84, 121, 112, 101, 83, 116, 114, 117, 99, 116, 51, ettString, 0, 5, 104, 101, 108, 108, 111} - - term, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - switch tt := term.(type) { - case regTypeStruct4: - //fmt.Printf("TERM: %v %#v %#v\n", tt, tt, tt.B) - default: - t.Fatal("unknown type", tt) - } - -} - -// -// benchmarks -// - -func BenchmarkDecodeAtom(b *testing.B) { - packet := []byte{ettAtomUTF8, 0, 3, 97, 98, 99} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeString(b *testing.B) { - packet := []byte{ettString, 0, 3, 97, 98, 99} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeNewFloat(b *testing.B) { - packet := []byte{ettNewFloat, 64, 0, 204, 204, 204, 204, 204, 205} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeInteger(b *testing.B) { - packet := []byte{ettInteger, 182, 105, 253, 46} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeSmallBigInteger(b *testing.B) { - packet := []byte{ettSmallBig, 8, 1, 177, 28, 108, 177, 244, 16, 34, 17} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) 
- } - } -} - -func BenchmarkDecodeSmallBigIntegerWithinInt64Range(b *testing.B) { - packet := []byte{ettSmallBig, 5, 1, 106, 26, 153, 190, 28} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeList100Integer(b *testing.B) { - packet := []byte{} - packetInt := []byte{ettInteger, 182, 105, 253, 46} - packetList := []byte{ettList, 0, 0, 0, 100} - - packet = append(packet, packetList...) - packet = append(packet, byte(106)) - - for i := 0; i < 100; i++ { - packet = append(packet, packetInt...) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } - -} - -func BenchmarkDecodeTuple(b *testing.B) { - packet := []byte{ettSmallTuple, 3, 70, 64, 9, 30, 184, 81, 235, 133, 31, 100, 0, 3, 97, 98, 99, - 98, 58, 222, 104, 177} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodePid(b *testing.B) { - packet := []byte{103, 100, 0, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, - 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 142, 0, 0, 0, 0, 2} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodePidWithAtomCache(b *testing.B) { - packet := []byte{103, ettCacheRef, 0, 0, 0, 0, 142, 0, 0, 0, 0, 2} - cache := []Atom{Atom("erl-demo@127.0.0.1")} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, cache, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeRef(b *testing.B) { - packet := []byte{114, 0, 3, 100, 0, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, - 49, 50, 55, 46, 48, 46, 48, 46, 49, 2, 0, 1, 30, 228, 183, 192, 0, 1, 141, - 122, 203, 35} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeRefWithAtomCache(b *testing.B) { - packet := []byte{114, 0, 3, ettCacheRef, 0, 2, 0, 1, 30, 228, 183, 192, 0, 1, 141, - 122, 203, 35} - cache := []Atom{Atom("erl-demo@127.0.0.1")} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, cache, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} -func BenchmarkDecodePort(b *testing.B) { - packet := []byte{102, 100, 0, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, - 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 32, 2} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeTupleRefPid(b *testing.B) { - - packet := []byte{ettSmallTuple, 2, ettNewRef, 0, 3, ettAtom, 0, 18, 101, 114, 108, 45, 100, 101, 109, - 111, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 2, 0, 1, 31, 28, 183, 192, 0, - 1, 141, 122, 203, 35, 103, 100, 0, 18, 101, 114, 108, 45, 100, 101, - 109, 111, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 142, 0, 0, 0, 0, - 2} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, []Atom{}, DecodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDecodeTupleRefPidWithAtomCache(b *testing.B) { - - packet := []byte{ettSmallTuple, 2, ettNewRef, 0, 3, ettCacheRef, 0, - 2, 0, 1, 31, 28, 183, 192, 0, 1, 141, 122, 203, 35, 103, ettCacheRef, 0, - 0, 0, 0, 142, 0, 0, 0, 0, 2} - cache := []Atom{Atom("erl-demo@127.0.0.1")} - for i := 0; i < b.N; i++ { - _, _, err := Decode(packet, cache, DecodeOptions{}) - if err 
!= nil { - b.Fatal(err) - } - } -} diff --git a/etf/encode.go b/etf/encode.go deleted file mode 100644 index a13fe06a..00000000 --- a/etf/encode.go +++ /dev/null @@ -1,886 +0,0 @@ -package etf - -import ( - "encoding/binary" - "fmt" - "math" - "math/big" - "reflect" - - "github.com/ergo-services/ergo/lib" -) - -var ( - ErrStringTooLong = fmt.Errorf("Encoding error. String too long. Max allowed length is 65535") - ErrAtomTooLong = fmt.Errorf("Encoding error. Atom too long. Max allowed UTF-8 chars is 255") - - // internal types - goSlice = byte(240) - goMap = byte(241) - goStruct = byte(242) - goSliceRegistered = byte(243) - goMapRegistered = byte(244) - goStructRegistered = byte(245) - - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// EncodeOptions -type EncodeOptions struct { - AtomCache *AtomCacheOut - SenderAtomCache map[Atom]CacheItem - EncodingAtomCache *EncodingAtomCache - AtomMapping *AtomMapping - - // FlagBigPidRef The node accepts a larger amount of data in pids - // and references (node container types version 4). - // In the pid case full 32-bit ID and Serial fields in NEW_PID_EXT - // and in the reference case up to 5 32-bit ID words are now - // accepted in NEWER_REFERENCE_EXT. Introduced in OTP 24. - FlagBigPidRef bool - - // FlagBigCreation The node understands big node creation tags NEW_PID_EXT, - // NEWER_REFERENCE_EXT. - FlagBigCreation bool - - NodeName string - PeerName string -} - -// Encode -func Encode(term Term, b *lib.Buffer, options EncodeOptions) (retErr error) { - if lib.CatchPanic() { - defer func() { - // We should catch any panic happened during encoding Golang types. - if r := recover(); r != nil { - retErr = fmt.Errorf("%v", r) - } - }() - } - var stack, child *stackElement - - cacheEnabled := options.AtomCache != nil - cacheIndex := int16(0) - if cacheEnabled { - cacheIndex = int16(len(options.EncodingAtomCache.L)) - } - - // Atom cache: (if its enabled: options.AtomCache != nil) - // 1. check for an atom in options.WriterAtomCache (map) - // 2. if not found in WriterAtomCache call AtomCache.Append(atom), - // encode it as a regular atom (ettAtom*) - // 3. if found - // add options.EncodingAtomCache[i] = CacheItem, where i is just a counter - // within this encoding process. - - // encode atom as ettCacheRef with value = i - for { - - child = nil - - if stack != nil { - - if stack.i == stack.children { - if stack.parent == nil { - return nil - } - stack, stack.parent = stack.parent, nil - continue - } - - switch stack.termType { - case ettList: - if stack.i == stack.children-1 { - // last item of list should be ettNil - term = nil - break - } - term = stack.term.(List)[stack.i] - case ettListImproper: - // improper list like [a|b] has no ettNil as a last item - term = stack.term.(ListImproper)[stack.i] - - case ettSmallTuple: - term = stack.term.(Tuple)[stack.i] - - case ettPid: - p := stack.term.(Pid) - if stack.i == 0 { - term = p.Node - break - } - - buf := b.Extend(9) - - // ID a 32-bit big endian unsigned integer. - // If FlagBigPidRef is not set, only 15 bits may be used - // and the rest must be 0. - if options.FlagBigPidRef { - binary.BigEndian.PutUint32(buf[:4], uint32(p.ID)) - } else { - // 15 bits only 2**15 - 1 = 32767 - binary.BigEndian.PutUint32(buf[:4], uint32(p.ID)&32767) - } - - // Serial a 32-bit big endian unsigned integer. - // If distribution FlagBigPidRef is not set, only 13 bits may be used - // and the rest must be 0. 
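Those two comments describe the 15/13-bit packing that the decoder performed on the way in: without FlagBigPidRef, ID and Serial live together in Pid.ID as (id & 32767) | ((serial & 8191) << 15), and the branches here split them back apart. A self-contained round-trip sketch (the helper names are illustrative, not part of the package):

    package main

    import "fmt"

    // packPid combines a 15-bit ID and a 13-bit Serial the way the decoder
    // does when FlagBigPidRef has not been negotiated.
    func packPid(id, serial uint32) uint64 {
    	return uint64(id&32767) | uint64(serial&8191)<<15
    }

    // unpackPid splits the combined value again, matching the encoder branches.
    func unpackPid(packed uint64) (id, serial uint32) {
    	return uint32(packed) & 32767, uint32(packed>>15) & 8191
    }

    func main() {
    	packed := packPid(142, 7)
    	id, serial := unpackPid(packed)
    	fmt.Println(packed, id, serial) // 229518 142 7
    }
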
- if options.FlagBigPidRef { - binary.BigEndian.PutUint32(buf[4:8], uint32(p.ID>>32)) - } else { - // 13 bits only 2**13 - 1 = 8191 - binary.BigEndian.PutUint32(buf[4:8], (uint32(p.ID>>15) & 8191)) - } - - // Same as NEW_PID_EXT except the Creation field is - // only one byte and only two bits are significant, - // the rest are to be 0. - buf[8] = byte(p.Creation) & 3 - - stack.i++ - continue - - case ettNewPid: - p := stack.term.(Pid) - if stack.i == 0 { - term = p.Node - break - } - - buf := b.Extend(12) - // ID - if options.FlagBigPidRef { - binary.BigEndian.PutUint32(buf[:4], uint32(p.ID)) - } else { - // 15 bits only 2**15 - 1 = 32767 - binary.BigEndian.PutUint32(buf[:4], uint32(p.ID)&32767) - } - // Serial - if options.FlagBigPidRef { - binary.BigEndian.PutUint32(buf[4:8], uint32(p.ID>>32)) - } else { - // 13 bits only 2**13 - 1 = 8191 - binary.BigEndian.PutUint32(buf[4:8], (uint32(p.ID>>15))&8191) - } - // Creation - binary.BigEndian.PutUint32(buf[8:12], p.Creation) - - stack.i++ - continue - - case ettNewRef: - r := stack.term.(Ref) - if stack.i == 0 { - term = stack.term.(Ref).Node - break - } - - lenID := 3 - buf := b.Extend(1 + lenID*4) - // Only one byte long and only two bits are significant, the rest must be 0. - buf[0] = byte(r.Creation & 3) - buf = buf[1:] - for i := 0; i < lenID; i++ { - // In the first word (4 bytes) of ID, only 18 bits - // are significant, the rest must be 0. - if i == 0 { - // 2**18 - 1 = 262143 - binary.BigEndian.PutUint32(buf[:4], r.ID[i]&262143) - } else { - binary.BigEndian.PutUint32(buf[:4], r.ID[i]) - } - buf = buf[4:] - } - - stack.i++ - continue - - case ettNewerRef: - r := stack.term.(Ref) - if stack.i == 0 { - term = stack.term.(Ref).Node - break - } - - // // FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097 - // uncomment once they fix it - lenID := 3 - //if options.FlagBigPidRef { - // lenID = 5 - //} - buf := b.Extend(4 + lenID*4) - binary.BigEndian.PutUint32(buf[0:4], r.Creation) - buf = buf[4:] - for i := 0; i < lenID; i++ { - binary.BigEndian.PutUint32(buf[:4], r.ID[i]) - buf = buf[4:] - } - - stack.i++ - continue - - case ettMap: - key := stack.tmp.(List)[stack.i/2] - if stack.i&0x01 == 0x01 { // a value - term = stack.term.(Map)[key] - break - } - term = key - - case goMapRegistered: - if stack.i == 0 { // registered type name as a key - term = stack.tmp - break - } - if stack.i == 1 { // nil as a value for the key (registered type name) - term = nil - break - } - key := stack.term.([]reflect.Value)[(stack.i-2)/2] - if stack.i&0x01 == 0x01 { // a value - term = stack.reg.MapIndex(key).Interface() - break - } - term = key.Interface() // a key - - case goMap: - key := stack.tmp.([]reflect.Value)[stack.i/2] - if stack.i&0x01 == 0x01 { // a value - term = stack.term.(func(reflect.Value) reflect.Value)(key).Interface() - break - } - term = key.Interface() // a key - - case goSliceRegistered: - if stack.i == 0 { - term = stack.tmp - break - } - if stack.i == stack.children-1 { - // last item of list should be ettNil - term = nil - break - } - term = stack.term.(func(int) reflect.Value)(stack.i - 1).Interface() - - case goSlice: - if stack.i == stack.children-1 { - // last item of list should be ettNil - term = nil - break - } - term = stack.term.(func(int) reflect.Value)(stack.i).Interface() - - case goStructRegistered: - if stack.i == 0 { - // first item must be a struct name (stored in stack.tmp). 
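Before the field values are walked below, a usage-level view of this registered-struct path: the struct is emitted as an ordinary tuple whose first element is the registered type name, which is what lets the decoder rebuild the concrete Go value. A sketch, assuming it lives inside this package; the point type and helper are illustrative:

    // encodeRegisteredPoint registers an illustrative type and encodes a value of it.
    func encodeRegisteredPoint() (*lib.Buffer, error) {
    	type point struct {
    		X int8
    		Y int8
    	}
    	// Register the type; the returned name is what UnregisterType expects back.
    	name, err := RegisterType(point{}, RegisterTypeOptions{})
    	if err != nil {
    		return nil, err
    	}
    	defer UnregisterType(name)

    	b := lib.TakeBuffer()
    	// On the wire this is equivalent to encoding
    	// Tuple{<registered type name atom>, int8(1), int8(2)}.
    	if err := Encode(point{X: 1, Y: 2}, b, EncodeOptions{}); err != nil {
    		lib.ReleaseBuffer(b)
    		return nil, err
    	}
    	return b, nil
    }
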
- term = stack.tmp - break - } - // field value - term = stack.term.(func(int) reflect.Value)(stack.i - 1).Interface() - - case goStruct: - field := stack.tmp.(func(int) reflect.StructField)(stack.i / 2) - fieldName := field.Name - - if tag := field.Tag.Get("etf"); tag != "" { - fieldName = tag - } - - if stack.i&0x01 != 0x01 { // a key (field name) - term = Atom(fieldName) - break - } - - // a value - fvalue := stack.term.(func(int) reflect.Value)(stack.i / 2) - if fvalue.CanInterface() == false { - return fmt.Errorf("struct has unexported field %q", fieldName) - } - term = fvalue.Interface() - - default: - - return errInternal - } - - stack.i++ - } - - recasting: - switch t := term.(type) { - case bool: - - if cacheEnabled && cacheIndex < 255 { - value := Atom("false") - if t { - value = Atom("true") - } - - // looking for CacheItem - ci, found := options.SenderAtomCache[value] - if found { - i := options.EncodingAtomCache.Append(ci) - cacheIndex = int16(i + 1) - b.Append([]byte{ettCacheRef, byte(i)}) - break - } else { - // add it to the cache and encode as usual Atom - options.AtomCache.Append(value) - } - } - - if t { - b.Append([]byte{ettSmallAtom, 4, 't', 'r', 'u', 'e'}) - break - } - - b.Append([]byte{ettSmallAtom, 5, 'f', 'a', 'l', 's', 'e'}) - - // do not use reflect.ValueOf(t) because its too expensive - case uint8: - b.Append([]byte{ettSmallInteger, t}) - - case int8: - if t < 0 { - term = int32(t) - goto recasting - } - - b.Append([]byte{ettSmallInteger, uint8(t)}) - break - - case uint16: - if t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - term = int32(t) - goto recasting - - case int16: - if t >= 0 && t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - term = int32(t) - goto recasting - - case uint32: - if t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - if t > math.MaxInt32 { - term = int64(t) - goto recasting - } - - term = int32(t) - goto recasting - - case int32: - if t >= 0 && t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - // 1 (ettInteger) + 4 (32bit integer) - buf := b.Extend(1 + 4) - buf[0] = ettInteger - binary.BigEndian.PutUint32(buf[1:5], uint32(t)) - - case uint: - if t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - if t > math.MaxInt32 { - term = int64(t) - goto recasting - } - - term = int32(t) - goto recasting - - case int: - if t >= 0 && t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - if t > math.MaxInt32 || t < math.MinInt32 { - term = int64(t) - goto recasting - } - - term = int32(t) - goto recasting - - case uint64: - if t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - if t <= math.MaxInt32 { - term = int32(t) - goto recasting - } - - if t <= math.MaxInt64 { - term = int64(t) - goto recasting - } - - buf := []byte{ettSmallBig, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0} - binary.LittleEndian.PutUint64(buf[3:], uint64(t)) - b.Append(buf) - - case int64: - if t >= 0 && t <= math.MaxUint8 { - b.Append([]byte{ettSmallInteger, byte(t)}) - break - } - - if t >= math.MinInt32 && t <= math.MaxInt32 { - term = int32(t) - goto recasting - } - - if t == math.MinInt64 { - // corner case: - // if t = -9223372036854775808 (which is math.MinInt64) - // we can't just revert the sign because it overflows math.MaxInt64 value - buf := []byte{ettSmallBig, 8, 1, 0, 0, 0, 0, 0, 0, 0, 128} - b.Append(buf) - break - } - - negative := byte(0) - if t < 0 { - negative = 
1 - t = -t - } - - buf := []byte{ettSmallBig, 0, negative, 0, 0, 0, 0, 0, 0, 0, 0} - binary.LittleEndian.PutUint64(buf[3:], uint64(t)) - switch { - case t < 4294967296: - buf[1] = 4 - b.Append(buf[:7]) - - case t < 1099511627776: - buf[1] = 5 - b.Append(buf[:8]) - - case t < 281474976710656: - buf[1] = 6 - b.Append(buf[:9]) - - case t < 72057594037927936: - buf[1] = 7 - b.Append(buf[:10]) - - default: - buf[1] = 8 - b.Append(buf) - } - - case big.Int: - bytes := t.Bytes() - negative := t.Sign() < 0 - l := len(bytes) - - for i := 0; i < l/2; i++ { - bytes[i], bytes[l-1-i] = bytes[l-1-i], bytes[i] - } - - if l < 256 { - // 1 (ettSmallBig) + 1 (len) + 1 (sign) + bytes - buf := b.Extend(1 + 1 + 1 + l) - buf[0] = ettSmallBig - buf[1] = byte(l) - - if negative { - buf[2] = 1 - } else { - buf[2] = 0 - } - - copy(buf[3:], bytes) - - break - } - - // 1 (ettLargeBig) + 4 (len) + 1(sign) + bytes - buf := b.Extend(1 + 4 + 1 + l) - buf[0] = ettLargeBig - binary.BigEndian.PutUint32(buf[1:5], uint32(l)) - - if negative { - buf[5] = 1 - } else { - buf[5] = 0 - } - - copy(buf[6:], bytes) - - case string: - lenString := len(t) - - if lenString > 65535 { - return ErrStringTooLong - } - - // 1 (ettString) + 2 (len) + string - buf := b.Extend(1 + 2 + lenString) - buf[0] = ettString - binary.BigEndian.PutUint16(buf[1:3], uint16(lenString)) - copy(buf[3:], t) - - case Charlist: - term = []rune(t) - goto recasting - - case String: - term = []byte(t) - goto recasting - - case Atom: - // As from ERTS 9.0 (OTP 20), atoms may contain any Unicode - // characters and are always encoded using the UTF-8 external - // formats ATOM_UTF8_EXT or SMALL_ATOM_UTF8_EXT. - - // replace atom value if we have mapped value for it - if options.AtomMapping != nil { - options.AtomMapping.MutexOut.RLock() - if mapped, ok := options.AtomMapping.Out[t]; ok { - t = mapped - } - options.AtomMapping.MutexOut.RUnlock() - } - - // https://erlang.org/doc/apps/erts/erl_ext_dist.html#utf8_atoms - // The maximum number of allowed characters in an atom is 255. - // In the UTF-8 case, each character can need 4 bytes to be encoded. 
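A standalone sketch of the length rule and header choice implemented just below: the 255 limit counts characters, while the length field in the header counts bytes, so an atom uses the one-byte-length SMALL_ATOM_UTF8_EXT header while its UTF-8 form fits in 255 bytes and the two-byte-length ATOM_UTF8_EXT header otherwise (tag values per the Erlang external term format):

    package main

    import (
    	"encoding/binary"
    	"errors"
    	"fmt"
    )

    const (
    	ettSmallAtomUTF8 = byte(119) // SMALL_ATOM_UTF8_EXT
    	ettAtomUTF8      = byte(118) // ATOM_UTF8_EXT
    )

    // appendAtom mirrors the checks below: at most 255 characters allowed,
    // one-byte length while the encoding fits in 255 bytes, two-byte otherwise.
    func appendAtom(dst []byte, atom string) ([]byte, error) {
    	if len([]rune(atom)) > 255 {
    		return dst, errors.New("atom too long")
    	}
    	if len(atom) < 256 {
    		return append(append(dst, ettSmallAtomUTF8, byte(len(atom))), atom...), nil
    	}
    	dst = append(dst, ettAtomUTF8, 0, 0)
    	binary.BigEndian.PutUint16(dst[len(dst)-2:], uint16(len(atom)))
    	return append(dst, atom...), nil
    }

    func main() {
    	b, _ := appendAtom(nil, "abc")
    	fmt.Println(b) // [119 3 97 98 99]
    }
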
- if len([]rune(t)) > 255 { - return ErrAtomTooLong - } - - if cacheEnabled && cacheIndex < 255 { - // looking for CacheItem - ci, found := options.SenderAtomCache[t] - if found { - i := options.EncodingAtomCache.Append(ci) - cacheIndex = int16(i + 1) - b.Append([]byte{ettCacheRef, byte(i)}) - break - } else { - // add it to the cache and encode as usual Atom - options.AtomCache.Append(t) - } - } - - lenAtom := len(t) - if lenAtom < 256 { - buf := b.Extend(1 + 1 + lenAtom) - buf[0] = ettSmallAtomUTF8 - buf[1] = byte(lenAtom) - copy(buf[2:], t) - break - } - - // 1 (ettAtomUTF8) + 2 (len) + atom - buf := b.Extend(1 + 2 + lenAtom) - buf[0] = ettAtomUTF8 - binary.BigEndian.PutUint16(buf[1:3], uint16(lenAtom)) - copy(buf[3:], t) - - case float32: - term = float64(t) - goto recasting - - case float64: - // 1 (ettNewFloat) + 8 (float) - buf := b.Extend(1 + 8) - buf[0] = ettNewFloat - bits := math.Float64bits(t) - binary.BigEndian.PutUint64(buf[1:9], uint64(bits)) - - case nil: - b.AppendByte(ettNil) - - case Tuple: - lenTuple := len(t) - if lenTuple < 256 { - b.Append([]byte{ettSmallTuple, byte(lenTuple)}) - } else { - buf := b.Extend(5) - buf[0] = ettLargeTuple - binary.BigEndian.PutUint32(buf[1:5], uint32(lenTuple)) - } - child = &stackElement{ - parent: stack, - termType: ettSmallTuple, // doesn't matter what exact type for the further processing - term: t, - children: lenTuple, - } - - case Pid: - child = &stackElement{ - parent: stack, - term: t, - children: 2, - } - if options.FlagBigCreation { - child.termType = ettNewPid - b.AppendByte(ettNewPid) - } else { - child.termType = ettPid - b.AppendByte(ettPid) - } - - case Alias: - term = Ref(t) - goto recasting - - case Ref: - buf := b.Extend(3) - - child = &stackElement{ - parent: stack, - term: t, - children: 2, - } - if options.FlagBigCreation { - buf[0] = ettNewerRef - child.termType = ettNewerRef - - } else { - buf[0] = ettNewRef - child.termType = ettNewRef - } - - // LEN a 16-bit big endian unsigned integer not larger - // than 5 when the FlagBigPidRef has been set; otherwise not larger than 3. 
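For reference, a standalone sketch of the fixed prefix this branch writes, with LEN pinned to 3 words until the OTP issue referenced in the FIXME just below is resolved (the tag value is assumed to be NEWER_REFERENCE_EXT from the external term format):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // newerRefPrefix assembles the tag and LEN the way the branch below does;
    // the node atom, 32-bit creation and LEN*4 bytes of ID words follow on the wire.
    func newerRefPrefix() []byte {
    	const ettNewerRef = byte(90) // NEWER_REFERENCE_EXT ('Z')
    	buf := []byte{ettNewerRef, 0, 0}
    	binary.BigEndian.PutUint16(buf[1:3], 3)
    	return buf
    }

    func main() {
    	fmt.Println(newerRefPrefix()) // [90 0 3]
    }
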
- - // FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097 - // uncomment once they fix it - //if options.FlagBigPidRef { - // binary.BigEndian.PutUint16(buf[1:3], 5) - //} else { - binary.BigEndian.PutUint16(buf[1:3], 3) - //} - - case Map: - lenMap := len(t) - buf := b.Extend(5) - buf[0] = ettMap - binary.BigEndian.PutUint32(buf[1:], uint32(lenMap)) - - keys := make(List, 0, lenMap) - for key := range t { - keys = append(keys, key) - } - - child = &stackElement{ - parent: stack, - termType: ettMap, - term: t, - children: lenMap * 2, - tmp: keys, - } - - case ListImproper: - if len(t) == 0 { - b.AppendByte(ettNil) - continue - } - lenList := len(t) - 1 - buf := b.Extend(5) - buf[0] = ettList - binary.BigEndian.PutUint32(buf[1:], uint32(lenList)) - child = &stackElement{ - parent: stack, - termType: ettListImproper, - term: t, - children: lenList + 1, - } - - case List: - lenList := len(t) - if lenList == 0 { - b.AppendByte(ettNil) - continue - } - buf := b.Extend(5) - buf[0] = ettList - binary.BigEndian.PutUint32(buf[1:], uint32(lenList)) - child = &stackElement{ - parent: stack, - termType: ettList, - term: t, - children: lenList + 1, - } - - case []byte: - lenBinary := len(t) - buf := b.Extend(1 + 4 + lenBinary) - buf[0] = ettBinary - binary.BigEndian.PutUint32(buf[1:5], uint32(lenBinary)) - copy(buf[5:], t) - - case Marshaler: - m, err := t.MarshalETF() - if err != nil { - return err - } - - lenBinary := len(m) - buf := b.Extend(1 + 4 + lenBinary) - buf[0] = ettBinary - binary.BigEndian.PutUint32(buf[1:5], uint32(lenBinary)) - copy(buf[5:], m) - - default: - v := reflect.ValueOf(t) - vt := reflect.TypeOf(t) - vtAtomName := regTypeName(vt) - registered.RLock() - rtype, typeIsRegistered := registered.typesEnc[vtAtomName] - registered.RUnlock() - - switch v.Kind() { - case reflect.Struct: - lenStruct := v.NumField() - if typeIsRegistered { - // registered type. 
encode as a tuple with vtAtomName as the first element - vtAtomName = rtype.name - if lenStruct+1 < 255 { - b.Append([]byte{ettSmallTuple, byte(lenStruct + 1)}) - } else { - buf := b.Extend(5) - buf[0] = ettLargeTuple - binary.BigEndian.PutUint32(buf[1:], uint32(lenStruct+1)) - } - child = &stackElement{ - parent: stack, - termType: goStructRegistered, - term: v.Field, - children: lenStruct + 1, - tmp: vtAtomName, - } - break - } - - // will be encoded as a ettMap - buf := b.Extend(5) - buf[0] = ettMap - binary.BigEndian.PutUint32(buf[1:], uint32(lenStruct)) - - child = &stackElement{ - parent: stack, - termType: goStruct, - term: v.Field, - children: lenStruct * 2, - tmp: v.Type().Field, - } - - case reflect.Array, reflect.Slice: - lenList := v.Len() - - if typeIsRegistered { - vtAtomName = rtype.name - lenList++ // first element for the type name - buf := b.Extend(5) - buf[0] = ettList - binary.BigEndian.PutUint32(buf[1:], uint32(lenList)) - child = &stackElement{ - parent: stack, - termType: goSliceRegistered, - term: v.Index, - children: lenList + 1, - tmp: vtAtomName, - } - break - } - - if lenList == 0 { - b.AppendByte(ettNil) - continue - } - - buf := b.Extend(5) - buf[0] = ettList - binary.BigEndian.PutUint32(buf[1:], uint32(lenList)) - child = &stackElement{ - parent: stack, - termType: goSlice, - term: v.Index, - children: lenList + 1, - } - - case reflect.Map: - lenMap := v.Len() - if typeIsRegistered { - lenMap++ - vtAtomName = rtype.name - buf := b.Extend(5) - buf[0] = ettMap - binary.BigEndian.PutUint32(buf[1:], uint32(lenMap)) - - child = &stackElement{ - parent: stack, - termType: goMapRegistered, - term: v.MapKeys(), - children: lenMap * 2, - tmp: vtAtomName, - reg: &v, - } - break - } - - buf := b.Extend(5) - buf[0] = ettMap - binary.BigEndian.PutUint32(buf[1:], uint32(lenMap)) - - child = &stackElement{ - parent: stack, - termType: goMap, - term: v.MapIndex, - children: lenMap * 2, - tmp: v.MapKeys(), - } - - case reflect.Ptr: - // dereference value - if !v.IsNil() { - term = v.Elem().Interface() - goto recasting - } - - b.AppendByte(ettNil) - if stack == nil { - break - } - - default: - return fmt.Errorf("unsupported type %v with value %#v", v.Type(), v) - } - } - - if stack == nil && child == nil { - return nil - } - - if child != nil { - stack = child - } - - } -} diff --git a/etf/encode_test.go b/etf/encode_test.go deleted file mode 100644 index 83288450..00000000 --- a/etf/encode_test.go +++ /dev/null @@ -1,1489 +0,0 @@ -package etf - -import ( - "fmt" - "math/big" - "reflect" - "testing" - - "github.com/ergo-services/ergo/lib" -) - -func TestEncodeBool(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - err := Encode(false, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(b.B, []byte{ettSmallAtom, 5, 'f', 'a', 'l', 's', 'e'}) { - t.Fatal("incorrect value") - } -} - -func TestEncodeBoolWithAtomCache(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - atomCache := NewAtomCache() - ci := CacheItem{ID: 499, Encoded: true, Name: "false"} - - senderAtomCache["false"] = ci - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - - err := Encode(false, b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - if encodingAtomCache.Len() != 1 || encodingAtomCache.L[0] != ci { - t.Fatal("incorrect cache value") - } - - if 
!reflect.DeepEqual(b.B, []byte{ettCacheRef, 0}) { - t.Fatal("incorrect value") - } - -} - -type integerCase struct { - name string - integer interface{} - expected []byte -} - -func integerCases() []integerCase { - bigInt := big.Int{} - bigInt.SetString("9223372036854775807123456789", 10) - bigIntNegative := big.Int{} - bigIntNegative.SetString("-9223372036854775807123456789", 10) - - return []integerCase{ - // - // unsigned integers - // - {"uint8::255", uint8(255), []byte{ettSmallInteger, 255}}, - {"uint16::255", uint16(255), []byte{ettSmallInteger, 255}}, - {"uint32::255", uint32(255), []byte{ettSmallInteger, 255}}, - {"uint64::255", uint64(255), []byte{ettSmallInteger, 255}}, - {"uint::255", uint(255), []byte{ettSmallInteger, 255}}, - - {"uint16::256", uint16(256), []byte{ettInteger, 0, 0, 1, 0}}, - - {"uint16::65535", uint16(65535), []byte{ettInteger, 0, 0, 255, 255}}, - {"uint32::65535", uint32(65535), []byte{ettInteger, 0, 0, 255, 255}}, - {"uint64::65535", uint64(65535), []byte{ettInteger, 0, 0, 255, 255}}, - - {"uint64::65536", uint64(65536), []byte{ettInteger, 0, 1, 0, 0}}, - - // treat as an int32 - {"uint32::2147483647", uint32(2147483647), []byte{ettInteger, 127, 255, 255, 255}}, - {"uint64::2147483647", uint64(2147483647), []byte{ettInteger, 127, 255, 255, 255}}, - {"uint64::2147483648", uint64(2147483648), []byte{ettSmallBig, 4, 0, 0, 0, 0, 128}}, - - {"uint32::4294967295", uint32(4294967295), []byte{ettSmallBig, 4, 0, 255, 255, 255, 255}}, - {"uint64::4294967295", uint64(4294967295), []byte{ettSmallBig, 4, 0, 255, 255, 255, 255}}, - {"uint64::4294967296", uint64(4294967296), []byte{ettSmallBig, 5, 0, 0, 0, 0, 0, 1}}, - - {"uint64::18446744073709551615", uint64(18446744073709551615), []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 255}}, - - // - // signed integers - // - - // negative is always ettInteger for the numbers within the range of int32 - {"int8::-127", int8(-127), []byte{ettInteger, 255, 255, 255, 129}}, - {"int16::-127", int16(-127), []byte{ettInteger, 255, 255, 255, 129}}, - {"int32::-127", int32(-127), []byte{ettInteger, 255, 255, 255, 129}}, - {"int64::-127", int64(-127), []byte{ettInteger, 255, 255, 255, 129}}, - {"int::-127", int(-127), []byte{ettInteger, 255, 255, 255, 129}}, - - // positive within a range of int8 treats as ettSmallInteger - {"int8::127", int8(127), []byte{ettSmallInteger, 127}}, - {"int16::127", int16(127), []byte{ettSmallInteger, 127}}, - {"int32::127", int32(127), []byte{ettSmallInteger, 127}}, - {"int64::127", int64(127), []byte{ettSmallInteger, 127}}, - - // a positive int[16,32,64] value within the range of uint8 treats as an uint8 - {"int16::128", int16(128), []byte{ettSmallInteger, 128}}, - {"int32::128", int32(128), []byte{ettSmallInteger, 128}}, - {"int64::128", int64(128), []byte{ettSmallInteger, 128}}, - {"int::128", int(128), []byte{ettSmallInteger, 128}}, - - // whether its positive or negative value within the range of int16 its treating as an int32 - {"int16::-32767", int16(-32767), []byte{ettInteger, 255, 255, 128, 1}}, - {"int16::32767", int16(32767), []byte{ettInteger, 0, 0, 127, 255}}, - - // treat as an int32 - {"int32::2147483647", int32(2147483647), []byte{ettInteger, 127, 255, 255, 255}}, - {"int32::-2147483648", int32(-2147483648), []byte{ettInteger, 128, 0, 0, 0}}, - {"int64::2147483647", int64(2147483647), []byte{ettInteger, 127, 255, 255, 255}}, - {"int64::-2147483648", int64(-2147483648), []byte{ettInteger, 128, 0, 0, 0}}, - - {"int64::2147483648", int64(2147483648), []byte{ettSmallBig, 4, 0, 0, 0, 
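// The cases in this table pin down the encoder's integer ladder: values in
// 0..255 go out as ettSmallInteger (one payload byte), values inside the
// int32 range as ettInteger (4-byte big-endian two's complement), and
// anything larger as ettSmallBig (length byte, sign byte, little-endian
// magnitude). A hedged sketch of the selection for signed input; the helper
// name is invented for illustration and assumes "math" is imported:
func pickIntegerTag(i int64) byte {
	switch {
	case i >= 0 && i <= 255:
		return ettSmallInteger
	case i >= math.MinInt32 && i <= math.MaxInt32:
		return ettInteger
	default:
		return ettSmallBig
	}
}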
0, 128}}, - - // int64 treats as ettSmallBig whether its positive or negative - {"int64::9223372036854775807", int64(9223372036854775807), []byte{ettSmallBig, 8, 0, 255, 255, 255, 255, 255, 255, 255, 127}}, - {"int64::-9223372036854775808", int64(-9223372036854775808), []byte{ettSmallBig, 8, 1, 0, 0, 0, 0, 0, 0, 0, 128}}, - - {"big.int::-9223372036854775807123456789", bigIntNegative, []byte{ettSmallBig, 12, 1, 21, 3, 193, 203, 255, 255, 255, 255, 255, 100, 205, 29}}, - } -} - -func TestEncodeInteger(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - for _, c := range integerCases() { - t.Run(c.name, func(t *testing.T) { - b.Reset() - - err := Encode(c.integer, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(b.B, c.expected) { - fmt.Println("exp ", c.expected) - fmt.Println("got ", b.B) - t.Fatal("incorrect value") - } - }) - } -} - -func TestEncodeFloat(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettNewFloat, 64, 9, 30, 184, 81, 235, 133, 31} - - err := Encode(float64(3.14), b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - b.Reset() - err = Encode(float32(3.14), b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - // float32 to float64 casting makes some changes, thats why 'expected' - // has different set of bytes - expected = []byte{ettNewFloat, 64, 9, 30, 184, 96, 0, 0, 0} - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - -} - -func TestEncodeString(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettString, 0, 52, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 46, 32, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 46, 32, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 32, 208, 188, 208, 184, 209, 128, 46, 32, 240, 159, 154, 128} - err := Encode("Hello World. 你好世界. Привет мир. 
🚀", b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeAtom(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettSmallAtomUTF8, 14, 69, 114, 103, 111, 32, 70, 114, 97, 109, 101, 119, - 111, 114, 107} - - err := Encode(Atom("Ergo Framework"), b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - b.Reset() - - // longAtom with 255 utf-8 symbols 446 bytes - longAtom := Atom("你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好世界ПриветМирHelloWorld你好") - err = Encode(longAtom, b, EncodeOptions{}) - - expected = []byte{ettAtomUTF8, 1, 190, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 208, 156, 208, 184, 209, 128, 72, 101, 108, 108, 111, 87, 111, 114, 108, 100, 228, 189, 160, 229, 165, 189} - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - b.Reset() - - // long Atom. longer 255 symbols. 
Should return ErrAtomTooLong - longAtom = Atom("Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework Ergo Framework") - err = Encode(longAtom, b, EncodeOptions{}) - if err != ErrAtomTooLong { - t.Fatal("incorrect value") - } -} - -func TestEncodeAtomWithCache(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - - atomCache := NewAtomCache() - - ci := CacheItem{ID: 2020, Encoded: true, Name: "cached atom"} - senderAtomCache["cached atom"] = ci - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - - err := Encode(Atom("cached atom"), b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - if encodingAtomCache.Len() != 1 || encodingAtomCache.L[0] != ci { - t.Fatal("incorrect cache value") - } - - if !reflect.DeepEqual(b.B, []byte{ettCacheRef, 0}) { - t.Fatal("incorrect value") - } - - b.Reset() - - err = Encode(Atom("not cached atom"), b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - if encodingAtomCache.Len() != 1 || encodingAtomCache.L[0] != ci { - t.Fatal("incorrect cache value") - } - - expected := []byte{ettSmallAtomUTF8, 15, 110, 111, 116, 32, 99, 97, 99, 104, 101, 100, 32, 97, 116, 111, 109} - if !reflect.DeepEqual(b.B, expected) { - t.Fatal("incorrect value") - } -} - -func TestEncodeBinary(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - err := Encode([]byte{1, 2, 3, 4, 5}, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - expected := []byte{ettBinary, 0, 0, 0, 5, 1, 2, 3, 4, 5} - if !reflect.DeepEqual(b.B, expected) { - t.Fatal("incorrect value") - } -} - -func TestEncodeList(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettList, 0, 0, 0, 3, ettSmallAtomUTF8, 1, 97, ettSmallInteger, 2, ettSmallInteger, 3, ettNil} - term := List{Atom("a"), 2, 3} - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} -func TestEncodeListImproper(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettList, 0, 0, 0, 2, ettSmallAtomUTF8, 1, 97, ettSmallInteger, 2, ettSmallInteger, 3} - term := ListImproper{Atom("a"), 2, 3} - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeSlice(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - expected := []byte{108, 0, 0, 0, 4, 98, 0, 0, 48, 57, 98, 0, 1, 9, 50, 98, 0, 0, 48, 57, - 98, 0, 1, 9, 50, 106} - //expected := []byte{ettList, 0, 0, 0, 3, ettSmallAtomUTF8, 1, 97, ettSmallInteger, 2, ettSmallInteger, 3, ettNil} - term := []int{12345, 67890, 12345, 67890} - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - b.Reset() - - expected = []byte{108, 0, 0, 0, 3, 119, 1, 97, 119, 1, 98, 119, 1, 
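// The List and ListImproper tests above capture the wire difference between
// Erlang's [a, 2, 3] and [a, 2 | 3]: both open with ettList and a 4-byte
// element count, but a proper list counts N elements and closes with an
// ettNil tail, while ListImproper counts N-1 and writes its last element as
// the tail itself. A hedged sketch of the shared header (n excludes the tail):
func appendListHeader(b *lib.Buffer, n int) {
	buf := b.Extend(5)
	buf[0] = ettList
	binary.BigEndian.PutUint32(buf[1:], uint32(n))
}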
99, 106} - termAtoms := []Atom{Atom("a"), Atom("b"), Atom("c")} - err = Encode(termAtoms, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeListNested(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - expected := []byte{108, 0, 0, 0, 2, 119, 1, 97, 108, 0, 0, 0, 4, 119, 1, 98, 97, 2, 108, - 0, 0, 0, 2, 119, 1, 99, 97, 3, 106, 97, 4, 106, 106} - - term := List{Atom("a"), List{Atom("b"), 2, List{Atom("c"), 3}, 4}} - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeTupleNested(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - expected := []byte{104, 2, 119, 1, 97, 104, 4, 119, 1, 98, 97, 2, 104, 2, 119, 1, 99, - 97, 3, 97, 4} - - term := Tuple{Atom("a"), Tuple{Atom("b"), 2, Tuple{Atom("c"), 3}, 4}} - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeTuple(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettSmallTuple, 3, ettSmallAtomUTF8, 1, 97, ettSmallInteger, 2, ettSmallInteger, 3} - term := Tuple{Atom("a"), 2, 3} - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeMap(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - // map has no guarantee of key order, so the result could be different - expected := []byte{116, 0, 0, 0, 2, 119, 4, 107, 101, 121, 49, 98, 0, 0, 48, 57, 119, 4, - 107, 101, 121, 50, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, - 114, 108, 100} - expected1 := []byte{116, 0, 0, 0, 2, 119, 4, 107, 101, 121, 50, 107, 0, 11, 104, 101, - 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 107, 101, 121, 49, 98, 0, 0, - 48, 57} - term := Map{ - Atom("key1"): 12345, - Atom("key2"): "hello world", - } - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) && !reflect.DeepEqual(b.B, expected1) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeGoMap(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - // map has no guarantee of key order, so the result could be different - expected := []byte{116, 0, 0, 0, 2, 119, 4, 107, 101, 121, 49, 98, 0, 0, 48, 57, 119, 4, - 107, 101, 121, 50, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, - 114, 108, 100} - expected1 := []byte{116, 0, 0, 0, 2, 119, 4, 107, 101, 121, 50, 107, 0, 11, 104, 101, - 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 107, 101, 121, 49, 98, 0, 0, - 48, 57} - term := map[Atom]interface{}{ - Atom("key1"): 12345, - Atom("key2"): "hello world", - } - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) && !reflect.DeepEqual(b.B, expected1) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeStruct(t *testing.T) { - b := lib.TakeBuffer() - defer 
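// Both map tests above accept two byte strings for one term because Go map
// iteration order is unspecified; only the frame is fixed: ettMap, a 4-byte
// big-endian pair count, then key and value terms interleaved. A hedged
// sketch of the header, with b the target lib.Buffer and m the map:
buf := b.Extend(5)
buf[0] = ettMap
binary.BigEndian.PutUint32(buf[1:], uint32(len(m))) // counts pairs, not terms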
lib.ReleaseBuffer(b) - - expected := []byte{116, 0, 0, 0, 3, 119, 15, 83, 116, 114, 117, 99, 116, 84, 111, 77, 97, 112, 75, 101, 121, 49, 98, 0, 0, 48, 57, 119, 15, 83, 116, 114, 117, 99, 116, 84, 111, 77, 97, 112, 75, 101, 121, 50, 107, 0, 22, 112, 111, 105, 110, 116, 101, 114, 32, 116, 111, 32, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 15, 83, 116, 114, 117, 99, 116, 84, 111, 77, 97, 112, 75, 101, 121, 51, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100} - - s := "pointer to hello world" - term := struct { - StructToMapKey1 int - StructToMapKey2 string - StructToMapKey3 string - }{ - StructToMapKey1: 12345, - StructToMapKey2: s, - StructToMapKey3: "hello world", - } - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - b1 := lib.TakeBuffer() - defer lib.ReleaseBuffer(b1) - - term1 := struct { - StructToMapKey1 int - StructToMapKey2 *string - StructToMapKey3 string - }{ - StructToMapKey1: 12345, - StructToMapKey2: &s, - StructToMapKey3: "hello world", - } - - err = Encode(term1, b1, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b1.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b1.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeStructWithNestedPointers(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - type Nested struct { - Key1 string - Key2 *string - Key3 int - Key4 *int - Key5 float64 - Key6 *float64 - Key7 bool - Key8 *bool - } - type Tst struct { - Nested - Key9 *Nested - } - ValueString := "hello world" - ValueInt := 123 - ValueFloat := 3.14 - ValueBool := true - - nested := Nested{ - Key1: ValueString, - Key2: &ValueString, - Key3: ValueInt, - Key4: &ValueInt, - Key5: ValueFloat, - Key6: &ValueFloat, - Key7: ValueBool, - Key8: &ValueBool, - } - term := Tst{ - Nested: nested, - Key9: &nested, - } - - expected := []byte{116, 0, 0, 0, 2, 119, 6, 78, 101, 115, 116, 101, 100, 116, 0, 0, 0, 8, 119, 4, 75, 101, 121, 49, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 75, 101, 121, 50, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 75, 101, 121, 51, 97, 123, 119, 4, 75, 101, 121, 52, 97, 123, 119, 4, 75, 101, 121, 53, 70, 64, 9, 30, 184, 81, 235, 133, 31, 119, 4, 75, 101, 121, 54, 70, 64, 9, 30, 184, 81, 235, 133, 31, 119, 4, 75, 101, 121, 55, 115, 4, 116, 114, 117, 101, 119, 4, 75, 101, 121, 56, 115, 4, 116, 114, 117, 101, 119, 4, 75, 101, 121, 57, 116, 0, 0, 0, 8, 119, 4, 75, 101, 121, 49, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 75, 101, 121, 50, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 75, 101, 121, 51, 97, 123, 119, 4, 75, 101, 121, 52, 97, 123, 119, 4, 75, 101, 121, 53, 70, 64, 9, 30, 184, 81, 235, 133, 31, 119, 4, 75, 101, 121, 54, 70, 64, 9, 30, 184, 81, 235, 133, 31, 119, 4, 75, 101, 121, 55, 115, 4, 116, 114, 117, 101, 119, 4, 75, 101, 121, 56, 115, 4, 116, 114, 117, 101} - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - b1 := lib.TakeBuffer() - defer lib.ReleaseBuffer(b1) - termWithNil := Tst{ - Nested: nested, - } - expectedWithNil := []byte{116, 0, 0, 0, 2, 119, 6, 78, 101, 115, 116, 101, 100, 116, 0, 0, 0, 8, 119, 4, 75, 
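// The struct tests above, and the nil-pointer variant spelled out next, rest
// on three rules from the encoder's reflection path: an unregistered struct
// encodes as an ettMap with one pair per exported field, pointer fields are
// dereferenced transparently (term and term1 produce identical bytes), and a
// nil pointer encodes as ettNil, the trailing 106 in expectedWithNil.
// Illustrative declaration, not part of the package:
type Event struct {
	ID   int     // becomes the pair {'ID', value}
	Note *string // nil encodes as ettNil, otherwise as the string value
}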
101, 121, 49, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 75, 101, 121, 50, 107, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 119, 4, 75, 101, 121, 51, 97, 123, 119, 4, 75, 101, 121, 52, 97, 123, 119, 4, 75, 101, 121, 53, 70, 64, 9, 30, 184, 81, 235, 133, 31, 119, 4, 75, 101, 121, 54, 70, 64, 9, 30, 184, 81, 235, 133, 31, 119, 4, 75, 101, 121, 55, 115, 4, 116, 114, 117, 101, 119, 4, 75, 101, 121, 56, 115, 4, 116, 114, 117, 101, 119, 4, 75, 101, 121, 57, 106} - - err = Encode(termWithNil, b1, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(b1.B, expectedWithNil) { - fmt.Println("exp", expectedWithNil) - fmt.Println("got", b1.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeStructWithTags(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{116, 0, 0, 0, 4, 119, 4, 75, 101, 121, 49, 107, 0, 12, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33, 119, 17, 99, 117, 115, 116, 111, 109, 95, 102, 105, 101, 108, 100, 95, 110, 97, 109, 101, 108, 0, 0, 0, 3, 108, 0, 0, 0, 7, 98, 0, 0, 79, 96, 98, 0, 0, 89, 125, 98, 0, 0, 78, 22, 98, 0, 0, 117, 76, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 108, 0, 0, 0, 13, 98, 0, 0, 4, 31, 98, 0, 0, 4, 64, 98, 0, 0, 4, 56, 98, 0, 0, 4, 50, 98, 0, 0, 4, 53, 98, 0, 0, 4, 66, 97, 32, 98, 0, 0, 4, 28, 98, 0, 0, 4, 56, 98, 0, 0, 4, 64, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 108, 0, 0, 0, 14, 97, 72, 97, 101, 97, 108, 97, 108, 97, 111, 97, 32, 97, 87, 97, 111, 97, 114, 97, 108, 97, 100, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 106, 119, 4, 75, 101, 121, 51, 116, 0, 0, 0, 2, 119, 10, 78, 101, 115, 116, 101, 100, 75, 101, 121, 49, 107, 0, 52, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33, 32, 228, 189, 160, 229, 165, 189, 228, 184, 150, 231, 149, 140, 33, 32, 208, 159, 209, 128, 208, 184, 208, 178, 208, 181, 209, 130, 32, 208, 156, 208, 184, 209, 128, 33, 32, 240, 159, 154, 128, 119, 5, 102, 105, 101, 108, 100, 116, 0, 0, 0, 1, 107, 0, 7, 109, 97, 112, 95, 107, 101, 121, 108, 0, 0, 0, 32, 97, 72, 97, 101, 97, 108, 97, 108, 97, 111, 97, 32, 97, 87, 97, 111, 97, 114, 97, 108, 97, 100, 97, 33, 97, 32, 98, 0, 0, 79, 96, 98, 0, 0, 89, 125, 98, 0, 0, 78, 22, 98, 0, 0, 117, 76, 97, 33, 97, 32, 98, 0, 0, 4, 31, 98, 0, 0, 4, 64, 98, 0, 0, 4, 56, 98, 0, 0, 4, 50, 98, 0, 0, 4, 53, 98, 0, 0, 4, 66, 97, 32, 98, 0, 0, 4, 28, 98, 0, 0, 4, 56, 98, 0, 0, 4, 64, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 119, 4, 75, 101, 121, 52, 108, 0, 0, 0, 2, 108, 0, 0, 0, 3, 108, 0, 0, 0, 7, 98, 0, 0, 79, 96, 98, 0, 0, 89, 125, 98, 0, 0, 78, 22, 98, 0, 0, 117, 76, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 108, 0, 0, 0, 13, 98, 0, 0, 4, 31, 98, 0, 0, 4, 64, 98, 0, 0, 4, 56, 98, 0, 0, 4, 50, 98, 0, 0, 4, 53, 98, 0, 0, 4, 66, 97, 32, 98, 0, 0, 4, 28, 98, 0, 0, 4, 56, 98, 0, 0, 4, 64, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 108, 0, 0, 0, 14, 97, 72, 97, 101, 97, 108, 97, 108, 97, 111, 97, 32, 97, 87, 97, 111, 97, 114, 97, 108, 97, 100, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 106, 108, 0, 0, 0, 3, 108, 0, 0, 0, 7, 98, 0, 0, 79, 96, 98, 0, 0, 89, 125, 98, 0, 0, 78, 22, 98, 0, 0, 117, 76, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 108, 0, 0, 0, 13, 98, 0, 0, 4, 31, 98, 0, 0, 4, 64, 98, 0, 0, 4, 56, 98, 0, 0, 4, 50, 98, 0, 0, 4, 53, 98, 0, 0, 4, 66, 97, 32, 98, 0, 0, 4, 28, 98, 0, 0, 4, 56, 98, 0, 0, 4, 64, 97, 33, 97, 32, 98, 0, 1, 246, 128, 106, 108, 0, 0, 0, 14, 97, 72, 97, 101, 97, 108, 97, 108, 97, 111, 97, 32, 97, 87, 97, 111, 97, 114, 97, 108, 97, 100, 97, 33, 97, 32, 98, 
0, 1, 246, 128, 106, 106, 106} - - type Nested struct { - NestedKey1 string - NestedKey2 map[string]*Charlist `etf:"field"` - } - type StructWithTags struct { - Key1 string - Key2 []*Charlist `etf:"custom_field_name"` - Key3 *Nested - Key4 [][]*Charlist - } - - nestedMap := make(map[string]*Charlist) - value1 := Charlist("Hello World! 你好世界! Привет Мир! 🚀") - value11 := "Hello World! 你好世界! Привет Мир! 🚀" - nestedMap["map_key"] = &value1 - - nested := Nested{ - NestedKey1: value11, - NestedKey2: nestedMap, - } - - value2 := Charlist("你好世界! 🚀") - value3 := Charlist("Привет Мир! 🚀") - value4 := Charlist("Hello World! 🚀") - term := StructWithTags{ - Key1: "Hello World!", - Key2: []*Charlist{&value2, &value3, &value4}, - Key3: &nested, - Key4: [][]*Charlist{{&value2, &value3, &value4}, {&value2, &value3, &value4}}, - } - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodePid(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - // FlagBigPidRef disabled. max value for ID (15 bits), serial 0 - expected := []byte{ettPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50, - 55, 46, 48, 46, 48, 46, 49, 0, 0, 127, 255, 0, 0, 0, 0, 2} - term := Pid{Node: "erl-demo@127.0.0.1", ID: 32767, Creation: 2} - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - // FlagBigPidRef disabled. overflowed 15 bit. ID 0, serial 1 - b.Reset() - expected = []byte{ettPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50, - 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 0, 0, 0, 0, 1, 2} - term = Pid{Node: "erl-demo@127.0.0.1", ID: 32768, Creation: 2} - - err = Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - // BigCreation, FlagBigPidRef enabled. max value for ID (32 bits), serial 0 - b.Reset() - expected = []byte{ettNewPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50, - 55, 46, 48, 46, 48, 46, 49, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 2} - term = Pid{Node: "erl-demo@127.0.0.1", ID: 4294967295, Creation: 2} - - options := EncodeOptions{ - FlagBigCreation: true, - FlagBigPidRef: true, - } - err = Encode(term, b, options) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - // BigCreation, FlagBigPidRef enabled. 
max value for ID (32 bits), max value for Serial (32 bits) - b.Reset() - expected = []byte{ettNewPid, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, 49, 50, - 55, 46, 48, 46, 48, 46, 49, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 2} - term = Pid{Node: "erl-demo@127.0.0.1", ID: 18446744073709551615, Creation: 2} - - options = EncodeOptions{ - FlagBigCreation: true, - FlagBigPidRef: true, - } - err = Encode(term, b, options) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodePidWithAtomCache(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{103, 82, 0, 0, 0, 1, 56, 0, 0, 0, 0, 2} - term := Pid{Node: "erl-demo@127.0.0.1", ID: 312, Creation: 2} - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - atomCache := NewAtomCache() - - ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"} - senderAtomCache["erl-demo@127.0.0.1"] = ci - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - err := Encode(term, b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - if encodingAtomCache.Len() != 1 || encodingAtomCache.L[0] != ci { - t.Fatal("incorrect cache value") - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - -} - -func TestEncodeRef(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - // FlagBigCreation = false, FlagBigPidRef = false - expected := []byte{ettNewRef, 0, 3, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, - 49, 50, 55, 46, 48, 46, 48, 46, 49, 3, 0, 1, 30, 228, 183, 192, 0, 1, 141, - 122, 203, 35} - - term := Ref{ - Node: Atom("erl-demo@127.0.0.1"), - // Creation must be encoded as 3 - // Only one byte long and only two bits are significant, the rest must be 0. 
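// This is the truncation the comment above describes: without FlagBigCreation
// the encoder falls back to ettNewRef, whose creation field is a single byte
// with only two significant bits, so Creation: 7 goes onto the wire as 3;
// with the flag set, ettNewerRef carries a full 4-byte creation and the 8 in
// the second case survives intact. The "0, 3" after the tag is the ref ID
// word count, capped at 3 words unless FlagBigPidRef allows all 5. A hedged
// sketch of the masking step:
creation := byte(ref.Creation) & 0x03 // legacy ettNewRef keeps 2 bits only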
- Creation: 7, - ID: [5]uint32{73444, 3082813441, 2373634851}, - } - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - // FlagBigCreation = true, FlagBigPidRef = false - b.Reset() - expected = []byte{ettNewerRef, 0, 3, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, - 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 8, 0, 1, 30, 228, 183, 192, 0, 1, 141, - 122, 203, 35} - - term = Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 8, - ID: [5]uint32{73444, 3082813441, 2373634851, 1, 2}, - } - - options := EncodeOptions{ - FlagBigCreation: true, - } - err = Encode(term, b, options) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } - - // - // FIXME Erlang 24 has a bug https://github.com/erlang/otp/issues/5097 - // uncomment once they fix it - // - // FlagBigCreation = true, FlagBigPidRef = true - //b.Reset() - //expected = []byte{ettNewerRef, 0, 5, 119, 18, 101, 114, 108, 45, 100, 101, 109, 111, 64, - // 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 0, 8, 0, 1, 30, 228, 183, 192, 0, 1, 141, - // 122, 203, 35, 0, 0, 0, 1, 0, 0, 0, 2} - - //term = Ref{ - // Node: Atom("erl-demo@127.0.0.1"), - // Creation: 8, - // ID: [5]uint32{73444, 3082813441, 2373634851, 1, 2}, - //} - - //options = EncodeOptions{ - // FlagBigCreation: true, - // FlagBigPidRef: true, - //} - //err = Encode(term, b, options) - //if err != nil { - // t.Fatal(err) - //} - - //if !reflect.DeepEqual(b.B, expected) { - // fmt.Println("exp", expected) - // fmt.Println("got", b.B) - // t.Fatal("incorrect value") - //} -} - -func TestEncodeTupleRefPid(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - expected := []byte{ettSmallTuple, 2, ettNewRef, 0, 3, ettSmallAtomUTF8, 18, 101, 114, 108, 45, 100, 101, 109, - 111, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 2, 0, 1, 31, 28, 183, 192, 0, - 1, 141, 122, 203, 35, 103, ettSmallAtomUTF8, 18, 101, 114, 108, 45, 100, 101, - 109, 111, 64, 49, 50, 55, 46, 48, 46, 48, 46, 49, 0, 0, 1, 56, 0, 0, 0, 0, - 2} - - term := Tuple{ - Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{0x11f1c, 0xb7c00001, 0x8d7acb23}}, - Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 312, - Creation: 2}} - - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeGoPtrNil(t *testing.T) { - var x *int - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - err := Encode(x, b, EncodeOptions{}) - - if err != nil { - t.Fatal(err) - } - expected := []byte{ettNil} - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func TestEncodeRegisteredType(t *testing.T) { - var tmp int - type regTypeStruct1 struct { - a int - } - type regTypeStruct3 struct { - C string - } - type regTypeStruct2 struct { - A int - B regTypeStruct3 - } - type regTypeMap map[string]regTypeStruct3 - type regTypeSlice []regTypeStruct3 - type regTypeArray [5]regTypeStruct3 - - // only struct/map/slice/array types are supported - if _, err := RegisterType(tmp, RegisterTypeOptions{}); err == nil { - t.Fatal("must be error here") - } - - // only struct with no unexported fields - if _, 
err := RegisterType(regTypeStruct1{}, RegisterTypeOptions{}); err == nil { - t.Fatal("must be error here") - } - - // all nested struct must be registered first - if _, err := RegisterType(regTypeStruct2{}, RegisterTypeOptions{}); err == nil { - t.Fatal("must be error here") - } - - if a, err := RegisterType(regTypeStruct3{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - if a, err := RegisterType(regTypeStruct2{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - - if a, err := RegisterType(regTypeMap{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - - if a, err := RegisterType(regTypeSlice{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - - if a, err := RegisterType(regTypeArray{}, RegisterTypeOptions{}); err != nil { - t.Fatal(err) - } else { - defer UnregisterType(a) - } - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - x := regTypeStruct2{} - x.A = 123 - x.B.C = "hello" - - expected := []byte{ettSmallTuple, 3, ettSmallAtomUTF8, 49, 35, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 101, 114, 103, 111, 45, 115, 101, 114, 118, 105, 99, 101, 115, 47, 101, 114, 103, 111, 47, 101, 116, 102, 47, 114, 101, 103, 84, 121, 112, 101, 83, 116, 114, 117, 99, 116, 50, ettSmallInteger, 123, ettSmallTuple, 2, ettSmallAtomUTF8, 49, 35, 103, 105, 116, 104, 117, 98, 46, 99, 111, 109, 47, 101, 114, 103, 111, 45, 115, 101, 114, 118, 105, 99, 101, 115, 47, 101, 114, 103, 111, 47, 101, 116, 102, 47, 114, 101, 103, 84, 121, 112, 101, 83, 116, 114, 117, 99, 116, 51, ettString, 0, 5, 104, 101, 108, 108, 111} - err := Encode(x, b, EncodeOptions{}) - - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -type testMarshal struct{} - -func (testMarshal) MarshalETF() ([]byte, error) { - return []byte{1, 2, 3}, nil -} - -func TestEncodeMarshal(t *testing.T) { - var x testMarshal - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - err := Encode(x, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - expected := []byte{ettBinary, 0, 0, 0, 3, 1, 2, 3} - if !reflect.DeepEqual(b.B, expected) { - fmt.Println("exp", expected) - fmt.Println("got", b.B) - t.Fatal("incorrect value") - } -} - -func BenchmarkEncodeBool(b *testing.B) { - - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(false, buf, EncodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeBoolWithAtomCache(b *testing.B) { - - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - atomCache := NewAtomCache() - - senderAtomCache["false"] = CacheItem{ID: 499, Encoded: true, Name: "false"} - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(false, buf, encodeOptions) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeInteger(b *testing.B) { - for _, c := range integerCases() { - b.Run(c.name, func(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - for i := 0; i < b.N; i++ { - err := Encode(c.integer, buf, EncodeOptions{}) - if err != nil { 
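// The expected bytes in the registered-type test above decode the wire form:
// instead of an ettMap keyed by field names, a registered struct goes out as
// a tuple whose first element is the registered name (by default the atom
// "#" + package path + "/" + type name, 49 bytes here), followed by the field
// values in declaration order, with nested registered types encoded the same
// way. Usage mirrors the test: register once, keep the returned name for
// cleanup; nested types must be registered before the types that embed them:
name, err := RegisterType(regTypeStruct2{}, RegisterTypeOptions{})
if err != nil {
	panic(err) // fails if regTypeStruct3 was not registered first
}
defer UnregisterType(name)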
- b.Fatal(err) - } - } - }) - } -} - -func BenchmarkEncodeFloat32(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(float32(3.14), buf, EncodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeFloat64(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(float64(3.14), buf, EncodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeString(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode("Ergo Framework", buf, EncodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeAtom(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(Atom("Ergo Framework"), buf, EncodeOptions{}) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeAtomWithCache(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - atomCache := NewAtomCache() - - ci := CacheItem{ID: 2020, Encoded: true, Name: "cached atom"} - senderAtomCache["cached atom"] = ci - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(Atom("cached atom"), buf, encodeOptions) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } - -} - -func BenchmarkEncodeBinary(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - bytes := []byte{1, 2, 3, 4, 5} - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(bytes, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeList(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := List{Atom("a"), 2, 3} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } - -} - -func BenchmarkEncodeListNested(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := List{Atom("a"), List{Atom("b"), 2, List{Atom("c"), 3}, 4}} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } - -} - -func BenchmarkEncodeTuple(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Tuple{Atom("a"), 2, 3} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } - -} - -func BenchmarkEncodeTupleNested(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Tuple{Atom("a"), Tuple{Atom("b"), 2, Tuple{Atom("c"), 3}, 4}} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } - -} - -func BenchmarkEncodeSlice(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := []int{12345, 67890, 12345, 67890} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeArray(b *testing.B) { 
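// Every benchmark in this file follows the same recipe: one pooled buffer per
// benchmark, a Reset between iterations, and ResetTimer after setup so pool
// and atom-cache preparation stay outside the measurement. The shared
// skeleton, for reference (term stands for the value under test):
buf := lib.TakeBuffer()
defer lib.ReleaseBuffer(buf)
b.ResetTimer()
for i := 0; i < b.N; i++ {
	if err := Encode(term, buf, EncodeOptions{}); err != nil {
		b.Fatal(err)
	}
	buf.Reset()
}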
- buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := [4]int{12345, 67890, 12345, 67890} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeMap(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Map{ - Atom("key1"): 12345, - Atom("key2"): "hello world", - } - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeGoMap(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := map[Atom]interface{}{ - Atom("key1"): 12345, - Atom("key2"): "hello world", - } - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeGoStruct(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := struct { - StructToMapKey1 int - StructToMapKey2 string - }{ - StructToMapKey1: 12345, - StructToMapKey2: "hello world", - } - b.ResetTimer() - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodePid(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Pid{Node: "erl-demo@127.0.0.1", ID: 312, Creation: 2} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodePidWithAtomCache(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Pid{Node: "erl-demo@127.0.0.1", ID: 312, Creation: 2} - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - atomCache := NewAtomCache() - - ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"} - senderAtomCache["erl-demo@127.0.0.1"] = ci - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(term, buf, encodeOptions) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeRef(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{73444, 3082813441, 2373634851}, - } - - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeRefWithAtomCache(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{73444, 3082813441, 2373634851}, - } - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - atomCache := NewAtomCache() - - ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"} - senderAtomCache["erl-demo@127.0.0.1"] = ci - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(term, buf, encodeOptions) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeTupleRefPid(b *testing.B) { - buf := lib.TakeBuffer() - defer 
lib.ReleaseBuffer(buf) - - term := Tuple{ - Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{0x11f1c, 0xb7c00001, 0x8d7acb23}}, - Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 312, - Creation: 2}} - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(term, buf, EncodeOptions{}) - buf.Reset() - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkEncodeTupleRefPidWithAtomCache(b *testing.B) { - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - term := Tuple{ - Ref{ - Node: Atom("erl-demo@127.0.0.1"), - Creation: 2, - ID: [5]uint32{0x11f1c, 0xb7c00001, 0x8d7acb23}}, - Pid{ - Node: Atom("erl-demo@127.0.0.1"), - ID: 312, - Creation: 2}} - - senderAtomCache := make(map[Atom]CacheItem) - encodingAtomCache := TakeEncodingAtomCache() - defer ReleaseEncodingAtomCache(encodingAtomCache) - atomCache := NewAtomCache() - - ci := CacheItem{ID: 2020, Encoded: true, Name: "erl-demo@127.0.0.1"} - senderAtomCache["erl-demo@127.0.0.1"] = ci - - encodeOptions := EncodeOptions{ - AtomCache: atomCache.Out, - SenderAtomCache: senderAtomCache, - EncodingAtomCache: encodingAtomCache, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := Encode(term, buf, encodeOptions) - buf.Reset() - encodingAtomCache.Reset() - if err != nil { - b.Fatal(err) - } - } -} diff --git a/etf/etf.go b/etf/etf.go deleted file mode 100644 index c3c67ef5..00000000 --- a/etf/etf.go +++ /dev/null @@ -1,837 +0,0 @@ -package etf - -import ( - "fmt" - "hash/crc32" - "reflect" - "strings" - "sync" - - "github.com/ergo-services/ergo/lib" -) - -var ( - registered = registeredTypes{ - typesEnc: make(map[Atom]*registerType), - typesDec: make(map[Atom]*registerType), - } -) - -// Erlang external term tags. -const ( - ettAtom = byte(100) //deprecated - ettAtomUTF8 = byte(118) - ettSmallAtom = byte(115) //deprecated - ettSmallAtomUTF8 = byte(119) - ettString = byte(107) - - ettCacheRef = byte(82) - - ettNewFloat = byte(70) - - ettSmallInteger = byte(97) - ettInteger = byte(98) - ettLargeBig = byte(111) - ettSmallBig = byte(110) - - ettList = byte(108) - ettListImproper = byte(18) // to be able to encode improper lists like [a|b]. - ettSmallTuple = byte(104) - ettLargeTuple = byte(105) - - ettMap = byte(116) - - ettBinary = byte(109) - ettBitBinary = byte(77) - - ettNil = byte(106) - - ettPid = byte(103) - ettNewPid = byte(88) // since OTP 23, only when BIG_CREATION flag is set - ettNewRef = byte(114) - ettNewerRef = byte(90) // since OTP 21, only when BIG_CREATION flag is set - - ettExport = byte(113) - ettFun = byte(117) // legacy, wont support it here - ettNewFun = byte(112) - - ettPort = byte(102) - ettNewPort = byte(89) // since OTP 23, only when BIG_CREATION flag is set - - // ettRef = byte(101) deprecated - - ettFloat = byte(99) // legacy -) - -type registeredTypes struct { - sync.RWMutex - typesEnc map[Atom]*registerType - typesDec map[Atom]*registerType -} -type registerType struct { - rtype reflect.Type - name Atom - origin Atom - strict bool -} - -// Term -type Term interface{} - -// Tuple -type Tuple []Term - -// List -type List []Term - -// Alias -type Alias Ref - -// ListImproper as a workaround for the Erlang's improper list [a|b]. Intended to be used to interact with Erlang. -type ListImproper []Term - -// Atom -type Atom string - -// Map -type Map map[Term]Term - -// String this type is intended to be used to interact with Erlang. String value encodes as a binary (Erlang type: <<...>>) -type String string - -// Charlist this type is intended to be used to interact with Erlang. 
Charlist value encodes as a list of int32 numbers in order to support Erlang string with UTF-8 symbols on an Erlang side (Erlang type: [...]) -type Charlist string - -// Pid -type Pid struct { - Node Atom - ID uint64 - Creation uint32 -} - -// Port -type Port struct { - Node Atom - ID uint32 - Creation uint32 -} - -// Ref -type Ref struct { - Node Atom - Creation uint32 - ID [5]uint32 -} - -// Marshaler interface implemented by types that can marshal themselves into valid ETF binary -// Interface implementation must be over the object e.g. (MyObject) UnmarshalETF: -// -// type MyObject struct{} -// -// func (m MyObject) MarshalETF() ([]byte, error) { -// var encoded []byte -// ... encoding routine ... -// return encoded, nil -// } -type Marshaler interface { - MarshalETF() ([]byte, error) -} - -// Unmarshaler interface implemented by types that can unmarshal an ETF binary of themselves. -// Returns error ErrEmpty for []byte{}. -// Interface implementation must be over pointer to the object e.g. (*MyObject) UnmarshalETF: -// -// type MyObject struct{} -// -// func (m *MyObject) UnmarshalETF(b []byte) error { -// var err error -// ... decoding routine ... -// return err -// } -type Unmarshaler interface { - UnmarshalETF([]byte) error -} - -// Function -type Function struct { - Arity byte - Unique [16]byte - Index uint32 - // Free uint32 - Module Atom - OldIndex uint32 - OldUnique uint32 - Pid Pid - FreeVars []Term -} - -// Export -type Export struct { - Module Atom - Function Atom - Arity int -} - -// Element -func (m Map) Element(k Term) Term { - return m[k] -} - -// Element -func (l List) Element(i int) Term { - return l[i-1] -} - -// Element -func (t Tuple) Element(i int) Term { - return t[i-1] -} - -// String -func (p Pid) String() string { - empty := Pid{} - if p == empty { - return "<0.0.0>" - } - - n := uint32(0) - if p.Node != "" { - n = crc32.Checksum([]byte(p.Node), lib.CRC32Q) - } - return fmt.Sprintf("<%08X.%d.%d>", n, int32(p.ID>>32), int32(p.ID)) -} - -// String -func (r Ref) String() string { - n := uint32(0) - if r.Node != "" { - n = crc32.Checksum([]byte(r.Node), lib.CRC32Q) - } - return fmt.Sprintf("Ref#<%08X.%d.%d.%d>", n, r.ID[0], r.ID[1], r.ID[2]) -} - -// String -func (a Alias) String() string { - n := uint32(0) - if a.Node != "" { - n = crc32.Checksum([]byte(a.Node), lib.CRC32Q) - } - return fmt.Sprintf("Ref#<%08X.%d.%d.%d>", n, a.ID[0], a.ID[1], a.ID[2]) -} - -// ProplistElement -type ProplistElement struct { - Name Atom - Value Term -} - -// TermToString transforms given term (Atom, []byte, List) to the string -func TermToString(t Term) (s string, ok bool) { - ok = true - switch x := t.(type) { - case Atom: - s = string(x) - case string: - s = x - case []byte: - s = string(x) - case List: - str, err := convertCharlistToString(x) - if err != nil { - ok = false - return - } - s = str - default: - ok = false - } - return -} - -// TermProplistIntoStruct transorms given term into the provided struct 'dest'. -// Proplist is the list of Tuple values with two items { Name , Value }, -// where Name can be string or Atom and Value must be the same type as -// it has the field of 'dest' struct with the equivalent name. 
Its also -// accepts []ProplistElement as a 'term' value -func TermProplistIntoStruct(term Term, dest interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%v", r) - } - }() - v := reflect.Indirect(reflect.ValueOf(dest)) - return setProplist(term, v) -} - -// TermIntoStruct transforms 'term' (etf.Term, etf.List, etf.Tuple, etf.Map) into the -// given 'dest' (could be a struct, map, slice or array). Its a pretty -// expencive operation in terms of CPU usage so you shouldn't use it -// on highload parts of your code. Use manual type casting instead. -func TermIntoStruct(term Term, dest interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%v", r) - } - }() - v := reflect.Indirect(reflect.ValueOf(dest)) - err = termIntoStruct(term, v) - return -} - -func termIntoStruct(term Term, dest reflect.Value) error { - - if term == nil { - return nil - } - - if dest.Type().NumMethod() > 0 && dest.CanInterface() { - v := dest - if v.Kind() != reflect.Ptr && v.CanAddr() { - v = v.Addr() - - if u, ok := v.Interface().(Unmarshaler); ok { - b, is_binary := term.([]byte) - if !is_binary { - return fmt.Errorf("can't unmarshal value, wront type %s", term) - } - return u.UnmarshalETF(b) - } - } - } - - switch dest.Kind() { - case reflect.Ptr: - pdest := reflect.New(dest.Type().Elem()) - dest.Set(pdest) - dest = pdest.Elem() - return termIntoStruct(term, dest) - - case reflect.Array, reflect.Slice: - t := dest.Type() - byte_slice, ok := term.([]byte) - if t == reflect.SliceOf(reflect.TypeOf(byte(1))) && ok { - dest.Set(reflect.ValueOf(byte_slice)) - return nil - - } - if _, ok := term.(List); !ok { - // in case if term is the golang native type - dest.Set(reflect.ValueOf(term)) - return nil - } - return setListField(term.(List), dest) - - case reflect.Struct: - switch s := term.(type) { - case Map: - return setMapStructField(s, dest) - case Tuple: - return setStructField(s, dest) - case Ref: - dest.Set(reflect.ValueOf(s)) - return nil - case Pid: - dest.Set(reflect.ValueOf(s)) - return nil - } - return fmt.Errorf("can't convert %#v to struct", term) - - case reflect.Map: - if _, ok := term.(Map); !ok { - // in case if term is the golang native type - dest.Set(reflect.ValueOf(term)) - return nil - } - return setMapField(term.(Map), dest) - - case reflect.Bool: - b, ok := term.(bool) - if !ok { - return fmt.Errorf("can't convert %#v to bool", term) - } - dest.SetBool(b) - return nil - - case reflect.Float32, reflect.Float64: - f, ok := term.(float64) - if !ok { - return fmt.Errorf("can't convert %#v to float64", term) - } - dest.SetFloat(f) - return nil - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - i := int64(0) - switch v := term.(type) { - case int64: - i = v - case int32: - i = int64(v) - case int16: - i = int64(v) - case int8: - i = int64(v) - case int: - i = int64(v) - case uint64: - i = int64(v) - case uint32: - i = int64(v) - case uint16: - i = int64(v) - case uint8: - i = int64(v) - case uint: - i = int64(v) - default: - return fmt.Errorf("can't convert %#v to int64", term) - } - dest.SetInt(i) - return nil - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - u := uint64(0) - switch v := term.(type) { - case uint64: - u = v - case uint32: - u = uint64(v) - case uint16: - u = uint64(v) - case uint8: - u = uint64(v) - case uint: - u = uint64(v) - case int64: - u = uint64(v) - case int32: - u = uint64(v) - case int16: - u = uint64(v) - case int8: - u = 
uint64(v) - case int: - u = uint64(v) - - default: - return fmt.Errorf("can't convert %#v to uint64", term) - } - dest.SetUint(u) - return nil - - case reflect.String: - switch v := term.(type) { - case List: - s, err := convertCharlistToString(v) - if err != nil { - return err - } - dest.SetString(s) - return nil - case []byte: - dest.SetString(string(v)) - return nil - case string: - dest.SetString(v) - return nil - case Atom: - dest.SetString(string(v)) - return nil - } - - default: - dest.Set(reflect.ValueOf(term)) - return nil - } - - return nil -} - -func setListField(term List, dest reflect.Value) error { - var value reflect.Value - if dest.Kind() == reflect.Ptr { - pdest := reflect.New(dest.Type().Elem()) - dest.Set(pdest) - dest = pdest.Elem() - } - t := dest.Type() - switch t.Kind() { - case reflect.Slice: - value = reflect.MakeSlice(t, len(term), len(term)) - case reflect.Array: - if t.Len() != len(term) { - return NewInvalidTypesError(t, term) - } - value = dest - default: - return NewInvalidTypesError(t, term) - } - - for i, elem := range term { - if err := termIntoStruct(elem, value.Index(i)); err != nil { - return err - } - } - - if t.Kind() == reflect.Slice { - dest.Set(value) - } - - return nil -} - -func setProplist(term Term, dest reflect.Value) error { - switch v := term.(type) { - case []ProplistElement: - return setProplistElementField(v, dest) - case List: - return setProplistField(v, dest) - default: - return NewInvalidTypesError(dest.Type(), term) - } - -} - -func setProplistField(list List, dest reflect.Value) error { - t := dest.Type() - numField := t.NumField() - fields := make([]reflect.StructField, numField) - for i := range fields { - fields[i] = t.Field(i) - } - - for _, elem := range list { - if len(elem.(Tuple)) != 2 { - return &InvalidStructKeyError{Term: elem} - } - - key := elem.(Tuple)[0] - val := elem.(Tuple)[1] - fName, ok := TermToString(key) - if !ok { - return &InvalidStructKeyError{Term: key} - } - index := findStructField(fields, fName) - if index == -1 { - continue - } - - err := termIntoStruct(val, dest.Field(index)) - if err != nil { - return err - } - } - - return nil -} - -func setProplistElementField(proplist []ProplistElement, dest reflect.Value) error { - t := dest.Type() - numField := t.NumField() - fields := make([]reflect.StructField, numField) - for i := range fields { - fields[i] = t.Field(i) - } - - for _, elem := range proplist { - fName, ok := TermToString(elem.Name) - if !ok { - return &InvalidStructKeyError{Term: elem.Name} - } - index := findStructField(fields, fName) - if index == -1 { - continue - } - - err := termIntoStruct(elem.Value, dest.Field(index)) - if err != nil { - return err - } - } - - return nil -} -func setMapField(term Map, dest reflect.Value) error { - switch dest.Type().Kind() { - case reflect.Map: - return setMapMapField(term, dest) - case reflect.Struct: - return setMapStructField(term, dest) - case reflect.Interface: - dest.Set(reflect.ValueOf(term)) - return nil - } - - return NewInvalidTypesError(dest.Type(), term) -} - -func setStructField(term Tuple, dest reflect.Value) error { - if dest.Kind() == reflect.Ptr { - pdest := reflect.New(dest.Type().Elem()) - dest.Set(pdest) - dest = pdest.Elem() - } - for i, elem := range term { - // let it panic if number of term elements is bigger than - // number of struct fields - if err := termIntoStruct(elem, dest.Field(i)); err != nil { - return err - } - } - - return nil - -} - -func setMapStructField(term Map, dest reflect.Value) error { - t := dest.Type() - 
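// termIntoStruct above is the workhorse behind TermIntoStruct: tuples fill
// struct fields by position, maps by field name (an `etf:"..."` tag wins,
// with a case-insensitive fallback on the Go name, see findStructField
// below), lists land in slices and arrays, charlists become strings, and a
// destination implementing Unmarshaler takes over whenever the term is a
// binary. A hedged usage sketch:
var out struct {
	Name string
	Port uint16
}
term := Map{Atom("name"): "ergo", Atom("port"): 4369}
if err := TermIntoStruct(term, &out); err != nil {
	panic(err)
}
// out.Name == "ergo", out.Port == 4369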
numField := t.NumField() - fields := make([]reflect.StructField, numField) - for i := range fields { - fields[i] = t.Field(i) - } - - for key, val := range term { - fName, ok := TermToString(key) - if !ok { - return &InvalidStructKeyError{Term: key} - } - index := findStructField(fields, fName) - if index == -1 { - continue - } - - err := termIntoStruct(val, dest.Field(index)) - if err != nil { - return err - } - } - - return nil -} - -func findStructField(term []reflect.StructField, key string) (index int) { - var fieldName string - index = -1 - for i, f := range term { - fieldName = f.Name - - if tag := f.Tag.Get("etf"); tag != "" { - fieldName = tag - } - - if fieldName == key { - index = i - return - } else { - if strings.EqualFold(f.Name, key) { - index = i - } - } - } - - return -} - -func setMapMapField(term Map, dest reflect.Value) error { - t := dest.Type() - if dest.IsNil() { - dest.Set(reflect.MakeMapWithSize(t, len(term))) - } - tkey := t.Key() - tval := t.Elem() - for key, val := range term { - destkey := reflect.Indirect(reflect.New(tkey)) - if err := termIntoStruct(key, destkey); err != nil { - return err - } - destval := reflect.Indirect(reflect.New(tval)) - if err := termIntoStruct(val, destval); err != nil { - return err - } - dest.SetMapIndex(destkey, destval) - } - return nil -} - -// RegisterTypeOptions defines a custom name for the type being registered. -// Leaving the Name option empty makes the name automatically generated. -// The Strict option defines whether decoding panics -// when the decoded value doesn't fit the destination object. -type RegisterTypeOptions struct { - Name Atom - Strict bool -} - -// RegisterType registers a new type with the given options. It returns the Name -// of the registered type, which can be used in the UnregisterType function -// for unregistering this type. Supported types: struct, slice, array, map. -// Returns an error if this type cannot be registered. -func RegisterType(t interface{}, options RegisterTypeOptions) (Atom, error) { - switch t.(type) { - case Pid, Ref, Alias: - return "", fmt.Errorf("types Pid, Ref, Alias can not be registered") - } - tt := reflect.TypeOf(t) - ttk := tt.Kind() - - name := options.Name - origin := regTypeName(tt) - if name == "" { - name = origin - } - lname := len([]rune(name)) - if lname > 255 { - return name, fmt.Errorf("type name %q is too long. characters number %d (limit: 255)", name, lname) - } - - switch ttk { - case reflect.Struct, reflect.Slice, reflect.Array: - case reflect.Map: - // Using pointers for the network messaging is meaningless. - // Supporting them for map keys and values would overcomplicate the decoding process. - // They can still be used in the other supported types, even though equally meaningless there.
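// The checks that follow spell out what RegisterType accepts: structs with
// every field exported (Pid, Ref, Alias and []byte fields exempt, any other
// nested struct/slice/array/map field type registered beforehand), plus
// slices, arrays, and maps, with pointer map keys or values rejected
// outright. An illustrative failure, in the spirit of the unit test earlier
// in this patch:
if _, err := RegisterType(struct{ hidden int }{}, RegisterTypeOptions{}); err == nil {
	panic("a struct with unexported fields must be rejected")
}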
- if tt.Key().Kind() == reflect.Ptr { - return name, fmt.Errorf("pointer as a key for the map is not supported") - } - if tt.Elem().Kind() == reflect.Ptr { - return name, fmt.Errorf("pointer as a value for the map is not supported") - } - // supported types - default: - return name, fmt.Errorf("type %q is not supported", regTypeName(tt)) - } - - registered.Lock() - defer registered.Unlock() - - _, taken := registered.typesDec[name] - if taken { - return name, lib.ErrTaken - } - - r, taken := registered.typesEnc[origin] - if taken { - return name, fmt.Errorf("type is already registered as %q", r.name) - } - - checkIsRegistered := func(name Atom, rt reflect.Kind) error { - switch rt { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - // check if this type is registered - _, taken := registered.typesEnc[name] - if taken == false { - return fmt.Errorf("type %q must be registered first", name) - } - case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Complex64, reflect.Complex128: - return fmt.Errorf("type %q is not supported", rt) - } - return nil - } - - switch ttk { - case reflect.Struct: - // check for unexported fields - tv := reflect.ValueOf(t) - for i := 0; i < tv.NumField(); i++ { - f := tv.Field(i) - if f.CanInterface() == false { - return name, fmt.Errorf("struct has unexported field(s)") - } - - switch f.Interface().(type) { - case Pid, Ref, Alias: - // ignore these types - continue - } - - if f.Type().Kind() == reflect.Slice && f.Type().Elem().Kind() == reflect.Uint8 { - // []byte - continue - } - - orig := regTypeName(f.Type()) - if err := checkIsRegistered(orig, f.Kind()); err != nil { - return name, err - } - } - case reflect.Array, reflect.Slice, reflect.Map: - elem := tt.Elem() - orig := regTypeName(elem) - if err := checkIsRegistered(orig, elem.Kind()); err != nil { - return name, err - } - } - - rt := &registerType{ - rtype: reflect.TypeOf(t), - name: name, - origin: origin, - strict: options.Strict, - } - registered.typesEnc[origin] = rt - registered.typesDec[name] = rt - return name, nil -} - -// UnregisterType unregisters the type with the given name.
-func UnregisterType(name Atom) error { - registered.Lock() - defer registered.Unlock() - r, found := registered.typesDec[name] - if found == false { - return lib.ErrUnknown - } - delete(registered.typesDec, name) - delete(registered.typesEnc, r.origin) - return nil -} - -type StructPopulatorError struct { - Type reflect.Type - Term Term -} - -func (s *StructPopulatorError) Error() string { - return fmt.Sprintf("Cannot put %#v into go value of type %s", s.Term, s.Type.Kind().String()) -} - -func NewInvalidTypesError(t reflect.Type, term Term) error { - return &StructPopulatorError{ - Type: t, - Term: term, - } -} - -type InvalidStructKeyError struct { - Term Term -} - -func (s *InvalidStructKeyError) Error() string { - return fmt.Sprintf("Cannot use %s as struct field name", reflect.TypeOf(s.Term).Name()) -} - -func convertCharlistToString(l List) (string, error) { - runes := make([]rune, len(l)) - for i := range l { - switch x := l[i].(type) { - case int64: - runes[i] = int32(x) - case int32: - runes[i] = int32(x) - case int16: - runes[i] = int32(x) - case int8: - runes[i] = int32(x) - case int: - runes[i] = int32(x) - default: - return "", fmt.Errorf("wrong rune %#v", l[i]) - } - } - return string(runes), nil -} - -func regTypeName(t reflect.Type) Atom { - return Atom("#" + t.PkgPath() + "/" + t.Name()) -} diff --git a/etf/etf_test.go b/etf/etf_test.go deleted file mode 100644 index d4bb5b91..00000000 --- a/etf/etf_test.go +++ /dev/null @@ -1,903 +0,0 @@ -package etf - -import ( - "bytes" - "fmt" - "reflect" - "testing" - "time" - - "github.com/ergo-services/ergo/lib" -) - -func TestTermIntoStruct_Slice(t *testing.T) { - dest := []byte{} - - tests := []struct { - want []byte - term Term - }{ - {[]byte{1, 2, 3}, List{1, 2, 3}}, - } - - for _, tt := range tests { - if err := TermIntoStruct(tt.term, &dest); err != nil { - t.Errorf("%#v: conversion failed: %v", tt.term, err) - } - - if !bytes.Equal(dest, tt.want) { - t.Errorf("%#v: got %v, want %v", tt.term, dest, tt.want) - } - } - tests1 := []struct { - want [][]float32 - term Term - }{ - {[][]float32{[]float32{1.23, 2.34, 3.45}, []float32{4.56, 5.67, 6.78}, []float32{7.89, 8.91, 9.12}}, List{List{1.23, 2.34, 3.45}, List{4.56, 5.67, 6.78}, List{7.89, 8.91, 9.12}}}, - } - dest1 := [][]float32{} - - for _, tt := range tests1 { - if err := TermIntoStruct(tt.term, &dest1); err != nil { - t.Errorf("%#v: conversion failed: %v", tt.term, err) - } - - if !reflect.DeepEqual(dest1, tt.want) { - t.Errorf("%#v: got %v, want %v", tt.term, dest1, tt.want) - } - } -} - -func TestTermIntoStruct_Array(t *testing.T) { - dest := [3]byte{} - - tests := []struct { - want [3]byte - term Term - }{ - {[...]byte{1, 2, 3}, List{1, 2, 3}}, - } - - for _, tt := range tests { - if err := TermIntoStruct(tt.term, &dest); err != nil { - t.Errorf("%#v: conversion failed: %v", tt.term, err) - } - - if dest != tt.want { - t.Errorf("%#v: got %v, want %v", tt.term, dest, tt.want) - } - } - - tests1 := []struct { - want [3][3]float64 - term Term - }{ - {[3][3]float64{[...]float64{1.23, 2.34, 3.45}, [...]float64{4.56, 5.67, 6.78}, [...]float64{7.89, 8.91, 9.12}}, List{List{1.23, 2.34, 3.45}, List{4.56, 5.67, 6.78}, List{7.89, 8.91, 9.12}}}, - } - dest1 := [3][3]float64{} - - for _, tt := range tests1 { - if err := TermIntoStruct(tt.term, &dest1); err != nil { - t.Errorf("%#v: conversion failed: %v", tt.term, err) - } - - if !reflect.DeepEqual(dest1, tt.want) { - t.Errorf("%#v: got %v, want %v", tt.term, dest1, tt.want) - } - } -} - -func TestTermIntoStruct_Struct(t *testing.T) 
{ - type testAA struct { - A []bool - B uint32 - C string - } - - type testStruct struct { - AA testAA - BB float64 - CC *testStruct - } - type testItem struct { - Want testStruct - Term Term - } - - dest := testStruct{} - tests := []testItem{ - testItem{ - Want: testStruct{ - AA: testAA{ - A: []bool{true, false, false, true, false}, - B: 8765, - C: "test value", - }, - BB: 3.13, - CC: &testStruct{ - BB: 4.14, - CC: &testStruct{ - AA: testAA{ - A: []bool{false, true}, - B: 5, - }, - }, - }, - }, - Term: Tuple{ //testStruct - Tuple{ // AA testAA - List{true, false, false, true, false}, // A []bool - 8765, // B uint32 - "test value", // C string - }, - 3.13, // BB float64 - Tuple{ // CC *testStruct - Tuple{}, // AA testAA (empty) - 4.14, // BB float64 - Tuple{ // CC *testStruct - Tuple{ // AA testAA - List{false, true}, // A []bool - 5, // B uint32 - // C string (empty) - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - if err := TermIntoStruct(tt.Term, &dest); err != nil { - t.Errorf("%#v: conversion failed %v", tt.Term, err) - } - - if !reflect.DeepEqual(dest, tt.Want) { - t.Errorf("%#v: got %#v, want %#v", tt.Term, dest, tt.Want) - } - } -} - -func TestTermIntoStruct_Map(t *testing.T) { - type St struct { - A uint16 - B float32 - } - var destIS map[int]string - var destSI map[string]int - var destFlSt map[float64]St - var destSliceSI []map[bool][]int8 - - wantIS := map[int]string{ - 888: "hello", - 777: "world", - } - termIS := Map{ - 888: "hello", - 777: Atom("world"), - } - if err := TermIntoStruct(termIS, &destIS); err != nil { - t.Errorf("%#v: conversion failed %v", termIS, err) - } - - if !reflect.DeepEqual(destIS, wantIS) { - t.Errorf("%#v: got %#v, want %#v", termIS, destIS, wantIS) - } - - wantSI := map[string]int{ - "hello": 888, - "world": 777, - } - termSI := Map{ - "hello": 888, - Atom("world"): 777, - } - if err := TermIntoStruct(termSI, &destSI); err != nil { - t.Errorf("%#v: conversion failed %v", termSI, err) - } - - if !reflect.DeepEqual(destSI, wantSI) { - t.Errorf("%#v: got %#v, want %#v", termSI, destSI, wantSI) - } - - wantFlSt := map[float64]St{ - 3.45: St{67, 8.91}, - 7.65: St{43, 2.19}, - } - termFlSt := Map{ - 3.45: Tuple{67, 8.91}, - 7.65: Tuple{43, 2.19}, - } - if err := TermIntoStruct(termFlSt, &destFlSt); err != nil { - t.Errorf("%#v: conversion failed %v", termFlSt, err) - } - - if !reflect.DeepEqual(destFlSt, wantFlSt) { - t.Errorf("%#v: got %#v, want %#v", termFlSt, destFlSt, wantFlSt) - } - - wantSliceSI := []map[bool][]int8{ - map[bool][]int8{ - true: []int8{1, 2, 3, 4, 5}, - false: []int8{11, 22, 33, 44, 55}, - }, - map[bool][]int8{ - true: []int8{21, 22, 23, 24, 25}, - false: []int8{-11, -22, -33, -44, -55}, - }, - } - termSliceSI := List{ - Map{ - true: List{1, 2, 3, 4, 5}, - false: List{11, 22, 33, 44, 55}, - }, - Map{ - true: List{21, 22, 23, 24, 25}, - false: List{-11, -22, -33, -44, -55}, - }, - } - if err := TermIntoStruct(termSliceSI, &destSliceSI); err != nil { - t.Errorf("%#v: conversion failed %v", termSliceSI, err) - } - - if !reflect.DeepEqual(destSliceSI, wantSliceSI) { - t.Errorf("%#v: got %#v, want %#v", termSliceSI, destSliceSI, wantSliceSI) - } -} - -func TestTermMapIntoStruct_Struct(t *testing.T) { - type testStruct struct { - A []bool `etf:"a"` - B uint32 `etf:"b"` - C string `etf:"c"` - } - - dest := testStruct{} - - want := testStruct{ - A: []bool{false, true, true}, - B: 3233, - C: "hello world", - } - - term := Map{ - Atom("a"): List{false, true, true}, - "b": 3233, - Atom("c"): "hello world", - } - - if err := 
TermIntoStruct(term, &dest); err != nil { - t.Errorf("%#v: conversion failed %v", term, err) - } - - if !reflect.DeepEqual(dest, want) { - t.Errorf("%#v: got %#v, want %#v", term, dest, want) - } - -} -func TestTermMapIntoMap(t *testing.T) { - type testMap map[string]int - - var dest testMap - - want := testMap{ - "a": 123, - "b": 456, - "c": 789, - } - - term := Map{ - Atom("a"): 123, - "b": 456, - Atom("c"): 789, - } - - if err := TermIntoStruct(term, &dest); err != nil { - t.Errorf("%#v: conversion failed %v", term, err) - } - - if !reflect.DeepEqual(dest, want) { - t.Errorf("%#v: got %#v, want %#v", term, dest, want) - } - -} - -func TestTermProplistIntoStruct(t *testing.T) { - type testStruct struct { - A []bool `etf:"a"` - B uint32 `etf:"b"` - C string `etf:"c"` - } - - dest := testStruct{} - - want := testStruct{ - A: []bool{false, true, true}, - B: 3233, - C: "hello world", - } - termList := List{ - Tuple{Atom("a"), List{false, true, true}}, - Tuple{"b", 3233}, - Tuple{Atom("c"), "hello world"}, - } - - if err := TermProplistIntoStruct(termList, &dest); err != nil { - t.Errorf("%#v: conversion failed %v", termList, err) - } - - if !reflect.DeepEqual(dest, want) { - t.Errorf("%#v: got %#v, want %#v", termList, dest, want) - } - - termSliceProplistElements := []ProplistElement{ - ProplistElement{Atom("a"), List{false, true, true}}, - ProplistElement{"b", 3233}, - ProplistElement{Atom("c"), "hello world"}, - } - - if err := TermProplistIntoStruct(termSliceProplistElements, &dest); err != nil { - t.Errorf("%#v: conversion failed %v", termList, err) - } - - if !reflect.DeepEqual(dest, want) { - t.Errorf("%#v: got %#v, want %#v", termSliceProplistElements, dest, want) - } -} - -func TestTermIntoStructCharlistString(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - type Nested struct { - NestedKey1 String - NestedKey2 map[string]*Charlist `etf:"field"` - } - type StructCharlistString struct { - Key1 string - Key2 []*Charlist `etf:"custom_field_name"` - Key3 *Nested - Key4 [][]*Charlist - } - - nestedMap := make(map[string]*Charlist) - value1 := Charlist("Hello World! 你好世界! Привет Мир! 🚀") - value11 := String("Hello World! 你好世界! Привет Мир! 🚀") - nestedMap["map_key"] = &value1 - - nested := Nested{ - NestedKey1: value11, - NestedKey2: nestedMap, - } - - value2 := Charlist("你好世界! 🚀") - value3 := Charlist("Привет Мир! 🚀") - value4 := Charlist("Hello World! 🚀") - term := StructCharlistString{ - Key1: "Hello World!", - Key2: []*Charlist{&value2, &value3, &value4}, - Key3: &nested, - Key4: [][]*Charlist{[]*Charlist{&value2, &value3, &value4}, []*Charlist{&value2, &value3, &value4}}, - } - err := Encode(term, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - term_Term, _, err := Decode(b.B, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - term_dest := StructCharlistString{} - if err := TermIntoStruct(term_Term, &term_dest); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(term, term_dest) { - t.Fatal("result != expected") - } -} - -func TestCharlistToString(t *testing.T) { - l := List{72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33, 32, 20320, 22909, 19990, 30028, 33, 32, 1055, 1088, 1080, 1074, 1077, 1090, 32, 1052, 1080, 1088, 33, 32, 128640} - s, err := convertCharlistToString(l) - if err != nil { - t.Fatal(err) - } - expected := "Hello World! 你好世界! Привет Мир! 
🚀" - if s != expected { - t.Error("want", expected) - t.Error("got", s) - t.Fatal("incorrect result") - } - -} - -func TestEncodeDecodePid(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - pidIn := Pid{Node: "erl-demo@127.0.0.1", ID: 32767, Creation: 2} - - err := Encode(pidIn, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - - term, _, err := Decode(b.B, []Atom{}, DecodeOptions{}) - pidOut, ok := term.(Pid) - if !ok { - t.Fatal("incorrect result") - } - - if pidIn != pidOut { - t.Error("want", pidIn) - t.Error("got", pidOut) - t.Fatal("incorrect result") - } - - // enable BigCreation - b.Reset() - encodeOptions := EncodeOptions{ - FlagBigCreation: true, - } - err = Encode(pidIn, b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - decodeOptions := DecodeOptions{} - term, _, err = Decode(b.B, []Atom{}, decodeOptions) - pidOut, ok = term.(Pid) - if !ok { - t.Fatal("incorrect result") - } - - if pidIn != pidOut { - t.Error("want", pidIn) - t.Error("got", pidOut) - t.Fatal("incorrect result") - } - - // enable FlagBigPidRef - b.Reset() - encodeOptions = EncodeOptions{ - FlagBigPidRef: true, - } - err = Encode(pidIn, b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - decodeOptions = DecodeOptions{ - FlagBigPidRef: true, - } - term, _, err = Decode(b.B, []Atom{}, decodeOptions) - pidOut, ok = term.(Pid) - if !ok { - t.Fatal("incorrect result") - } - - if pidIn != pidOut { - t.Error("want", pidIn) - t.Error("got", pidOut) - t.Fatal("incorrect result") - } - - // enable BigCreation and FlagBigPidRef - b.Reset() - encodeOptions = EncodeOptions{ - FlagBigPidRef: true, - FlagBigCreation: true, - } - err = Encode(pidIn, b, encodeOptions) - if err != nil { - t.Fatal(err) - } - - decodeOptions = DecodeOptions{ - FlagBigPidRef: true, - } - term, _, err = Decode(b.B, []Atom{}, decodeOptions) - pidOut, ok = term.(Pid) - if !ok { - t.Fatal("incorrect result") - } - - if pidIn != pidOut { - t.Error("want", pidIn) - t.Error("got", pidOut) - t.Fatal("incorrect result") - } -} - -type myTime struct { - Time time.Time -} - -func (m myTime) MarshalETF() ([]byte, error) { - s := fmt.Sprintf("%s", m.Time.Format(time.RFC3339)) - return []byte(s), nil -} - -func (m *myTime) UnmarshalETF(b []byte) error { - t, e := time.Parse( - time.RFC3339, string(b)) - m.Time = t - return e -} - -func TestTermIntoStructUnmarshal(t *testing.T) { - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - var src, dest myTime - - now := time.Now() - s := fmt.Sprintf("%s", now.Format(time.RFC3339)) - now, _ = time.Parse(time.RFC3339, s) - - src.Time = now - err := Encode(src, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - term, _, err := Decode(b.B, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if err := TermIntoStruct(term, &dest); err != nil { - t.Errorf("%#v: conversion failed %v", term, err) - } - - if src != dest { - t.Fatal("wrong value") - } - - type aaa struct { - A1 myTime - A2 *myTime - } - - var src1 aaa - var dst1 aaa - - src1.A1.Time = now - src1.A2 = &myTime{now} - - b.Reset() - - err = Encode(src1, b, EncodeOptions{}) - if err != nil { - t.Fatal(err) - } - term, _, err = Decode(b.B, []Atom{}, DecodeOptions{}) - if err != nil { - t.Fatal(err) - } - - if err = TermIntoStruct(term, &dst1); err != nil { - t.Errorf("%#v: conversion failed %v", term, err) - } - if !reflect.DeepEqual(src1, dst1) { - t.Errorf("got %v, want %v", dst1, src1) - } -} - -func TestRegisterSlice(t *testing.T) { - type sliceString []string - type sliceInt []int - type 
sliceInt8 []int8 - type sliceInt16 []int16 - type sliceInt32 []int32 - type sliceInt64 []int64 - type sliceUint []uint - type sliceUint8 []uint8 - type sliceUint16 []uint16 - type sliceUint32 []uint32 - type sliceUint64 []uint64 - type sliceFloat32 []float32 - type sliceFloat64 []float64 - - type allInOneSlices struct { - A sliceString - B sliceInt - C sliceInt8 - D sliceInt16 - E sliceInt32 - F sliceInt64 - G sliceUint - H sliceUint8 - I sliceUint16 - K sliceUint32 - L sliceUint64 - M sliceFloat32 - O sliceFloat64 - } - types := []interface{}{ - sliceString{}, - sliceInt{}, - sliceInt8{}, - sliceInt16{}, - sliceInt32{}, - sliceInt64{}, - sliceUint{}, - sliceUint8{}, - sliceUint16{}, - sliceUint32{}, - sliceUint64{}, - sliceFloat32{}, - sliceFloat64{}, - allInOneSlices{}, - } - if err := registerTypes(types); err != nil { - t.Fatal(err) - } - - src := allInOneSlices{ - A: sliceString{"Hello", "World"}, - B: sliceInt{-1, 2, -3, 4, -5}, - C: sliceInt8{-1, 2, -3, 4, -5}, - D: sliceInt16{-1, 2, -3, 4, -5}, - E: sliceInt32{-1, 2, -3, 4, -5}, - F: sliceInt64{-1, 2, -3, 4, -5}, - G: sliceUint{1, 2, 3, 4, 5}, - H: sliceUint8{1, 2, 3, 4, 5}, - I: sliceUint16{1, 2, 3, 4, 5}, - K: sliceUint32{1, 2, 3, 4, 5}, - L: sliceUint64{1, 2, 3, 4, 5}, - M: sliceFloat32{1.1, -2.2, 3.3, -4.4, 5.5}, - O: sliceFloat64{1.1, -2.2, 3.3, -4.4, 5.5}, - } - - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - encodeOptions := EncodeOptions{ - FlagBigPidRef: true, - FlagBigCreation: true, - } - if err := Encode(src, buf, encodeOptions); err != nil { - t.Fatal(err) - } - decodeOptions := DecodeOptions{ - FlagBigPidRef: true, - } - dst, _, err := Decode(buf.B, []Atom{}, decodeOptions) - if err != nil { - t.Fatal(err) - } - - if _, ok := dst.(allInOneSlices); !ok { - t.Fatalf("wrong term result: %#v\n", dst) - } - - if !reflect.DeepEqual(src, dst) { - t.Errorf("got:\n%#v\n\nwant:\n%#v\n", dst, src) - } -} - -func TestRegisterMap(t *testing.T) { - type mapIntString map[int]string - type mapStringInt map[string]int - type mapInt8Int map[int8]int - type mapFloat32Int32 map[float32]int32 - type mapFloat64Int32 map[float64]int32 - type mapInt32Float32 map[int32]float32 - type mapInt32Float64 map[int32]float64 - - type allInOne struct { - A mapIntString - B mapStringInt - C mapInt8Int - D mapFloat32Int32 - E mapFloat64Int32 - F mapInt32Float32 - G mapInt32Float64 - } - - types := []interface{}{ - mapIntString{}, - mapStringInt{}, - mapInt8Int{}, - mapFloat32Int32{}, - mapFloat64Int32{}, - mapInt32Float32{}, - mapInt32Float64{}, - allInOne{}, - } - if err := registerTypes(types); err != nil { - t.Fatal(err) - } - - src := allInOne{ - A: make(mapIntString), - B: make(mapStringInt), - C: make(mapInt8Int), - D: make(mapFloat32Int32), - E: make(mapFloat64Int32), - F: make(mapInt32Float32), - G: make(mapInt32Float64), - } - - src.A[1] = "Hello" - src.B["Hello"] = 1 - src.C[1] = 1 - src.D[3.14] = 1 - src.E[3.15] = 1 - src.F[1] = 3.15 - src.G[1] = 3.15 - - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - encodeOptions := EncodeOptions{ - FlagBigPidRef: true, - FlagBigCreation: true, - } - if err := Encode(src, buf, encodeOptions); err != nil { - t.Fatal(err) - } - decodeOptions := DecodeOptions{ - FlagBigPidRef: true, - } - dst, _, err := Decode(buf.B, []Atom{}, decodeOptions) - if err != nil { - t.Fatal(err) - } - - if _, ok := dst.(allInOne); !ok { - t.Fatalf("wrong term result: %#v\n", dst) - } - - if !reflect.DeepEqual(src, dst) { - t.Errorf("got:\n%#v\n\nwant:\n%#v\n", dst, src) - } - -} - -func TestRegisterType(t 
*testing.T) { - type ccc []string - type ddd [3]bool - type aaa struct { - A string - B int - B8 int8 - B16 int16 - B32 int32 - B64 int64 - - BU uint - BU8 uint8 - BU16 uint16 - BU32 uint32 - BU64 uint64 - - C float32 - C64 float64 - D ddd - - T1 Pid - T2 Ref - T3 Alias - } - type bbb map[aaa]ccc - - src := bbb{ - aaa{ - A: "aa", - B: -11, - B8: -18, - B16: -1116, - B32: -1132, - B64: -1164, - BU: 0xb, - BU8: 0x12, - BU16: 0x45c, - BU32: 0x46c, - BU64: 0x48c, - C: -11.22, - C64: 1164.22, - D: ddd{true, false, false}, - T1: Pid{Node: Atom("nodepid11"), ID: 123, Creation: 456}, - T2: Ref{Node: Atom("noderef11"), Creation: 123, ID: [5]uint32{4, 5, 6, 0, 0}}, - T3: Alias{Node: Atom("nodealias11"), Creation: 123, ID: [5]uint32{4, 5, 6, 0, 0}}, - }: ccc{"a1", "a2", "a3"}, - aaa{ - A: "bb", - B: -22, - B8: -28, - B16: -2216, - B32: -2232, - B64: -2264, - BU: 0x16, - BU8: 0x1c, - BU16: 0x8a8, - BU32: 0x8b8, - BU64: 0x8d8, - C: -22.22, - C64: 2264.22, - D: ddd{false, true, false}, - T1: Pid{Node: Atom("nodepid22"), ID: 123, Creation: 456}, - T2: Ref{Node: Atom("noderef22"), Creation: 123, ID: [5]uint32{4, 5, 6, 0, 0}}, - T3: Alias{Node: Atom("nodealias22"), Creation: 123, ID: [5]uint32{4, 5, 6, 0, 0}}, - }: ccc{"b1", "b2", "b3"}, - aaa{ - A: "cc", - B: -33, - B8: -38, - B16: -3316, - B32: -3332, - B64: -3364, - BU: 0x21, - BU8: 0x26, - BU16: 0xcf4, - BU32: 0xd04, - BU64: 0xd24, - C: -33.22, - C64: 3364.22, - D: ddd{false, false, true}, - T1: Pid{Node: Atom("nodepid33"), ID: 123, Creation: 456}, - T2: Ref{Node: Atom("noderef33"), Creation: 123, ID: [5]uint32{4, 5, 6, 0, 0}}, - T3: Alias{Node: Atom("nodealias33"), Creation: 123, ID: [5]uint32{4, 5, 6, 0, 0}}, - }: ccc{}, //test empty list - } - - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - if _, err := RegisterType(Pid{}, RegisterTypeOptions{Strict: true}); err == nil { - t.Fatal("shouldn't be registered") - } - if _, err := RegisterType(Ref{}, RegisterTypeOptions{Strict: true}); err == nil { - t.Fatal("shouldn't be registered") - } - if _, err := RegisterType(Alias{}, RegisterTypeOptions{Strict: true}); err == nil { - t.Fatal("shouldn't be registered") - } - - types := []interface{}{ - ddd{}, - aaa{}, - ccc{}, - bbb{}, - } - if err := registerTypes(types); err != nil { - t.Fatal(err) - } - - encodeOptions := EncodeOptions{ - FlagBigPidRef: true, - FlagBigCreation: true, - } - if err := Encode(src, buf, encodeOptions); err != nil { - t.Fatal(err) - } - decodeOptions := DecodeOptions{ - FlagBigPidRef: true, - } - dst, _, err := Decode(buf.B, []Atom{}, decodeOptions) - if err != nil { - t.Fatal(err) - } - - if _, ok := dst.(bbb); !ok { - t.Fatal("wrong term result") - } - - if !reflect.DeepEqual(src, dst) { - t.Errorf("got:\n%#v\n\nwant:\n%#v\n", dst, src) - } -} - -func registerTypes(types []interface{}) error { - rtOpts := RegisterTypeOptions{Strict: true} - - for _, t := range types { - if _, err := RegisterType(t, rtOpts); err != nil && err != lib.ErrTaken { - return err - } - } - return nil -} diff --git a/gen/README.md b/gen/README.md deleted file mode 100644 index dc564ac8..00000000 --- a/gen/README.md +++ /dev/null @@ -1,68 +0,0 @@ - -## Generic behaviors ## - -### Server - Generic server behavior. - -Example: [gen.Server](https://github.com/ergo-services/examples/tree/master/genserver) - -### Supervisor - Generic supervisor behavior. - -A supervisor is responsible for starting, stopping, and monitoring its child processes. 
The basic idea of a supervisor is that it should keep its child processes alive by restarting them when necessary. - -Example: [gen.Supervisor](https://github.com/ergo-services/examples/tree/master/supervisor) - -### Application - Generic application behavior. - -Example: [gen.Application](https://github.com/ergo-services/examples/tree/master/application) - -### Pool - Generic pool of workers. - - This behavior implements a basic design pattern with a pool of workers. All messages/requests received by the pool process are forwarded to the workers using the "Round Robin" algorithm. A worker process is automatically restarted on termination. - -Example: [gen.Pool](https://github.com/ergo-services/examples/tree/master/genpool) - -### Web - Web API Gateway behavior. - - The Web API Gateway pattern is also sometimes known as the "Backend For Frontend" (BFF) because you build it while thinking about the needs of the client app. Therefore, the BFF sits between the client apps and the microservices. It acts as a reverse proxy, routing requests from clients to services. - -Example: [gen.Web](https://github.com/ergo-services/examples/tree/master/genweb) - -### TCP - Socket acceptor pool for TCP protocols. - - This behavior aims to provide everything you need to accept TCP connections and process packets with a small code base and low latency while being easy to use. - -Example: [gen.TCP](https://github.com/ergo-services/examples/tree/master/gentcp) - -### UDP - UDP acceptor pool for UDP protocols - - This behavior provides the same feature set as TCP but for handling UDP packets using a pool of handlers. - -Example: [gen.UDP](https://github.com/ergo-services/examples/tree/master/genudp) - -### Stage - Generic stage behavior (originated from Elixir's [GenStage](https://hexdocs.pm/gen_stage/GenStage.html)). - -This is an abstraction built on top of `gen.Server` that provides a simple way to create a distributed Producer/Consumer architecture while automatically managing the concept of backpressure. This implementation is fully compatible with Elixir's GenStage. - -Example: [gen.Stage](https://github.com/ergo-services/examples/tree/master/genstage) - -### Saga - Generic saga behavior. - -It implements the Saga design pattern - a sequence of transactions that updates each service's state and publishes the result (or cancels the transaction, or triggers the next transaction step). `gen.Saga` also provides interim results (usable as transaction progress or as part of pipeline processing), a time deadline (to limit the transaction lifespan), and two-phase commit (to make a distributed transaction atomic). - -Example: [gen.Saga](https://github.com/ergo-services/examples/tree/master/gensaga) - -### Raft - Generic raft behavior. - -It's an improved implementation of the [Raft consensus algorithm](https://raft.github.io). The key improvement is using a quorum under the hood to manage the leader election process and make the Raft cluster more reliable. This implementation supports quorums of 3, 5, 7, 9, or 11 members.
- -Example: [gen.Raft](https://github.com/ergo-services/examples/tree/master/genraft) diff --git a/gen/application.go b/gen/application.go index 95762c71..3b7addb9 100644 --- a/gen/application.go +++ b/gen/application.go @@ -1,264 +1,105 @@ package gen -// http://erlang.org/doc/apps/kernel/application.html - -import ( - "fmt" - "sync" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type ApplicationStartType = string +type ApplicationMode int +type ApplicationState int32 const ( - // start types: + ApplicationModeTemporary ApplicationMode = 1 + ApplicationModeTransient ApplicationMode = 2 + ApplicationModePermanent ApplicationMode = 3 - // ApplicationStartPermanent If a permanent application terminates, - // all other applications and the runtime system (node) are also terminated. - ApplicationStartPermanent ApplicationStartType = "permanent" - - // ApplicationStartTemporary If a temporary application terminates, - // this is reported but no other applications are terminated. - ApplicationStartTemporary ApplicationStartType = "temporary" - - // ApplicationStartTransient If a transient application terminates - // with reason normal, this is reported but no other applications are - // terminated. If a transient application terminates abnormally, that - // is with any other reason than normal, all other applications and - // the runtime system (node) are also terminated. - ApplicationStartTransient ApplicationStartType = "transient" - - // EnvKeyAppSpec - EnvKeyAppSpec EnvKey = "ergo:AppSpec" + ApplicationStateLoaded ApplicationState = 1 + ApplicationStateRunning ApplicationState = 2 + ApplicationStateStopping ApplicationState = 3 ) -// ApplicationBehavior interface -type ApplicationBehavior interface { - ProcessBehavior - Load(args ...etf.Term) (ApplicationSpec, error) - Start(process Process, args ...etf.Term) +func (am ApplicationMode) String() string { + switch am { + case ApplicationModePermanent: + return "permanent" + case ApplicationModeTransient: + return "transient" + default: + return "temporary" + } } -// ApplicationSpec -type ApplicationSpec struct { - sync.Mutex - Name string - Description string - Version string - Lifespan time.Duration - Applications []string - Env map[EnvKey]interface{} - Children []ApplicationChildSpec - Process Process - StartType ApplicationStartType +func (am ApplicationMode) MarshalJSON() ([]byte, error) { + return []byte("\"" + am.String() + "\""), nil } -// ApplicationChildSpec -type ApplicationChildSpec struct { - Child ProcessBehavior - Options ProcessOptions - Name string - Args []etf.Term - process Process +func (as ApplicationState) String() string { + switch as { + case ApplicationStateStopping: + return "stopping" + case ApplicationStateRunning: + return "running" + default: + return "loaded" + } } -// Application is implementation of ProcessBehavior interface -type Application struct{} - -// ApplicationInfo -type ApplicationInfo struct { - Name string - Description string - Version string - PID etf.Pid +func (as ApplicationState) MarshalJSON() ([]byte, error) { + return []byte("\"" + as.String() + "\""), nil } -// ProcessInit -func (a *Application) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) { - spec := p.Env(EnvKeyAppSpec).(*ApplicationSpec) - spec, ok := p.Env(EnvKeyAppSpec).(*ApplicationSpec) - if !ok { - return ProcessState{}, fmt.Errorf("ProcessInit: not an ApplicationBehavior") - } - // remove variable from the env - p.SetEnv(EnvKeyAppSpec, nil) - - p.SetTrapExit(true) - - if 
spec.Env != nil { - for k, v := range spec.Env { - p.SetEnv(k, v) - } - } - - if !a.startChildren(p, spec.Children[:]) { - a.stopChildren(p.Self(), spec.Children[:], "failed") - return ProcessState{}, fmt.Errorf("failed") - } - - behavior, ok := p.Behavior().(ApplicationBehavior) - if !ok { - return ProcessState{}, fmt.Errorf("ProcessInit: not an ApplicationBehavior") - } - behavior.Start(p, args...) - spec.Process = p - - return ProcessState{ - Process: p, - State: spec, - }, nil +type ApplicationBehavior interface { + // Load is invoked on loading the application using the ApplicationLoad method of the gen.Node interface. + Load(node Node, args ...any) (ApplicationSpec, error) + // Start is invoked once the application has started + Start(mode ApplicationMode) + // Terminate is invoked once the application has stopped + Terminate(reason error) +} -// ProcessLoop -func (a *Application) ProcessLoop(ps ProcessState, started chan<- bool) string { - spec := ps.State.(*ApplicationSpec) - defer func() { spec.Process = nil }() - - if spec.Lifespan == 0 { - spec.Lifespan = time.Hour * 24 * 365 * 100 // let's define default lifespan 100 years :) - } - - chs := ps.ProcessChannels() - - timer := time.NewTimer(spec.Lifespan) - // the timer must be stopped explicitly to prevent timer leaks, - // since it is not GCed until the timer fires - defer timer.Stop() - - started <- true - for { - select { - case ex := <-chs.GracefulExit: - terminated := ex.From - reason := ex.Reason - if ex.From == ps.Self() { - childrenStopped := a.stopChildren(terminated, spec.Children, reason) - if !childrenStopped { - lib.Warning("application %q can't be stopped. Some of the children are still running", spec.Name) - continue - } - return ex.Reason - } - - unknownChild := true - - for i := range spec.Children { - child := spec.Children[i].process - if child == nil { - continue - } - if child.Self() == terminated { - unknownChild = false - break - } - } - - if unknownChild { - continue - } - - switch spec.StartType { - case ApplicationStartPermanent: - a.stopChildren(terminated, spec.Children, string(reason)) - lib.Warning("Application child %s (at %s) stopped with reason %s (permanent: node is shutting down)", - terminated, ps.NodeName(), reason) - ps.NodeStop() - return "shutdown" - - case ApplicationStartTransient: - if reason == "normal" || reason == "shutdown" { - lib.Warning("Application child %s (at %s) stopped with reason %s (transient)", - terminated, ps.NodeName(), reason) - continue - } - a.stopChildren(terminated, spec.Children, reason) - lib.Warning("Application child %s (at %s) stopped with reason %s.
(transient: node is shutting down)", - terminated, ps.NodeName(), reason) - ps.NodeStop() - return string(reason) - - case ApplicationStartTemporary: - fmt.Printf("Application child %s (at %s) stopped with reason %s (temporary)\n", - terminated, ps.NodeName(), reason) - } - - case direct := <-chs.Direct: - switch direct.Message.(type) { - case MessageDirectChildren: - pids := []etf.Pid{} - for i := range spec.Children { - if spec.Children[i].process == nil { - continue - } - pids = append(pids, spec.Children[i].process.Self()) - } - - ps.PutSyncReply(direct.Ref, pids, nil) - - default: - ps.PutSyncReply(direct.Ref, nil, lib.ErrUnsupportedRequest) - } - - case <-ps.Context().Done(): - // node is down or killed using p.Kill() - return "kill" - - case <-timer.C: - // time to die - ps.SetTrapExit(false) - go ps.Exit("normal") - - case <-chs.Mailbox: - // do nothing - } - - } +type ApplicationOptions struct { + Env map[Env]any + LogLevel LogLevel } -func (a *Application) stopChildren(from etf.Pid, children []ApplicationChildSpec, reason string) bool { - childrenStopped := true - for i := range children { - child := children[i].process - if child == nil { - continue - } - - if child.Self() == from { - continue - } - - if !child.IsAlive() { - continue - } - - if err := child.Exit(reason); err != nil { - childrenStopped = false - continue - } +type ApplicationOptionsExtra struct { + ApplicationOptions + CorePID PID + CoreEnv map[Env]any + CoreLogLevel LogLevel +} - if err := child.WaitWithTimeout(5 * time.Second); err != nil { - childrenStopped = false - continue - } +// ApplicationSpec +type ApplicationSpec struct { + Name Atom + Description string + Version Version + Depends ApplicationDepends + Env map[Env]any + Group []ApplicationMemberSpec + Mode ApplicationMode + Weight int + LogLevel LogLevel +} - children[i].process = nil - } +// ApplicationMemberSpec +type ApplicationMemberSpec struct { + Factory ProcessFactory + Options ProcessOptions + Name Atom + Args []any +} - return childrenStopped +type ApplicationDepends struct { + Applications []Atom + Network bool } -func (a *Application) startChildren(parent Process, children []ApplicationChildSpec) bool { - for i := range children { - // I know, it looks weird to use the function from the supervisor file. - // will move it somewhere else, but let it be there for a while. - p := startChild(parent, children[i].Name, children[i].Child, children[i].Options, children[i].Args...)
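// startChild (the helper borrowed from the supervisor implementation) returns nil when the child fails to start; the nil check below turns that into a failed application start.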
- if p == nil { - return false - } - children[i].process = p - } - return true +type ApplicationInfo struct { + Name Atom + Weight int + Description string + Version Version + Env map[Env]any + Depends ApplicationDepends + Mode ApplicationMode + State ApplicationState + Parent Atom + Uptime int64 + Group []PID } diff --git a/gen/cert.go b/gen/cert.go new file mode 100644 index 00000000..e7fd8dd3 --- /dev/null +++ b/gen/cert.go @@ -0,0 +1,43 @@ +package gen + +import ( + "crypto/tls" + "sync" +) + +type CertManager interface { + Update(cert tls.Certificate) + GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) + GetCertificate() tls.Certificate +} + +type certManager struct { + sync.RWMutex + cert *tls.Certificate +} + +func CreateCertManager(cert tls.Certificate) CertManager { + return &certManager{ + cert: &cert, + } +} + +func (cm *certManager) Update(cert tls.Certificate) { + cm.Lock() + defer cm.Unlock() + cm.cert = &cert +} + +func (cm *certManager) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + return func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + cm.RLock() + defer cm.RUnlock() + return cm.cert, nil + } +} + +func (cm *certManager) GetCertificate() tls.Certificate { + cm.RLock() + defer cm.RUnlock() + return *cm.cert +} diff --git a/gen/core.go b/gen/core.go new file mode 100644 index 00000000..c0b7139e --- /dev/null +++ b/gen/core.go @@ -0,0 +1,68 @@ +package gen + +type Core interface { + // sending message + RouteSendPID(from PID, to PID, options MessageOptions, message any) error + RouteSendProcessID(from PID, to ProcessID, options MessageOptions, message any) error + RouteSendAlias(from PID, to Alias, options MessageOptions, message any) error + + RouteSendEvent(from PID, token Ref, options MessageOptions, message MessageEvent) error + RouteSendExit(from PID, to PID, reason error) error + RouteSendResponse(from PID, to PID, options MessageOptions, message any) error + RouteSendResponseError(from PID, to PID, options MessageOptions, err error) error + + // call requests + RouteCallPID(from PID, to PID, options MessageOptions, message any) error + RouteCallProcessID(from PID, to ProcessID, options MessageOptions, message any) error + RouteCallAlias(from PID, to Alias, options MessageOptions, message any) error + + // linking requests + RouteLinkPID(pid PID, target PID) error + RouteUnlinkPID(pid PID, target PID) error + + RouteLinkProcessID(pid PID, target ProcessID) error + RouteUnlinkProcessID(pid PID, target ProcessID) error + + RouteLinkAlias(pid PID, target Alias) error + RouteUnlinkAlias(pid PID, target Alias) error + + RouteLinkEvent(pid PID, target Event) ([]MessageEvent, error) + RouteUnlinkEvent(pid PID, target Event) error + + // monitoring requests + RouteMonitorPID(pid PID, target PID) error + RouteDemonitorPID(pid PID, target PID) error + + RouteMonitorProcessID(pid PID, target ProcessID) error + RouteDemonitorProcessID(pid PID, target ProcessID) error + + RouteMonitorAlias(pid PID, target Alias) error + RouteDemonitorAlias(pid PID, target Alias) error + + RouteMonitorEvent(pid PID, target Event) ([]MessageEvent, error) + RouteDemonitorEvent(pid PID, target Event) error + + // target termination + RouteTerminatePID(target PID, reason error) error + RouteTerminateProcessID(target ProcessID, reason error) error + RouteTerminateEvent(target Event, reason error) error + RouteTerminateAlias(target Alias, reason error) error + + RouteSpawn(node Atom, name Atom, options ProcessOptionsExtra, source Atom) (PID,
error) + RouteApplicationStart(name Atom, mode ApplicationMode, options ApplicationOptionsExtra, source Atom) error + + RouteNodeDown(node Atom, reason error) + + MakeRef() Ref + Name() Atom + Creation() int64 + + PID() PID + LogLevel() LogLevel + Security() SecurityOptions + EnvList() map[Env]any +} + +const ( + CoreEvent Atom = "core" +) diff --git a/gen/default.go b/gen/default.go new file mode 100644 index 00000000..4a4c41f7 --- /dev/null +++ b/gen/default.go @@ -0,0 +1,59 @@ +package gen + +import ( + "time" +) + +var ( + DefaultRequestTimeout = 5 + + DefaultCompressionType CompressionType = CompressionTypeGZIP + DefaultCompressionLevel CompressionLevel = CompressionDefault + DefaultCompressionThreshold int = 1024 + + DefaultLogFilter = []LogLevel{ + LogLevelTrace, + LogLevelDebug, + LogLevelInfo, + LogLevelWarning, + LogLevelError, + LogLevelPanic, + } + + DefaultKeepAlivePeriod time.Duration = 15 * time.Second + DefaultTCPBufferSize int = 65535 + DefaultPort uint16 = 11144 + + DefaultNetworkFlags = NetworkFlags{ + Enable: true, + EnableRemoteSpawn: true, + EnableRemoteApplicationStart: true, + EnableFragmentation: false, + EnableProxyTransit: false, + EnableProxyAccept: true, + EnableImportantDelivery: true, + } + + DefaultNetworkProxyFlags = NetworkProxyFlags{ + Enable: true, + EnableRemoteSpawn: false, + EnableRemoteApplicationStart: false, + EnableEncryption: false, + EnableImportantDelivery: true, + } + + DefaultLogLevels = []LogLevel{ + LogLevelSystem, + LogLevelTrace, + LogLevelDebug, + LogLevelInfo, + LogLevelWarning, + LogLevelError, + LogLevelPanic, + } +) + +const ( + LicenseMIT string = "MIT" + LicenseBSL1 string = "Business Source License 1.1" +) diff --git a/gen/errors.go b/gen/errors.go new file mode 100644 index 00000000..fcaebd39 --- /dev/null +++ b/gen/errors.go @@ -0,0 +1,58 @@ +package gen + +import ( + "errors" +) + +var ( + ErrNameUnknown = errors.New("unknown name") + ErrParentUnknown = errors.New("parent/leader is not set") + ErrNodeTerminated = errors.New("node terminated") + + ErrProcessMailboxFull = errors.New("process mailbox is full") + ErrProcessUnknown = errors.New("unknown process") + ErrProcessIncarnation = errors.New("process ID belongs to the previous incarnation") + ErrProcessTerminated = errors.New("process terminated") + + ErrMetaUnknown = errors.New("unknown meta process") + ErrMetaMailboxFull = errors.New("meta process mailbox is full") + + ErrApplicationUnknown = errors.New("unknown application") + ErrApplicationDepends = errors.New("dependency failure") + ErrApplicationState = errors.New("application is in running/stopping state") + ErrApplicationLoadPanic = errors.New("panic in application loading") + ErrApplicationEmpty = errors.New("application has no items") + ErrApplicationName = errors.New("application has no name") + ErrApplicationStopping = errors.New("application stopping is in progress") + ErrApplicationRunning = errors.New("application is still running") + + ErrTargetUnknown = errors.New("unknown target") + ErrTargetExist = errors.New("target already exists") + + ErrRegistrarTerminated = errors.New("registrar client terminated") + + ErrAliasUnknown = errors.New("unknown alias") + ErrAliasOwner = errors.New("not an owner") + ErrEventUnknown = errors.New("unknown event") + ErrEventOwner = errors.New("not an owner") + ErrTaken = errors.New("resource is taken") + + ErrAtomTooLong = errors.New("too long Atom (max: 255)") + + ErrTimeout = errors.New("timed out") + ErrUnsupported = errors.New("not supported") + ErrUnknown =
errors.New("unknown") + ErrNotAllowed = errors.New("not allowed") + + ErrIncorrect = errors.New("incorrect value or argument") + ErrMalformed = errors.New("malformed value") + ErrResponseIgnored = errors.New("response ignored") + ErrUnregistered = errors.New("unregistered") + ErrTooLarge = errors.New("too large") + + ErrNetworkStopped = errors.New("network stack is stopped") + ErrNoConnection = errors.New("no connection") + ErrNoRoute = errors.New("no route") + + ErrInternal = errors.New("internal error") +) diff --git a/gen/logger.go b/gen/logger.go new file mode 100644 index 00000000..29b94bc6 --- /dev/null +++ b/gen/logger.go @@ -0,0 +1,116 @@ +package gen + +import ( + "fmt" + "io" + "os" +) + +type Log interface { + Level() LogLevel + SetLevel(level LogLevel) error + + Logger() string + SetLogger(name string) + + Trace(format string, args ...any) + Debug(format string, args ...any) + Info(format string, args ...any) + Warning(format string, args ...any) + Error(format string, args ...any) + Panic(format string, args ...any) +} + +type LoggerBehavior interface { + Log(message MessageLog) + Terminate() +} + +// DefaultLoggerOptions +type DefaultLoggerOptions struct { + // Disable makes node to disable default logger + Disable bool + // TimeFormat enables output time in the defined format. See https://pkg.go.dev/time#pkg-constants + // Not defined format makes output time as a timestamp in nanoseconds. + TimeFormat string + // IncludeBehavior includes process/meta behavior to the log message + IncludeBehavior bool + // IncludeName includes registered process name to the log message + IncludeName bool + // Filter enables filtering log messages. + Filter []LogLevel + // Output defines output for the log messages. By default it uses os.Stdout + Output io.Writer +} + +// +// default logger for the Ergo Framework. It uses stdout as an output by default, but can be used +// any io.Writer. +// + +func CreateDefaultLogger(options DefaultLoggerOptions) LoggerBehavior { + var l defaultLogger + + l.out = options.Output + if l.out == nil { + l.out = os.Stdout + } + + l.format = options.TimeFormat + l.includeBehavior = options.IncludeBehavior + l.includeName = options.IncludeName + + return &l +} + +type defaultLogger struct { + out io.Writer + format string + includeBehavior bool + includeName bool +} + +func (l *defaultLogger) Log(m MessageLog) { + var t string + var source string + var behavior string + var name string + + if l.format == "" { + t = fmt.Sprintf("%d", m.Time.UnixNano()) + } else { + t = m.Time.Format(l.format) + } + + switch src := m.Source.(type) { + case MessageLogNode: + source = src.Node.CRC32() + case MessageLogNetwork: + source = fmt.Sprintf("%s-%s", src.Node.CRC32(), src.Peer.CRC32()) + case MessageLogProcess: + if l.includeBehavior { + behavior = " " + src.Behavior + } + if l.includeName && src.Name != "" { + name = " " + src.Name.String() + } + source = src.PID.String() + case MessageLogMeta: + if l.includeBehavior { + behavior = " " + src.Behavior + } + source = src.Meta.String() + default: + panic(fmt.Sprintf("unknown log source type: %#v", m.Source)) + } + + message := fmt.Sprintf(m.Format, m.Args...) 
+ _, err := fmt.Fprintf(l.out, "%s [%s] %s%s%s: %s\n", + t, m.Level, source, name, behavior, message) + if err != nil { + fmt.Printf("(fallback) %s [%s] %s%s%s: %s\n", + t, m.Level, source, name, behavior, message) + } +} + +func (l *defaultLogger) Terminate() {} diff --git a/gen/mailbox.go b/gen/mailbox.go new file mode 100644 index 00000000..0bf8f47f --- /dev/null +++ b/gen/mailbox.go @@ -0,0 +1,46 @@ +package gen + +import ( + "sync" +) + +type MailboxMessageType int + +const ( + MailboxMessageTypeRegular MailboxMessageType = 0 + MailboxMessageTypeRequest MailboxMessageType = 1 + MailboxMessageTypeEvent MailboxMessageType = 2 + MailboxMessageTypeExit MailboxMessageType = 3 + MailboxMessageTypeInspect MailboxMessageType = 4 // for the observer's purposes +) + +type MailboxMessage struct { + From PID + Ref Ref + Type MailboxMessageType + Target any + Message any +} + +var ( + mbm = &sync.Pool{ + New: func() any { + return &MailboxMessage{} + }, + } +) + +func TakeMailboxMessage() *MailboxMessage { + return mbm.Get().(*MailboxMessage) +} + +func ReleaseMailboxMessage(m *MailboxMessage) { + var emptyPID PID + var emptyRef Ref + m.Message = nil + m.Target = nil + m.Type = 0 + m.From = emptyPID + m.Ref = emptyRef + mbm.Put(m) +} diff --git a/gen/message.go b/gen/message.go new file mode 100644 index 00000000..d32786dc --- /dev/null +++ b/gen/message.go @@ -0,0 +1,130 @@ +package gen + +import "time" + +// MessageDownPID +type MessageDownPID struct { + PID PID + Reason error +} + +// MessageDownProcessID +type MessageDownProcessID struct { + ProcessID ProcessID + Reason error +} + +// MessageDownAlias +type MessageDownAlias struct { + Alias Alias + Reason error +} + +// MessageDownEvent +type MessageDownEvent struct { + Event Event + Reason error +} + +// MessageDownNode +type MessageDownNode struct { + Name Atom +} + +// MessageDownProxy +type MessageDownProxy struct { + Node Atom + Proxy Atom + Reason error +} + +// MessageExitPID +type MessageExitPID struct { + PID PID + Reason error +} + +// MessageExitProcessID +type MessageExitProcessID struct { + ProcessID ProcessID + Reason error +} + +// MessageExitAlias +type MessageExitAlias struct { + Alias Alias + Reason error +} + +// MessageExitEvent +type MessageExitEvent struct { + Event Event + Reason error +} + +// MessageExitNode +type MessageExitNode struct { + Name Atom +} + +// MessageFallback +type MessageFallback struct { + PID PID + Tag string + Target any + Message any +} + +// MessageEventStart +type MessageEventStart struct { + Name Atom +} + +// MessageEventStop +type MessageEventStop struct { + Name Atom +} + +// MessageEvent +type MessageEvent struct { + Event Event + Timestamp int64 + Message any +} + +// MessageLog +type MessageLog struct { + Time time.Time + Level LogLevel + Source any // MessageLogProcess, MessageLogNode, MessageLogNetwork, MessageLogMeta + Format string + Args []any +} + +// MessageLogProcess +type MessageLogProcess struct { + Node Atom + PID PID + Name Atom + Behavior string +} + +// MessageLogMeta +type MessageLogMeta struct { + Node Atom + Parent PID + Meta Alias + Behavior string +} + +// MessageLogNode +type MessageLogNode struct { + Node Atom + Creation int64 +} + +type MessageLogNetwork struct { + Node Atom + Peer Atom + Creation int64 +} diff --git a/gen/meta.go b/gen/meta.go new file mode 100644 index 00000000..8c8339be --- /dev/null +++ b/gen/meta.go @@ -0,0 +1,72 @@ +package gen + +import ( + "fmt" +) + +type MetaState int32 + +const ( + MetaStateSleep MetaState = 1 + MetaStateRunning MetaState = 
2 + MetaStateTerminated MetaState = 4 +) + +func (p MetaState) String() string { + switch p { + case MetaStateSleep: + return "sleep" + case MetaStateRunning: + return "running" + case MetaStateTerminated: + return "terminated" + } + return fmt.Sprintf("state#%d", int32(p)) +} +func (p MetaState) MarshalJSON() ([]byte, error) { + return []byte("\"" + p.String() + "\""), nil +} + +type MetaBehavior interface { + Init(process MetaProcess) error + Start() error + HandleMessage(from PID, message any) error + HandleCall(from PID, ref Ref, request any) (any, error) + Terminate(reason error) + + HandleInspect(from PID, item ...string) map[string]string +} + +type MetaProcess interface { + ID() Alias + Parent() PID + Send(to any, message any) error + SendImportant(to any, message any) error + SendWithPriority(to any, message any, priority MessagePriority) error + Spawn(behavior MetaBehavior, options MetaOptions) (Alias, error) + Env(name Env) (any, bool) + EnvList() map[Env]any + Log() Log +} + +type MetaOptions struct { + MailboxSize int64 + SendPriority MessagePriority + LogLevel LogLevel +} + +// MetaInfo +type MetaInfo struct { + ID Alias + Parent PID + Application Atom + Behavior string + MailboxSize int64 + MailboxQueues MailboxQueues + MessagePriority MessagePriority + MessagesIn uint64 + MessagesOut uint64 + LogLevel LogLevel + Uptime int64 + State MetaState +} diff --git a/gen/network.go b/gen/network.go new file mode 100644 index 00000000..b3ea5477 --- /dev/null +++ b/gen/network.go @@ -0,0 +1,481 @@ +package gen + +import ( + "encoding/binary" + "fmt" + "io" + "net" +) + +type Network interface { + Registrar() (Registrar, error) + Cookie() string + SetCookie(cookie string) error + MaxMessageSize() int + SetMaxMessageSize(size int) + NetworkFlags() NetworkFlags + SetNetworkFlags(flags NetworkFlags) + Acceptors() ([]Acceptor, error) + + // Node returns the existing connection with the given node + Node(name Atom) (RemoteNode, error) + // GetNode attempts to connect to the given node if the connection doesn't exist. + // Otherwise, it returns the existing connection. + GetNode(name Atom) (RemoteNode, error) + // GetNodeWithRoute attempts to connect to the given node using the provided route + GetNodeWithRoute(name Atom, route NetworkRoute) (RemoteNode, error) + + // Nodes returns the list of connected nodes + Nodes() []Atom + + // AddRoute adds a static route + AddRoute(match string, route NetworkRoute, weight int) error + RemoveRoute(match string) error + Route(name Atom) ([]NetworkRoute, error) + + AddProxyRoute(match string, proxy NetworkProxyRoute, weight int) error + RemoveProxyRoute(match string) error + ProxyRoute(name Atom) ([]NetworkProxyRoute, error) + + RegisterProto(proto NetworkProto) + RegisterHandshake(handshake NetworkHandshake) + + // EnableSpawn allows the given process to be started by the remote node(s). + // Leaving the "nodes" argument empty allows any remote node to spawn this process + EnableSpawn(name Atom, factory ProcessFactory, nodes ...Atom) error + DisableSpawn(name Atom, nodes ...Atom) error + + // EnableApplicationStart allows the given application to be started by the remote node(s). + // Leaving the "nodes" argument empty allows any remote node to start this application.
+ EnableApplicationStart(name Atom, nodes ...Atom) error + DisableApplicationStart(name Atom, nodes ...Atom) error + + Info() (NetworkInfo, error) + Mode() NetworkMode +} + +type RemoteNode interface { + Name() Atom + Uptime() int64 + ConnectionUptime() int64 + Version() Version + Info() RemoteNodeInfo + + Spawn(name Atom, options ProcessOptions, args ...any) (PID, error) + SpawnRegister(register Atom, name Atom, options ProcessOptions, args ...any) (PID, error) + + // ApplicationStart starts an application on the remote node. + // The starting mode is as defined in gen.ApplicationSpec.Mode + ApplicationStart(name Atom, options ApplicationOptions) error + + // ApplicationStartTemporary starts an application on the remote node in temporary mode, + // overriding the value of gen.ApplicationSpec.Mode + ApplicationStartTemporary(name Atom, options ApplicationOptions) error + + // ApplicationStartTransient starts an application on the remote node in transient mode, + // overriding the value of gen.ApplicationSpec.Mode + ApplicationStartTransient(name Atom, options ApplicationOptions) error + + // ApplicationStartPermanent starts an application on the remote node in permanent mode, + // overriding the value of gen.ApplicationSpec.Mode + ApplicationStartPermanent(name Atom, options ApplicationOptions) error + + Creation() int64 + + Disconnect() +} + +type Acceptor interface { + Cookie() string + SetCookie(cookie string) + NetworkFlags() NetworkFlags + SetNetworkFlags(flags NetworkFlags) + MaxMessageSize() int + SetMaxMessageSize(size int) + Info() AcceptorInfo +} + +type Connection interface { + Node() RemoteNode + + // Methods for sending async messages to the remote process + SendPID(from PID, to PID, options MessageOptions, message any) error + SendProcessID(from PID, to ProcessID, options MessageOptions, message any) error + SendAlias(from PID, to Alias, options MessageOptions, message any) error + + SendEvent(from PID, options MessageOptions, message MessageEvent) error + SendExit(from PID, to PID, reason error) error + SendResponse(from PID, to PID, options MessageOptions, response any) error + SendResponseError(from PID, to PID, options MessageOptions, err error) error + + // target terminated + SendTerminatePID(target PID, reason error) error + SendTerminateProcessID(target ProcessID, reason error) error + SendTerminateAlias(target Alias, reason error) error + SendTerminateEvent(target Event, reason error) error + + // Methods for sending sync requests to the remote process + CallPID(from PID, to PID, options MessageOptions, message any) error + CallProcessID(from PID, to ProcessID, options MessageOptions, message any) error + CallAlias(from PID, to Alias, options MessageOptions, message any) error + + // Links + LinkPID(pid PID, target PID) error + UnlinkPID(pid PID, target PID) error + + LinkProcessID(pid PID, target ProcessID) error + UnlinkProcessID(pid PID, target ProcessID) error + + LinkAlias(pid PID, target Alias) error + UnlinkAlias(pid PID, target Alias) error + + LinkEvent(pid PID, target Event) ([]MessageEvent, error) + UnlinkEvent(pid PID, target Event) error + + // Monitors + MonitorPID(pid PID, target PID) error + DemonitorPID(pid PID, target PID) error + + MonitorProcessID(pid PID, target ProcessID) error + DemonitorProcessID(pid PID, target ProcessID) error + + MonitorAlias(pid PID, target Alias) error + DemonitorAlias(pid PID, target Alias) error + + MonitorEvent(pid PID, target Event) ([]MessageEvent, error) + DemonitorEvent(pid PID, target Event) error + + RemoteSpawn(name
Atom, options ProcessOptionsExtra) (PID, error) + + Join(c net.Conn, id string, dial NetworkDial, tail []byte) error + Terminate(reason error) +} + +type NetworkMode int + +const ( + // NetworkModeEnabled is the default network mode for the node. It makes the node + // register on the registrar services, providing the port number for + // incoming connections + NetworkModeEnabled NetworkMode = 0 + + // NetworkModeHidden makes the node start the network with disabled acceptor(s) for incoming connections. + NetworkModeHidden NetworkMode = 1 + + // NetworkModeDisabled disables networking for the node entirely. + NetworkModeDisabled NetworkMode = -1 +) + +func (nm NetworkMode) String() string { + switch nm { + case NetworkModeEnabled: + return "enabled" + case NetworkModeHidden: + return "hidden" + case NetworkModeDisabled: + return "disabled" + } + + return fmt.Sprintf("unknown network mode %d", nm) +} + +func (nm NetworkMode) MarshalJSON() ([]byte, error) { + return []byte("\"" + nm.String() + "\""), nil +} + +// NetworkOptions +type NetworkOptions struct { + Mode NetworkMode + // Cookie + Cookie string + // Flags + Flags NetworkFlags + // Registrar default registrar for outgoing connections + Registrar Registrar + // Handshake sets the default handshake + Handshake NetworkHandshake + // Proto sets the default proto + Proto NetworkProto + + // Acceptors - the node can have multiple acceptors at once + Acceptors []AcceptorOptions + // InsecureSkipVerify skips the certificate verification + InsecureSkipVerify bool + // MaxMessageSize limits the size of incoming messages. + MaxMessageSize int + // ProxyAccept options for incoming proxy connections + ProxyAccept ProxyAcceptOptions + // ProxyTransit options for the proxy connections through this node + ProxyTransit ProxyTransitOptions + + // TODO + // FragmentationUnit chunk size in bytes + //FragmentationUnit int +} + +type ProxyAcceptOptions struct { + // Cookie sets cookie for incoming connections + Cookie string + // Flags sets options for incoming connections + Flags NetworkProxyFlags +} + +type ProxyTransitOptions struct { + // TODO + // proxy Routes + // access control + // etc +} + +// NetworkFlags +type NetworkFlags struct { + // Enable enables flags customization. + Enable bool + // EnableRemoteSpawn accepts remote spawn requests + EnableRemoteSpawn bool + // EnableRemoteApplicationStart accepts remote requests to start an application + EnableRemoteApplicationStart bool + // EnableFragmentation enables support for message fragmentation + EnableFragmentation bool + // EnableProxyTransit enables support for transit proxy connections + EnableProxyTransit bool + // EnableProxyAccept enables support for incoming proxy connections + EnableProxyAccept bool + // EnableImportantDelivery enables support for the 'important' flag + EnableImportantDelivery bool +} + +// We must be able to extend this structure by introducing new features. +// It is used in the handshake process. To keep compatibility, +// we use custom marshaling for this type.
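// Wire layout implemented below: 8 bytes carrying a big-endian uint64 bitmask,
// where bit 0 = Enable, bit 1 = EnableRemoteSpawn, bit 2 = EnableRemoteApplicationStart,
// bit 3 = EnableFragmentation, bit 4 = EnableProxyTransit, bit 5 = EnableProxyAccept,
// bit 6 = EnableImportantDelivery. An all-zero value means the flags are disabled entirely.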
+func (nf NetworkFlags) MarshalEDF(w io.Writer) error { + var flags uint64 + var buf [8]byte + if !nf.Enable { + w.Write(buf[:]) + return nil + } + flags = 1 // nf.Enable = true + if nf.EnableRemoteSpawn { + flags |= 2 + } + if nf.EnableRemoteApplicationStart { + flags |= 4 + } + if nf.EnableFragmentation { + flags |= 8 + } + if nf.EnableProxyTransit { + flags |= 16 + } + if nf.EnableProxyAccept { + flags |= 32 + } + if nf.EnableImportantDelivery { + flags |= 64 + } + binary.BigEndian.PutUint64(buf[:], flags) + w.Write(buf[:]) + return nil +} + +func (nf *NetworkFlags) UnmarshalEDF(buf []byte) error { + if len(buf) < 8 { + return fmt.Errorf("unable to unmarshal NetworkFlags") + } + flags := binary.BigEndian.Uint64(buf) + nf.Enable = (flags & 1) > 0 + if !nf.Enable { + return nil + } + nf.EnableRemoteSpawn = (flags & 2) > 0 + nf.EnableRemoteApplicationStart = (flags & 4) > 0 + nf.EnableFragmentation = (flags & 8) > 0 + nf.EnableProxyTransit = (flags & 16) > 0 + nf.EnableProxyAccept = (flags & 32) > 0 + nf.EnableImportantDelivery = (flags & 64) > 0 + return nil +} + +// NetworkProxyFlags +type NetworkProxyFlags struct { + Enable bool + EnableRemoteSpawn bool + EnableRemoteApplicationStart bool + EnableEncryption bool + EnableImportantDelivery bool +} + +func (npf NetworkProxyFlags) MarshalEDF(w io.Writer) error { + // TODO + return nil +} + +func (npf *NetworkProxyFlags) UnmarshalEDF(buf []byte) error { + // TODO + return nil +} + +type RemoteNodeInfo struct { + Node Atom + Uptime int64 + ConnectionUptime int64 + Version Version + + HandshakeVersion Version + ProtoVersion Version + + NetworkFlags NetworkFlags + + PoolSize int + PoolDSN []string + + MaxMessageSize int + MessagesIn uint64 + MessagesOut uint64 + + BytesIn uint64 + BytesOut uint64 + + TransitBytesIn uint64 + TransitBytesOut uint64 +} + +type AcceptorOptions struct { + // Cookie defines the cookie for incoming connections to this acceptor. Leave it empty + // to use the node's cookie. + Cookie string + // Host defines an interface for the listener. Default: taken from the node name. + Host string + // Port defines a listening port number for accepting incoming connections. Default 15000 + Port uint16 + // PortRange defines the range of ports for listening attempts, + // starting from Port and ending at Port+PortRange. + PortRange uint16 + // TCP defines the TCP network. By default only IPv4 is used. + // For IPv6 use "tcp6". To listen on any available address use "tcp" + TCP string + // BufferSize defines the buffer size for the TCP connection + BufferSize int + // MaxMessageSize sets the max message size, overriding gen.NetworkOptions.MaxMessageSize + MaxMessageSize int + + Flags NetworkFlags + AtomMapping map[Atom]Atom + + CertManager CertManager + InsecureSkipVerify bool + + Registrar Registrar + Handshake NetworkHandshake + Proto NetworkProto +} + +// NetworkHandshake defines the handshake interface +type NetworkHandshake interface { + NetworkFlags() NetworkFlags + // Start initiates the handshake process. + // The Cert value carries the CertManager that was used to create this connection + Start(NodeHandshake, net.Conn, HandshakeOptions) (HandshakeResult, error) + // Join is invoked within NetworkDial to shortcut the handshake process + Join(NodeHandshake, net.Conn, string, HandshakeOptions) ([]byte, error) + // Accept accepts a handshake process initiated by the other side of this connection.
+ Accept(NodeHandshake, net.Conn, HandshakeOptions) (HandshakeResult, error) + // Version + Version() Version +} + +type HandshakeOptions struct { + Cookie string + Flags NetworkFlags + CertManager CertManager + MaxMessageSize int +} + +type HandshakeResult struct { + HandshakeVersion Version + + ConnectionID string + Peer Atom + PeerCreation int64 + PeerVersion Version // peer's version (gen.Node.Version()) + PeerFlags NetworkFlags // peer's flags + PeerMaxMessageSize int + + NodeFlags NetworkFlags + NodeMaxMessageSize int + + AtomMapping map[Atom]Atom + + // Tail keeps anything left in the buffer after the handshake; it should be + // passed to the proto handler + Tail []byte + // Custom allows passing custom data to the proto handler + Custom any +} + +type NetworkDial func(dsn, id string) (net.Conn, []byte, error) + +type NetworkProto interface { + // NewConnection + NewConnection(core Core, result HandshakeResult, log Log) (Connection, error) + // Serve serves the connection. The dial argument is a closure that creates a TCP connection, + // invoking NetworkHandshake.Join inside to shortcut the handshake process + Serve(conn Connection, dial NetworkDial) error + // Version + Version() Version +} + +type NetworkInfo struct { + Mode NetworkMode + + Registrar RegistrarInfo + Acceptors []AcceptorInfo + MaxMessageSize int + HandshakeVersion Version + ProtoVersion Version + + Nodes []Atom + + Routes []RouteInfo + ProxyRoutes []ProxyRouteInfo + + Flags NetworkFlags + EnabledSpawn []NetworkSpawnInfo + EnabledApplicationStart []NetworkApplicationStartInfo +} + +type NetworkSpawnInfo struct { + Name Atom + Behavior string + Nodes []Atom +} + +type NetworkApplicationStartInfo struct { + Name Atom + Nodes []Atom +} + +type NetworkRoute struct { + Resolver Resolver + Route Route + + Cookie string + Cert CertManager + InsecureSkipVerify bool + Flags NetworkFlags + + AtomMapping map[Atom]Atom + + LogLevel LogLevel +} + +type NetworkProxyRoute struct { + Resolver Resolver + Route ProxyRoute + + Cookie string + Flags NetworkProxyFlags + MaxHop int // DefaultProxyMaxHop == 8 +}
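Tying these options together, a minimal sketch of enabling networking on an already started node (the function name and cookie value are illustrative; NetworkStart is part of the gen.Node interface defined below):

// startNetworking enables networking with a shared cookie, keeping the
// default registrar, handshake and proto.
func startNetworking(node gen.Node) error {
	opts := gen.NetworkOptions{
		Mode:   gen.NetworkModeEnabled,
		Cookie: "secret-cookie", // illustrative value
	}
	return node.NetworkStart(opts)
}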
diff --git a/gen/node.go b/gen/node.go new file mode 100644 index 00000000..00a315a4 --- /dev/null +++ b/gen/node.go @@ -0,0 +1,349 @@ +package gen + +import ( + "time" +) + +type Node interface { + // Name returns node name + Name() Atom + // IsAlive returns true if node is still alive + IsAlive() bool + // Uptime returns node uptime in seconds. Returns 0 if node is stopped. + Uptime() int64 + // Version returns node version + Version() Version + // FrameworkVersion returns framework version + FrameworkVersion() Version + // Info returns summary information about this node + Info() (NodeInfo, error) + // EnvList returns a map of configured Node environment variables. + EnvList() map[Env]any + // SetEnv sets the node environment variable with the given name. Use nil value to remove the variable with the given name. + SetEnv(name Env, value any) + // Env returns the value associated with the given environment name. + Env(name Env) (any, bool) + + // Spawn spawns a new process + Spawn(factory ProcessFactory, options ProcessOptions, args ...any) (PID, error) + // SpawnRegister spawns a new process and registers the associated name with it + SpawnRegister(register Atom, factory ProcessFactory, options ProcessOptions, args ...any) (PID, error) + + // RegisterName associates the name with the given PID so you can address messages + // to this process using gen.ProcessID{Name, Node}. Returns an error if this process + // already has a registered name. + RegisterName(name Atom, pid PID) error + + // UnregisterName unregisters the associated name. Returns the PID that this name belonged to. + UnregisterName(name Atom) (PID, error) + + // MetaInfo returns summary information about the given meta process + MetaInfo(meta Alias) (MetaInfo, error) + + // ProcessInfo returns short summary information about the given process + ProcessInfo(pid PID) (ProcessInfo, error) + + // ProcessList returns the list of the processes + ProcessList() ([]PID, error) + + // ProcessListShortInfo returns the list of the processes with short information + // for the given range of process identifiers (gen.PID.ID) + ProcessListShortInfo(start, limit int) ([]ProcessShortInfo, error) + + // ProcessState returns the state of the given process: + // - ProcessStateSleep (process has no messages) + // - ProcessStateRunning (process is handling its mailbox) + // - ProcessStateTerminated (final state of the process lifespan before it is + // removed from the node) + // - ProcessStateZombee (process was killed by the node being in the running state). + ProcessState(pid PID) (ProcessState, error) + + // ApplicationLoad loads the application into the node. To start it use the ApplicationStart method. + // Returns the name of the loaded application. Returns error gen.ErrTaken if the application name + // is already registered in the node. + ApplicationLoad(app ApplicationBehavior, args ...any) (Atom, error) + + // ApplicationInfo returns the short information about the given application. + // Returns error gen.ErrApplicationUnknown if it does not exist in the node + ApplicationInfo(name Atom) (ApplicationInfo, error) + + // ApplicationUnload unloads the application from the node. Returns gen.ErrApplicationRunning + // if the given application is already started (must be stopped before the unloading). + // Or returns error gen.ErrApplicationUnknown if it does not exist in the node. + ApplicationUnload(name Atom) error + + // ApplicationStart starts the application with its children processes. The starting mode + // is taken from gen.ApplicationSpec.Mode + ApplicationStart(name Atom, options ApplicationOptions) error + + // ApplicationStartTemporary starts the application in temporary mode overriding the value + // of gen.ApplicationSpec.Mode + ApplicationStartTemporary(name Atom, options ApplicationOptions) error + + // ApplicationStartTransient starts the application in transient mode overriding the value + // of gen.ApplicationSpec.Mode + ApplicationStartTransient(name Atom, options ApplicationOptions) error + + // ApplicationStartPermanent starts the application in permanent mode overriding the value + // of gen.ApplicationSpec.Mode + ApplicationStartPermanent(name Atom, options ApplicationOptions) error + + // ApplicationStop stops the given application, awaiting all children to be stopped. + // The default waiting time is 5 seconds. Returns error gen.ErrApplicationStopping + // if the application is still stopping. Once the application is stopped it can be unloaded + // from the node using ApplicationUnload. + ApplicationStop(name Atom) error + + // ApplicationStopForce kills all children without awaiting the termination of the children processes. + ApplicationStopForce(name Atom) error + + // ApplicationStopWithTimeout stops the given application, awaiting all children to be stopped + // during the given period of time. Returns gen.ErrApplicationStopping on timeout. + ApplicationStopWithTimeout(name Atom, timeout time.Duration) error + + // Applications returns the list of all applications (loaded and started).
+ Applications() []Atom + // ApplicationsRunning returns the list of all running applications + ApplicationsRunning() []Atom + + NetworkStart(options NetworkOptions) error + NetworkStop() error + Network() Network + + CertManager() CertManager + + Security() SecurityOptions + + Stop() + StopForce() + + // Wait waits for node termination + Wait() + // WaitWithTimeout waits for node termination within the given period of time + WaitWithTimeout(timeout time.Duration) error + + // Kill terminates the given process. Can be used for local processes only. + Kill(pid PID) error + + // Send sends a message to the given process. + Send(to any, message any) error + + // SendEvent sends an event message to the subscribers (to the processes that made link/monitor + // on this event). The event must be registered with the RegisterEvent method. + SendEvent(name Atom, token Ref, options MessageOptions, message any) error + + // RegisterEvent registers a new event. Returns a reference as the token + // for sending events. Unregistering this event is allowed for the node only. + // Sending an event can be done using the SendEvent method with the provided token. + RegisterEvent(name Atom, options EventOptions) (Ref, error) + + // UnregisterEvent unregisters an event. Can be used only for the events + // registered by the node. + UnregisterEvent(name Atom) error + + // SendExit sends a graceful termination request to the process. + SendExit(pid PID, reason error) error + + // Log returns gen.Log interface + Log() Log + + // LogLevelProcess returns the logging level for the given process + LogLevelProcess(pid PID) (LogLevel, error) + + // SetLogLevelProcess sets the logging level for the given process + SetLogLevelProcess(pid PID, level LogLevel) error + + // LogLevelMeta returns the logging level for the given meta process + LogLevelMeta(meta Alias) (LogLevel, error) + + // SetLogLevelMeta sets the logging level for the given meta process + SetLogLevelMeta(meta Alias, level LogLevel) error + + // Loggers returns the list of loggers + Loggers() []string + + // LoggerAddPID makes the process receive log messages gen.MessageLogNode, gen.MessageLogProcess + LoggerAddPID(pid PID, name string, filter ...LogLevel) error + + // LoggerAdd adds a custom logger + LoggerAdd(name string, logger LoggerBehavior, filter ...LogLevel) error + + // LoggerDeletePID removes the process from the loggers list + LoggerDeletePID(pid PID) + + // LoggerDelete removes the custom logger from the loggers list + LoggerDelete(name string) + + // LoggerLevels returns the list of log levels for the given logger + LoggerLevels(name string) []LogLevel + + // MakeRef creates a unique reference within this node + MakeRef() Ref + + // Commercial returns the list of component versions with a commercial license (gen.LicenseBSL1) + Commercial() []Version + + // PID returns the virtual PID of the core.
This PID is used as the source + // for the messages sent by the node using the methods Send, SendExit or SendEvent, + // and as the parent PID for the processes spawned by the node + PID() PID + Creation() int64 + + // SetCTRLC allows you to catch Ctrl+C to enable/disable the debug log level for the node. + // Press Ctrl+C twice to stop the node gracefully + SetCTRLC(enable bool) +} + +// NodeRegistrar bridge interface from Node to the Registrar +type NodeRegistrar interface { + Name() Atom + RegisterEvent(name Atom, options EventOptions) (Ref, error) + UnregisterEvent(name Atom) error + SendEvent(name Atom, token Ref, options MessageOptions, message any) error + Log() Log +} + +// NodeHandshake bridge interface from Node to the Handshake +type NodeHandshake interface { + Name() Atom + Creation() int64 + Version() Version +} + +// There is no NodeProto bridge interface. gen.Core is used for that. + +// NodeOptions defines bootstrapping options for the node +type NodeOptions struct { + // Applications is the list of applications that must be started + Applications []ApplicationBehavior + // Env node environment + Env map[Env]any + // Network + Network NetworkOptions + // CertManager + CertManager CertManager + // Security options + Security SecurityOptions + // Log options for the default logger + Log LogOptions + // Version sets the version details for your node + Version Version +} + +type SecurityOptions struct { + ExposeEnvInfo bool + // ExposeEnvRemoteSpawn makes remotely spawned processes inherit env from the parent process/node + ExposeEnvRemoteSpawn bool + ExposeEnvRemoteApplicationStart bool +} + +// LogOptions +type LogOptions struct { + // Level default logging level for the node + Level LogLevel + + // loggers options + + // DefaultLogger options + DefaultLogger DefaultLoggerOptions + + // Loggers add extra loggers on start + Loggers []Logger +} + +type Logger struct { + Name string + Logger LoggerBehavior + Filter []LogLevel +} + +type Compression struct { + // Enable enables compression for all outgoing messages having a size + // greater than the defined threshold. + Enable bool + // Type defines the type of compression. Use gen.CompressionTypeZLIB or gen.CompressionTypeLZW. By default gen.CompressionTypeGZIP is used + Type CompressionType + // Level defines the compression level. Use gen.CompressionBestSize or gen.CompressionBestSpeed. By default gen.CompressionDefault is used + Level CompressionLevel + // Threshold defines the minimal message size for compression. + // Messages smaller than this threshold will not be compressed.
+ Threshold int +} + +type CompressionLevel int +type CompressionType string + +func (cl CompressionLevel) String() string { + switch cl { + case CompressionBestSize: + return "best size" + case CompressionBestSpeed: + return "best speed" + case CompressionDefault: + return "default" + default: + return "unknown compression level" + } +} + +func (cl CompressionLevel) MarshalJSON() ([]byte, error) { + return []byte("\"" + cl.String() + "\""), nil +} + +func (ct CompressionType) ID() uint8 { + switch ct { + case CompressionTypeLZW: + return 100 + case CompressionTypeZLIB: + return 101 + case CompressionTypeGZIP: + return 102 + default: + return 0 + } +} + +const ( + CompressionDefault CompressionLevel = 0 + CompressionBestSpeed CompressionLevel = 1 + CompressionBestSize CompressionLevel = 2 + + CompressionTypeGZIP CompressionType = "gzip" + CompressionTypeLZW CompressionType = "lzw" + CompressionTypeZLIB CompressionType = "zlib" +) + +type NodeInfo struct { + Name Atom + Uptime int64 + Version Version + Framework Version + Commercial []Version + + Env map[Env]any // gen.NodeOptions.Security.ExposeEnvInfo must be enabled to reveal this data + LogLevel LogLevel + Loggers []LoggerInfo + + ProcessesTotal int64 + ProcessesRunning int64 + ProcessesZombee int64 + + RegisteredAliases int64 + RegisteredNames int64 + RegisteredEvents int64 + + ApplicationsTotal int64 + ApplicationsRunning int64 + + MemoryUsed uint64 + MemoryAlloc uint64 + + UserTime int64 + SystemTime int64 +} + +type LoggerInfo struct { + Name string + Behavior string + Levels []LogLevel +} diff --git a/gen/pool.go b/gen/pool.go deleted file mode 100644 index 023183c5..00000000 --- a/gen/pool.go +++ /dev/null @@ -1,131 +0,0 @@ -package gen - -import ( - "fmt" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type PoolBehavior interface { - ServerBehavior - - InitPool(process *PoolProcess, args ...etf.Term) (PoolOptions, error) -} - -type PoolProcess struct { - ServerProcess - options PoolOptions - workers []etf.Pid - monitors map[etf.Ref]int - i int -} - -type Pool struct { - Server -} - -type PoolOptions struct { - NumWorkers int - Worker PoolWorkerBehavior - WorkerOptions ProcessOptions - WorkerArgs []etf.Term -} - -func (p *Pool) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(PoolBehavior) - if !ok { - return fmt.Errorf("Pool: not a PoolBehavior") - } - - pool := &PoolProcess{ - ServerProcess: *process, - monitors: make(map[etf.Ref]int), - } - - // do not inherit parent State - pool.State = nil - poolOptions, err := behavior.InitPool(pool, args...) - if err != nil { - return err - } - - poolOptions.WorkerOptions.Context = process.Context() - pool.options = poolOptions - process.State = pool - - for i := 0; i < poolOptions.NumWorkers; i++ { - w, err := process.Spawn("", poolOptions.WorkerOptions, poolOptions.Worker, - poolOptions.WorkerArgs...) - if err != nil { - return err - } - - pool.workers = append(pool.workers, w.Self()) - ref := process.MonitorProcess(w.Self()) - pool.monitors[ref] = i - } - - return nil -} - -func (p *Pool) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - pool := process.State.(*PoolProcess) - msg := workerCallMessage{ - from: from, - message: message, - } - if err := p.send(pool, msg); err != nil { - lib.Warning("Pool (HandleCall): all workers are busy. 
Message dropped") - } - return nil, ServerStatusIgnore -} -func (p *Pool) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - pool := process.State.(*PoolProcess) - msg := workerCastMessage{ - message: message, - } - if err := p.send(pool, msg); err != nil { - lib.Warning("Pool (HandleCast): all workers are busy. Message dropped") - } - return ServerStatusOK -} -func (p *Pool) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - pool := process.State.(*PoolProcess) - switch m := message.(type) { - case MessageDown: - // worker terminated. restart it - - i, exist := pool.monitors[m.Ref] - if exist == false { - break - } - delete(pool.monitors, m.Ref) - w, err := process.Spawn("", pool.options.WorkerOptions, pool.options.Worker, - pool.options.WorkerArgs...) - if err != nil { - panicMessage := fmt.Sprintf("Pool: can't restart worker - %s", err) - panic(panicMessage) - } - pool.workers[i] = w.Self() - return ServerStatusOK - } - - if err := p.send(pool, message); err != nil { - lib.Warning("Pool (HandleInfo): all workers are busy. Message dropped") - } - - return ServerStatusOK -} - -func (p *Pool) send(pool *PoolProcess, message etf.Term) error { - for retry := 0; retry < pool.options.NumWorkers; retry++ { - pool.i++ - worker := pool.workers[pool.i%pool.options.NumWorkers] - if err := pool.Send(worker, message); err == nil { - return ServerStatusOK - } - } - - return fmt.Errorf("error") -} diff --git a/gen/pool_worker.go b/gen/pool_worker.go deleted file mode 100644 index c481d422..00000000 --- a/gen/pool_worker.go +++ /dev/null @@ -1,84 +0,0 @@ -package gen - -import ( - "fmt" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type workerCastMessage struct { - message etf.Term -} -type workerCallMessage struct { - from ServerFrom - message etf.Term -} - -type PoolWorkerBehavior interface { - ServerBehavior - InitPoolWorker(process *PoolWorkerProcess, args ...etf.Term) error - HandleWorkerInfo(process *PoolWorkerProcess, message etf.Term) - HandleWorkerCast(process *PoolWorkerProcess, message etf.Term) - HandleWorkerCall(process *PoolWorkerProcess, message etf.Term) etf.Term -} - -type PoolWorkerProcess struct { - ServerProcess -} - -type PoolWorker struct { - Server -} - -func (pw *PoolWorker) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(PoolWorkerBehavior) - - if !ok { - return fmt.Errorf("Pool: not a PoolWorkerBehavior") - } - - worker := &PoolWorkerProcess{ - ServerProcess: *process, - } - - worker.State = nil - - if err := behavior.InitPoolWorker(worker, args...); err != nil { - return err - } - - process.State = worker - return nil -} - -func (pw *PoolWorker) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - worker := process.State.(*PoolWorkerProcess) - behavior := worker.Behavior().(PoolWorkerBehavior) - switch m := message.(type) { - case workerCallMessage: - result := behavior.HandleWorkerCall(worker, m.message) - process.SendReply(m.from, result) - case workerCastMessage: - behavior.HandleWorkerCast(worker, m.message) - default: - behavior.HandleWorkerInfo(worker, message) - } - return ServerStatusOK -} - -// HandleWorkerInfo -func (pw *PoolWorker) HandleWorkerInfo(process *PoolWorkerProcess, message etf.Term) { - lib.Warning("HandleWorkerInfo: unhandled message %#v", message) -} - -// HandleWorkerCast -func (pw *PoolWorker) HandleWorkerCast(process *PoolWorkerProcess, message etf.Term) { - lib.Warning("HandleWorkerCast: unhandled message %#v", 
message) -} - -// HandleWorkerCall -func (pw *PoolWorker) HandleWorkerCall(process *PoolWorkerProcess, message etf.Term) etf.Term { - lib.Warning("HandleWorkerCall: unhandled message %#v", message) - return nil -} diff --git a/gen/process.go b/gen/process.go new file mode 100644 index 00000000..089e33c4 --- /dev/null +++ b/gen/process.go @@ -0,0 +1,495 @@ +package gen + +import ( + "errors" + "fmt" + "time" + + "ergo.services/ergo/lib" +) + +// ProcessBehavior interface contains methods you should implement to make your own process behavior +type ProcessBehavior interface { + ProcessInit(process Process, args ...any) error + ProcessRun() error + ProcessTerminate(reason error) +} + +type ProcessFactory func() ProcessBehavior +type CancelFunc func() bool + +type ProcessState int32 + +func (p ProcessState) String() string { + switch p { + case ProcessStateInit: + return "init" + case ProcessStateSleep: + return "sleep" + case ProcessStateRunning: + return "running" + case ProcessStateWaitResponse: + return "wait response" + case ProcessStateTerminated: + return "terminated" + case ProcessStateZombee: + return "zombee" + } + return fmt.Sprintf("state#%d", int32(p)) +} +func (p ProcessState) MarshalJSON() ([]byte, error) { + return []byte("\"" + p.String() + "\""), nil +} + +const ( + ProcessStateInit ProcessState = 1 + ProcessStateSleep ProcessState = 2 + ProcessStateRunning ProcessState = 4 + ProcessStateWaitResponse ProcessState = 8 + ProcessStateTerminated ProcessState = 16 + ProcessStateZombee ProcessState = 32 +) + +var ( + TerminateReasonNormal error = errors.New("normal") + TerminateReasonKill error = errors.New("kill") + TerminateReasonPanic error = errors.New("panic") + TerminateReasonShutdown error = errors.New("shutdown") +) + +// Process +type Process interface { + // Node returns Node interface + Node() Node + + // Name returns the registered name associated with this process + Name() Atom + + // PID returns the identifier belonging to the process + PID() PID + + // Leader returns the group leader process. Usually it points to the application (or supervisor) process. Otherwise, it has the same value as parent. + Leader() PID + + // Parent returns the parent process. + Parent() PID + + // Uptime returns the process uptime in seconds + Uptime() int64 + + // Spawn creates a child process. Terminating the parent process + // doesn't cause this process to terminate. + Spawn(factory ProcessFactory, options ProcessOptions, args ...any) (PID, error) + + // SpawnRegister creates a child process with an associated name. + SpawnRegister(register Atom, factory ProcessFactory, options ProcessOptions, args ...any) (PID, error) + + // SpawnMeta creates a meta process. The returned alias is associated with this process, and other + // processes can send messages (using the Send method) or make requests (with the Call method) + // to this meta process. + SpawnMeta(behavior MetaBehavior, options MetaOptions) (Alias, error) + + // RemoteSpawn makes a request to the remote node to spawn a new process. See also ProvideSpawn method. + RemoteSpawn(node Atom, name Atom, options ProcessOptions, args ...any) (PID, error) + // RemoteSpawnRegister makes a request to the remote node to spawn a new process and register it there + // with the given registered name. + RemoteSpawnRegister(node Atom, name Atom, register Atom, options ProcessOptions, args ...any) (PID, error) + + // State returns the current process state. Usually, it returns gen.ProcessStateRunning.
+ // But if the node has killed this process during the handling of its mailbox, + // it returns gen.ProcessStateZombee, which means this process won't receive any new messages, + // and most of the gen.Process methods stop working, returning gen.ErrNotAllowed. + State() ProcessState + + // RegisterName associates the name with the PID so you can address messages to this + // process using gen.ProcessID{Name, Node}. Returns an error if this process + // already has a registered name. + RegisterName(name Atom) error + + // UnregisterName unregisters the associated name. + UnregisterName() error + + // EnvList returns a map of configured environment variables. + // It also includes environment variables from the GroupLeader, Parent and Node, + // which are overlapped by priority: Process(Parent(GroupLeader(Node))) + EnvList() map[Env]any + + // SetEnv sets the environment variable with the given name. Use nil value to remove the variable with the given name. + SetEnv(name Env, value any) + + // Env returns the value associated with the given environment name. + Env(name Env) (any, bool) + + // Compression returns true if compression is enabled for this process. + Compression() bool + + // SetCompression enables/disables compression for the messages sent over the network + SetCompression(enabled bool) error + + // CompressionType returns the type of compression + CompressionType() CompressionType + // SetCompressionType defines the compression type. Use gen.CompressionTypeZLIB or gen.CompressionTypeLZW. By default gen.CompressionTypeGZIP is used + SetCompressionType(ctype CompressionType) error + + // CompressionLevel returns the compression level for the process + CompressionLevel() CompressionLevel + + // SetCompressionLevel defines the compression level. Use gen.CompressionBestSize or gen.CompressionBestSpeed. By default gen.CompressionDefault is used + SetCompressionLevel(level CompressionLevel) error + + // CompressionThreshold returns the compression threshold for the process + CompressionThreshold() int + + // SetCompressionThreshold defines the minimal size of a message that must be compressed. + // The value must be greater than DefaultCompressionThreshold (1024) + SetCompressionThreshold(threshold int) error + + // SendPriority returns the priority for outgoing messages + SendPriority() MessagePriority + + // SetSendPriority defines the priority for outgoing messages + SetSendPriority(priority MessagePriority) error + + // SetKeepNetworkOrder enables/disables keeping the delivery order over the network. In some cases disabling this option can improve network performance. + SetKeepNetworkOrder(order bool) error + + // KeepNetworkOrder returns true if it is enabled, otherwise false. Enabled by default. + KeepNetworkOrder() bool + + // SetImportantDelivery enables/disables the 'important' flag for outgoing messages.
This flag makes the remote node send a confirmation that the message was delivered to the process mailbox + SetImportantDelivery(important bool) error + // ImportantDelivery returns true if the ImportantDelivery flag was set for this process + ImportantDelivery() bool + + // CreateAlias creates a new alias associated with this process + CreateAlias() (Alias, error) + + // DeleteAlias deletes the given alias + DeleteAlias(alias Alias) error + + // Aliases returns the list of aliases associated with this process + Aliases() []Alias + + // Events returns the list of events registered by this process + Events() []Atom + + // Send sends a message + Send(to any, message any) error + SendPID(to PID, message any) error + SendProcessID(to ProcessID, message any) error + SendAlias(to Alias, message any) error + SendWithPriority(to any, message any, priority MessagePriority) error + SendImportant(to any, message any) error + + // SendAfter starts a timer. When the timer expires, the message is sent to the process + // identified by 'to'. Returns a cancel function in order to discard + // sending the message. CancelFunc returns a bool value. If it returns false, then the timer has + // already expired and the message has been sent. + SendAfter(to any, message any, after time.Duration) (CancelFunc, error) + + // SendEvent sends an event message to the subscribers (to the processes that made link/monitor + // on this event). The event must be registered with the RegisterEvent method. + SendEvent(name Atom, token Ref, message any) error + + // SendExit sends a graceful termination request to the process. + SendExit(to PID, reason error) error + + // SendExitMeta sends a graceful termination request to the meta process. + SendExitMeta(meta Alias, reason error) error + + SendResponse(to PID, ref Ref, message any) error + SendResponseError(to PID, ref Ref, err error) error + + // Call makes a sync request + Call(to any, message any) (any, error) + CallWithTimeout(to any, message any, timeout int) (any, error) + CallWithPriority(to any, message any, priority MessagePriority) (any, error) + CallImportant(to any, message any) (any, error) + CallPID(to PID, message any, timeout int) (any, error) + CallProcessID(to ProcessID, message any, timeout int) (any, error) + CallAlias(to Alias, message any, timeout int) (any, error) + + // Inspect sends an inspect request to the process. + Inspect(target PID, item ...string) (map[string]string, error) + // InspectMeta sends an inspect request to the meta process. + InspectMeta(meta Alias, item ...string) (map[string]string, error) + + // RegisterEvent registers a new event. Returns a reference as the token + // for sending events. Unregistering the event is allowed only for the process + // that registered it. Sending an event can be done in any other process + // by using the registered event name with the provided token (delegation of + // the event sending feature). + RegisterEvent(name Atom, options EventOptions) (Ref, error) + + // UnregisterEvent unregisters an event. It can be done by the process owner + // of this event only.
+ UnregisterEvent(name Atom) error + + // links + Link(target any) error + Unlink(target any) error + + LinkPID(target PID) error + UnlinkPID(target PID) error + + LinkProcessID(target ProcessID) error + UnlinkProcessID(target ProcessID) error + + LinkAlias(target Alias) error + UnlinkAlias(target Alias) error + + LinkEvent(target Event) ([]MessageEvent, error) + UnlinkEvent(target Event) error + + LinkNode(target Atom) error + UnlinkNode(target Atom) error + + // monitors + Monitor(target any) error + Demonitor(target any) error + + MonitorPID(pid PID) error + DemonitorPID(pid PID) error + + MonitorProcessID(process ProcessID) error + DemonitorProcessID(process ProcessID) error + + MonitorAlias(alias Alias) error + DemonitorAlias(alias Alias) error + + MonitorEvent(event Event) ([]MessageEvent, error) + DemonitorEvent(event Event) error + + MonitorNode(node Atom) error + DemonitorNode(node Atom) error + + // Log returns gen.Log interface + Log() Log + + // Info returns summary information about this process + Info() (ProcessInfo, error) + + // MetaInfo returns summary information about the given meta process + MetaInfo(meta Alias) (MetaInfo, error) + + // low level api (for gen.ProcessBehavior implementations) + + Mailbox() ProcessMailbox + Behavior() ProcessBehavior + Forward(to PID, message *MailboxMessage, priority MessagePriority) error +}
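Since gen.Process is the surface a behavior implementation works against, here is a minimal end-to-end sketch, assuming the act.Actor callback API (HandleMessage) and ergo.StartNode from this release; the actor and node names are illustrative:

package main

import (
	"ergo.services/ergo"
	"ergo.services/ergo/act"
	"ergo.services/ergo/gen"
)

// pong is a trivial actor: it logs whatever lands in its mailbox.
type pong struct {
	act.Actor
}

func factoryPong() gen.ProcessBehavior { return &pong{} }

func (p *pong) HandleMessage(from gen.PID, message any) error {
	p.Log().Info("got %v from %s", message, from)
	return nil
}

func main() {
	node, err := ergo.StartNode("demo@localhost", gen.NodeOptions{})
	if err != nil {
		panic(err)
	}
	pid, err := node.Spawn(factoryPong, gen.ProcessOptions{})
	if err != nil {
		panic(err)
	}
	node.Send(pid, "ping") // delivered to the actor's Mailbox.Main
	node.Wait()
}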
+ +type MessagePriority int + +func (mp MessagePriority) String() string { + switch mp { + case 0: + return "normal" + case 1: + return "high" + case 2: + return "max" + } + return "undefined" +} + +func (mp MessagePriority) MarshalJSON() ([]byte, error) { + return []byte("\"" + mp.String() + "\""), nil +} + +const MessagePriorityNormal MessagePriority = 0 // default +const MessagePriorityHigh MessagePriority = 1 +const MessagePriorityMax MessagePriority = 2 + +type MessageOptions struct { + Ref Ref + Priority MessagePriority + Compression Compression + KeepNetworkOrder bool + ImportantDelivery bool +} + +// ProcessOptions +type ProcessOptions struct { + // MailboxSize defines the length of the message queue for the process. Default is zero (unlimited) + MailboxSize int64 + // Leader + Leader PID + // Env sets the process environment variables + Env map[Env]any + + Compression Compression + + // SendPriority defines the priority of outgoing messages. + // The receiving actor handles its Mailbox in the following order: + // - Urgent + // - System + // - Main + // Setting this option to MessagePriorityHigh makes the node deliver messages + // to the Mailbox.System of the receiving process. + // With MessagePriorityMax they are delivered to Mailbox.Urgent. + // By default, messages are delivered to Mailbox.Main. + SendPriority MessagePriority + + // ImportantDelivery enables the 'important' flag for outgoing messages. This flag makes the remote node send a confirmation that the message was delivered to the process mailbox + ImportantDelivery bool + + // Fallback defines the process where messages will be forwarded to + // if the mailbox is overflowed. The tag value could be used to + // differentiate the source processes. Forwarded messages are wrapped + // into the MessageFallback struct with the given tag value. + // This option is ignored for the unlimited mailbox size + Fallback ProcessFallback + + // LinkParent creates a link with the parent process on start. + // It will make this process terminate on the parent process termination. + // This option is ignored if this process is started by the node. + LinkParent bool + + // LinkChild makes the node create a link with the spawning process. + // This feature allows you to link the parent process with the child even + // while it is in the init state. This option is ignored if this process is started by the node. + LinkChild bool + + // LogLevel defines logging level. Default is gen.LogLevelInfo + LogLevel LogLevel +} + +type ProcessOptionsExtra struct { + ProcessOptions + + ParentPID PID + ParentLeader PID + ParentEnv map[Env]any + ParentLogLevel LogLevel + + Register Atom + Application Atom + + Args []any +} + +// ProcessInfo +type ProcessInfo struct { + // PID process ID + PID PID + // Name is the registered name associated with this process + Name Atom + // Application is the application name if this process was started under an application umbrella + Application Atom + // Behavior + Behavior string + // MailboxSize + MailboxSize int64 + // MailboxQueues + MailboxQueues MailboxQueues + // MessagesIn total number of messages this process received + MessagesIn uint64 + // MessagesOut total number of messages this process sent + MessagesOut uint64 + // RunningTime how long this process was in the 'running' state, in ns + RunningTime uint64 + // Compression + Compression Compression + // MessagePriority is the priority for outgoing messages + MessagePriority MessagePriority + // Uptime of the process in seconds + Uptime int64 + // State shows the current state of the process + State ProcessState + // Parent points to the parent process that spawned this process as a child + Parent PID + // Leader usually points to the Supervisor or Application process + Leader PID + // Fallback + Fallback ProcessFallback + // Env process environment. gen.NodeOptions.Security.ExposeEnvInfo must be enabled to reveal this data + Env map[Env]any + // Aliases list of the aliases belonging to this process + Aliases []Alias + // Events list of the events this process is the owner of + Events []Atom + + // Metas list of meta processes + Metas []Alias + + // MonitorsPID list of processes monitored by this process by the PID + MonitorsPID []PID + // MonitorsProcessID list of processes monitored by this process by the name + MonitorsProcessID []ProcessID + // MonitorsAlias list of aliases monitored by this process + MonitorsAlias []Alias + // MonitorsEvent list of events monitored by this process + MonitorsEvent []Event + // MonitorsNode list of remote nodes monitored by this process + MonitorsNode []Atom + + // LinksPID list of the processes this process is linked with + LinksPID []PID + // LinksProcessID list of the processes this process is linked with by the name.
+ LinksProcessID []ProcessID + // LinksAlias list of the aliases this process is linked with + LinksAlias []Alias + // LinksEvent list of the events this process is linked with + LinksEvent []Event + // LinksNode list of the remote nodes this process is linked with + LinksNode []Atom + + // LogLevel current logging level + LogLevel LogLevel + // KeepNetworkOrder + KeepNetworkOrder bool + // ImportantDelivery + ImportantDelivery bool +} + +// ProcessShortInfo +type ProcessShortInfo struct { + // PID process ID + PID PID + // Name is the registered name associated with this process + Name Atom + // Application is the application name if this process was started under an application umbrella + Application Atom + // Behavior + Behavior string + // MessagesIn total number of messages this process received + MessagesIn uint64 + // MessagesOut total number of messages this process sent + MessagesOut uint64 + // MessagesMailbox total number of messages in mailbox queues + MessagesMailbox uint64 + // RunningTime how long this process was in the 'running' state, in ns + RunningTime uint64 + // Uptime of the process in seconds + Uptime int64 + // State shows the current state of the process + State ProcessState + // Parent points to the parent process that spawned this process as a child + Parent PID + // Leader usually points to the Supervisor or Application process + Leader PID + // LogLevel current logging level + LogLevel LogLevel +} + +// ProcessFallback +type ProcessFallback struct { + Enable bool + Name Atom + Tag string +} + +type ProcessMailbox struct { + Main lib.QueueMPSC + System lib.QueueMPSC + Urgent lib.QueueMPSC + Log lib.QueueMPSC +} + +type MailboxQueues struct { + Main int64 + System int64 + Urgent int64 + Log int64 +} diff --git a/gen/raft.go b/gen/raft.go deleted file mode 100644 index 4dd8b33c..00000000 --- a/gen/raft.go +++ /dev/null @@ -1,2616 +0,0 @@ -package gen - -import ( - "fmt" - "math/rand" - "sort" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -const ( - DefaultRaftGetTimeout = 5 // in seconds - DefaultRaftAppendTimeout = 5 // in seconds - DefaultRaftHeartbeat = 3 // in seconds -) - -var ( - ErrRaftState = fmt.Errorf("incorrect raft state") - ErrRaftNoQuorum = fmt.Errorf("no quorum") - ErrRaftNoLeader = fmt.Errorf("no leader") - ErrRaftNoSerial = fmt.Errorf("no peers with requested serial") - ErrRaftBusy = fmt.Errorf("another append request is in progress") - ErrRaftWrongTimeout = fmt.Errorf("wrong timeout value") -) - -type RaftBehavior interface { - ServerBehavior - // - // Mandatory callbacks - // - - InitRaft(process *RaftProcess, arr ...etf.Term) (RaftOptions, error) - - // HandleAppend. Invokes on append request. To cancel this request by a leader, it must return RaftStatusDiscard.
- HandleAppend(process *RaftProcess, ref etf.Ref, serial uint64, key string, value etf.Term) RaftStatus - - // HandleGet - HandleGet(process *RaftProcess, serial uint64) (string, etf.Term, RaftStatus) - - // - // Optional callbacks - // - - // HandlePeer - HandlePeer(process *RaftProcess, peer etf.Pid, serial uint64) RaftStatus - - // HandleQuorum - HandleQuorum(process *RaftProcess, quorum *RaftQuorum) RaftStatus - - // HandleLeader - HandleLeader(process *RaftProcess, leader *RaftLeader) RaftStatus - - // HandleCancel - HandleCancel(process *RaftProcess, ref etf.Ref, reason string) RaftStatus - - // HandleSerial - HandleSerial(process *RaftProcess, ref etf.Ref, serial uint64, key string, value etf.Term) RaftStatus - - // - // Server's callbacks - // - - // HandleRaftCall this callback is invoked on ServerProcess.Call. This method is optional - // for the implementation - HandleRaftCall(process *RaftProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - // HandleStageCast this callback is invoked on ServerProcess.Cast. This method is optional - // for the implementation - HandleRaftCast(process *RaftProcess, message etf.Term) ServerStatus - // HandleStageInfo this callback is invoked on Process.Send. This method is optional - // for the implementation - HandleRaftInfo(process *RaftProcess, message etf.Term) ServerStatus - // HandleRaftDirect this callback is invoked on Process.Direct. This method is optional - // for the implementation - HandleRaftDirect(process *RaftProcess, message interface{}) (interface{}, error) -} - -type RaftStatus error -type RaftQuorumState int - -var ( - RaftStatusOK RaftStatus // nil - RaftStatusStop RaftStatus = fmt.Errorf("stop") - RaftStatusDiscard RaftStatus = fmt.Errorf("discard") - - RaftQuorumState3 RaftQuorumState = 3 // minimum quorum that could make leader election - RaftQuorumState5 RaftQuorumState = 5 - RaftQuorumState7 RaftQuorumState = 7 - RaftQuorumState9 RaftQuorumState = 9 - RaftQuorumState11 RaftQuorumState = 11 // maximal quorum - - cleanVoteTimeout = 1 * time.Second - cleanLeaderVoteTimeout = 1 * time.Second - quorumChangeDeferMaxTime = 450 // in millisecond. uses as max value in range of 50.. 
-) - -type Raft struct { - Server -} - -type RaftProcess struct { - ServerProcess - options RaftOptions - behavior RaftBehavior - - quorum *RaftQuorum - quorumCandidates *quorumCandidates - quorumVotes map[RaftQuorumState]*quorum - quorumChangeDefer bool - quorumChangeAttempt int - - leader etf.Pid - election *leaderElection - round int // "log term" in terms of Raft spec - - // get requests - requests map[etf.Ref]CancelFunc - - // append requests - requestsAppend map[string]*requestAppend - requestsAppendQueue []requestAppendQueued - - // leader sends heartbeat messages and keep the last sending timestamp - heartbeatLeader int64 - heartbeatCancel CancelFunc -} - -type leaderElection struct { - votes map[etf.Pid]etf.Pid - results map[etf.Pid]bool - round int - leader etf.Pid // leader elected - voted int // number of peers voted for the leader - cancel CancelFunc -} - -type requestAppend struct { - ref etf.Ref - from etf.Pid - origin etf.Pid - value etf.Term - peers map[etf.Pid]bool - cancel CancelFunc -} - -type requestAppendQueued struct { - from etf.Pid - request *messageRaftRequestAppend -} - -type quorumCandidates struct { - candidates map[etf.Pid]*candidate -} - -type candidate struct { - monitor etf.Ref - serial uint64 - joined bool - heartbeat int64 - failures int -} - -type RaftLeader struct { - Leader etf.Pid - Serial uint64 - State RaftQuorumState -} - -type RaftQuorum struct { - Member bool - State RaftQuorumState - Peers []etf.Pid // the number of participants in quorum could be 3,5,7,9,11 -} -type quorum struct { - RaftQuorum - votes map[etf.Pid]int // 1 - sent, 2 - recv, 3 - sent and recv - origin etf.Pid // where the voting has come from. it must receive our voice in the last order - lastVote int64 // time.Now().UnixMilli() -} - -type RaftOptions struct { - ID string // raft cluster id - Peers []ProcessID - Serial uint64 // serial number ("log id" in terms of Raft spec) -} - -type messageRaft struct { - Request etf.Atom - Pid etf.Pid - Command interface{} -} - -type messageRaftClusterInit struct{} -type messageRaftClusterJoin struct { - ID string // cluster id - Serial uint64 -} -type messageRaftClusterJoinReply struct { - ID string // cluster id - Serial uint64 - Peers []etf.Pid - QuorumState int - QuorumPeers []etf.Pid -} -type messageRaftQuorumVote struct { - ID string // cluster id - Serial uint64 - State int - Candidates []etf.Pid -} -type messageRaftQuorumChange struct{} -type messageRaftQuorumBuilt struct { - ID string // cluster id - State int - Round int // last round - Peers []etf.Pid -} -type messageRaftQuorumLeave struct { - ID string - DueToPid etf.Pid -} - -type messageRaftQuorumCleanVote struct { - state RaftQuorumState -} - -type messageRaftLeaderHeartbeat struct { - ID string - Serial uint64 -} - -type messageRaftLeaderVote struct { - ID string // cluster id - State int //quorum state - Leader etf.Pid // offered leader - Round int -} -type messageRaftLeaderElected struct { - ID string // cluster id - Leader etf.Pid // elected leader - Voted int // number of votes for this leader - Round int -} - -type messageRaftRequestGet struct { - ID string // cluster id - Ref etf.Ref - Origin etf.Pid - Serial uint64 -} -type messageRaftRequestReply struct { - ID string // cluster id - Ref etf.Ref - Serial uint64 - Key string - Value etf.Term -} -type messageRaftRequestAppend struct { - ID string // cluster id - Ref etf.Ref - Origin etf.Pid - Key string - Value etf.Term - Deadline int64 // timestamp in milliseconds -} - -type messageRaftAppendReady struct { - ID string 
// cluster id - Ref etf.Ref - Key string -} - -type messageRaftAppendCommit struct { - ID string // cluster id - Ref etf.Ref - Key string - Serial uint64 - Broadcast etf.Pid // quorum member who is in charge of broadcasting -} - -type messageRaftAppendBroadcast struct { - ID string - Ref etf.Ref - Serial uint64 - Key string - Value etf.Term -} - -type messageRaftRequestClean struct { - ref etf.Ref -} -type messageRaftAppendClean struct { - key string - ref etf.Ref -} -type messageRaftElectionClean struct { - round int -} -type messageRaftHeartbeat struct{} - -// -// RaftProcess quorum routines and APIs -// - -// Join makes a join requst to the given peer, which is supposed to be in a raft cluster -func (rp *RaftProcess) Join(peer interface{}) error { - // QUODBG fmt.Println(rp.Name(), "CLU send join to", peer) - join := etf.Tuple{ - etf.Atom("$cluster_join"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - }, - } - return rp.Cast(peer, join) -} - -// Peers returns list of the processes in the raft cluster. Note, this list is sorted by the Serial value on them in the descending order -func (rp *RaftProcess) Peers() []etf.Pid { - return rp.quorumCandidates.List() -} - -// Quorum returns current quorum. It returns nil if quorum hasn't built yet. -func (rp *RaftProcess) Quorum() *RaftQuorum { - var q RaftQuorum - if rp.quorum == nil { - return nil - } - q.Member = rp.quorum.Member - q.State = rp.quorum.State - q.Peers = make([]etf.Pid, len(rp.quorum.Peers)) - for i := range rp.quorum.Peers { - q.Peers[i] = rp.quorum.Peers[i] - } - return &q -} - -// Leader returns current leader in the quorum. It returns nil If this process is not a quorum or if leader election is still in progress -func (rp *RaftProcess) Leader() *RaftLeader { - var leader RaftLeader - - if rp.quorum == nil || rp.quorum.Member == false { - return nil - } - - noLeader := etf.Pid{} - if rp.leader == noLeader { - return nil - } - leader.Leader = rp.leader - leader.State = rp.quorum.State - leader.Serial = rp.options.Serial - if rp.leader != rp.Self() { - // must be present among the peers - c := rp.quorumCandidates.GetOnline(rp.leader) - if c == nil { - panic("internal error. elected leader has been lost") - } - leader.Serial = c.serial - } - - return &leader -} - -// Get makes a request to the quorum member to get the data with the given serial number and -// sets the timeout to the DefaultRaftGetTimeout = 5 sec. It returns ErrRaftNoQuorum if quorum -// forming is still in progress. -func (rp *RaftProcess) Get(serial uint64) (etf.Ref, error) { - return rp.GetWithTimeout(serial, DefaultRaftGetTimeout) -} - -// Get makes a request to the quorum member to get the data with the given serial number and -// timeout in seconds. Returns a reference of this request. Once requested data has arrived -// the callback HandleSerial will be invoked. 
-// If a timeout occurred the callback HandleCancel will be invoked with reason "timeout" -func (rp *RaftProcess) GetWithTimeout(serial uint64, timeout int) (etf.Ref, error) { - var ref etf.Ref - if rp.quorum == nil { - return ref, ErrRaftNoQuorum - } - - peers := []etf.Pid{} - for _, pid := range rp.quorum.Peers { - if pid == rp.Self() { - continue - } - if c := rp.quorumCandidates.GetOnline(pid); c != nil { - if serial > c.serial { - continue - } - peers = append(peers, pid) - } - } - if len(peers) == 0 { - return ref, ErrRaftNoSerial - } - - // get random member of quorum and send the request - n := 0 - if len(peers) > 1 { - rand.Intn(len(peers) - 1) - } - peer := peers[n] - ref = rp.MakeRef() - requestGet := etf.Tuple{ - etf.Atom("$request_get"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - ref, - rp.Self(), // origin - serial, - }, - } - - if err := rp.Cast(peer, requestGet); err != nil { - return ref, err - } - cancel := rp.CastAfter(rp.Self, messageRaftRequestClean{ref: ref}, time.Duration(timeout)*time.Second) - rp.requests[ref] = cancel - return ref, nil -} - -// Append -func (rp *RaftProcess) Append(key string, value etf.Term) (etf.Ref, error) { - return rp.AppendWithTimeout(key, value, DefaultRaftAppendTimeout) -} - -// AppendWithTimeout -func (rp *RaftProcess) AppendWithTimeout(key string, value etf.Term, timeout int) (etf.Ref, error) { - var ref etf.Ref - if timeout < 1 { - return ref, ErrRaftWrongTimeout - } - - if _, exist := rp.requestsAppend[key]; exist { - return ref, ErrRaftBusy - } - if rp.quorum == nil { - return ref, ErrRaftNoQuorum - } - noLeader := etf.Pid{} - if rp.quorum.Member == true && rp.leader == noLeader { - return ref, ErrRaftNoLeader - } - t := int(time.Duration(timeout) * time.Second) - deadline := time.Now().Add(time.Duration(t - t/int(rp.quorum.State))).UnixMilli() - ref = rp.MakeRef() - - // if Append request has made on a leader - if rp.leader == rp.Self() { - // DBGAPN fmt.Println(rp.Self(), "DBGAPN append request", ref, "made on a leader") - dataAppend := &messageRaftRequestAppend{ - Ref: ref, - Origin: rp.Self(), - Key: key, - Value: value, - Deadline: deadline, - } - rp.handleAppendLeader(rp.Self(), dataAppend) - return ref, nil - } - - peer := rp.leader - // if Member == false => rp.leader == noLeader - if rp.quorum.Member == false { - // this raft process runs as a Client. 
send this request to the quorum member - n := rand.Intn(len(rp.quorum.Peers) - 1) - peer = rp.quorum.Peers[n] - deadline = time.Now().Add(time.Duration(t - t/(int(rp.quorum.State)+1))).UnixMilli() - } - dataAppend := etf.Tuple{ - etf.Atom("$request_append"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - ref, - rp.Self(), - key, - value, - deadline, - }, - } - // DBGAPN fmt.Println(rp.Self(), "DPGAPN sent $request_append", ref, "to the peer", peer) - if err := rp.Cast(peer, dataAppend); err != nil { - return ref, err - } - - peers := make(map[etf.Pid]bool) - if rp.quorum.Member == true { - // this process will be in charge of broadcasting - // so we should keep the set of peers in this quorum in order - // to exlude them on the broadcasting - for _, pid := range rp.quorum.Peers { - if pid == rp.Self() { - continue - } - peers[pid] = true - } - } - - clean := messageRaftAppendClean{key: key, ref: ref} - after := time.Duration(timeout) * time.Second - cancel := rp.CastAfter(rp.Self, clean, after) - requestAppend := &requestAppend{ - ref: ref, - origin: rp.Self(), - value: value, - peers: peers, - cancel: cancel, - } - rp.requestsAppend[key] = requestAppend - return ref, nil -} - -// Serial returns current value of serial for this raft process -func (rp *RaftProcess) Serial() uint64 { - return rp.options.Serial -} - -// private routines - -func (rp *RaftProcess) handleRaftRequest(m messageRaft) error { - switch m.Request { - case etf.Atom("$cluster_join"): - join := &messageRaftClusterJoin{} - if err := etf.TermIntoStruct(m.Command, &join); err != nil { - return lib.ErrUnsupportedRequest - } - - if join.ID != rp.options.ID { - // this peer belongs to another quorum id - return RaftStatusOK - } - - if rp.quorum != nil && rp.quorum.Member { - // if we got $cluster_join from a quorum member, it means - // the quorum we had belonging is not existed anymore - if rp.isQuorumMember(m.Pid) == true { - rp.quorum = nil - rp.handleQuorum() - rp.quorumChangeStart(false) - } - } - - rp.quorumCandidates.Set(rp, m.Pid) - rp.quorumCandidates.SetOnline(rp, m.Pid, join.Serial) - - if status := rp.behavior.HandlePeer(rp, m.Pid, join.Serial); status != RaftStatusOK { - return status - } - - // send peer list even if this peer is already present in our candidates list - // just to exchange updated data - peers := rp.quorumCandidates.List() - quorumState := 0 - quorumPeers := []etf.Pid{} - if rp.quorum != nil { - quorumState = int(rp.quorum.State) - quorumPeers = rp.quorum.Peers - } - reply := etf.Tuple{ - etf.Atom("$cluster_join_reply"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - rp.options.Serial, - peers, - quorumState, - quorumPeers, - }, - } - // QUODBG fmt.Println(rp.Name(), "GOT CLU JOIN from", m.Pid, "send peers", peers) - rp.Cast(m.Pid, reply) - return RaftStatusOK - - case etf.Atom("$cluster_join_reply"): - - reply := &messageRaftClusterJoinReply{} - if err := etf.TermIntoStruct(m.Command, &reply); err != nil { - return lib.ErrUnsupportedRequest - } - - if reply.ID != rp.options.ID { - // this peer belongs to another quorum id. ignore it. 
- return RaftStatusOK - } - - // QUODBG fmt.Println(rp.Name(), "GOT CLU JOIN REPL from", m.Pid, "got peers", reply.Peers) - canAcceptQuorum := true - - // check if there is another quorum in this cluster - if rp.quorum != nil { - // doesnt matter we compare the number of peers or quorum state - // reply.QuorumState <= rp.quorum.State - if len(reply.QuorumPeers) <= len(rp.quorum.Peers) { - canAcceptQuorum = false - } - } - - // check peers - for _, peer := range reply.Peers { - if peer == rp.Self() { - continue - } - // check if we dont have some of them among the online peers - if c := rp.quorumCandidates.GetOnline(peer); c != nil { - continue - } - rp.quorumCandidates.Set(rp, peer) - canAcceptQuorum = false - } - - rp.quorumCandidates.Set(rp, m.Pid) - rp.quorumCandidates.SetOnline(rp, m.Pid, reply.Serial) - - if status := rp.behavior.HandlePeer(rp, m.Pid, reply.Serial); status != RaftStatusOK { - return status - } - - // try to rebuild quorum since the number of peers has changed - rp.quorumChangeStart(false) - - // accept quorum if this peer is belongs to the existing quorum - // and set membership to false - switch RaftQuorumState(reply.QuorumState) { - case RaftQuorumState3, RaftQuorumState5: - break - case RaftQuorumState7, RaftQuorumState9, RaftQuorumState11: - break - default: - canAcceptQuorum = false - } - if canAcceptQuorum == true { - rp.election = nil - rp.quorum = &RaftQuorum{ - State: RaftQuorumState(reply.QuorumState), - Peers: reply.QuorumPeers, - Member: false, - } - return rp.handleQuorum() - } - return RaftStatusOK - - case etf.Atom("$quorum_vote"): - vote := &messageRaftQuorumVote{} - if err := etf.TermIntoStruct(m.Command, &vote); err != nil { - return lib.ErrUnsupportedRequest - } - if vote.ID != rp.options.ID { - // ignore this request - return RaftStatusOK - } - return rp.quorumVote(m.Pid, vote) - - case etf.Atom("$quorum_built"): - built := &messageRaftQuorumBuilt{} - if err := etf.TermIntoStruct(m.Command, &built); err != nil { - return lib.ErrUnsupportedRequest - } - // QUODBG fmt.Println(rp.Name(), "GOT QUO BUILT from", m.Pid) - if built.ID != rp.options.ID { - // this process is not belong this quorum - return RaftStatusOK - } - duplicates := make(map[etf.Pid]bool) - matchCandidates := true - for _, pid := range built.Peers { - if _, exist := duplicates[pid]; exist { - // duplicate found - return RaftStatusOK - } - if pid == rp.Self() { - panic("raft internal error. 
got quorum built message") - } - if c := rp.quorumCandidates.GetOnline(pid); c != nil { - c.failures = 0 - c.heartbeat = time.Now().Unix() - continue - } - rp.quorumCandidates.Set(rp, pid) - matchCandidates = false - } - if len(built.Peers) != built.State { - // ignore wrong peer list - lib.Warning("[%s] got quorum state that doesn't match the peer list", rp.Self()) - return RaftStatusOK - } - candidateQuorumState := RaftQuorumState3 - switch built.State { - case 11: - candidateQuorumState = RaftQuorumState11 - case 9: - candidateQuorumState = RaftQuorumState9 - case 7: - candidateQuorumState = RaftQuorumState7 - case 5: - candidateQuorumState = RaftQuorumState5 - case 3: - candidateQuorumState = RaftQuorumState3 - default: - // ignore wrong state - return RaftStatusOK - } - - rp.quorumChangeStart(false) - - if built.Round > rp.round { - // update rp.round - rp.round = built.Round - } - - // we accept the quorum if it was built using - // the peers we have registered as candidates - if matchCandidates == true { - rp.election = nil - if rp.quorum == nil { - rp.quorum = &RaftQuorum{} - rp.quorum.State = candidateQuorumState - rp.quorum.Member = false - rp.quorum.Peers = built.Peers - // QUODBG fmt.Println(rp.Name(), "QUO BUILT. NOT A MEMBER", rp.quorum.State, rp.quorum.Peers) - return rp.handleQuorum() - } - // QUODBG fmt.Println(rp.Name(), "QUO BUILT. NOT A MEMBER", rp.quorum.State, rp.quorum.Peers) - - changed := false - if rp.quorum.State != candidateQuorumState { - changed = true - } - rp.quorum.State = candidateQuorumState - - if rp.quorum.Member != false { - changed = true - } - rp.quorum.Member = false - - rp.quorum.Peers = built.Peers - if changed == true { - return rp.handleQuorum() - } - return RaftStatusOK - } - - if rp.quorum != nil { - rp.quorum = nil - rp.election = nil - return rp.handleQuorum() - } - return RaftStatusOK - - case etf.Atom("$leader_heartbeat"): - heartbeat := &messageRaftLeaderHeartbeat{} - if err := etf.TermIntoStruct(m.Command, &heartbeat); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != heartbeat.ID { - return RaftStatusOK - } - - c := rp.quorumCandidates.GetOnline(m.Pid) - if c == nil { - // HRTDBG fmt.Println(rp.Self(), "HRT from unknown/offline peer", m.Pid) - rp.quorumCandidates.Set(rp, m.Pid) - return RaftStatusOK - } - // HRTDBG fmt.Println(rp.Self(), "HRT from", m.Pid, "serial", c.serial) - c.heartbeat = time.Now().Unix() - c.serial = heartbeat.Serial - c.failures = 0 - return RaftStatusOK - - case etf.Atom("$quorum_leave"): - leave := &messageRaftQuorumLeave{} - if err := etf.TermIntoStruct(m.Command, &leave); err != nil { - return lib.ErrUnsupportedRequest - } - if rp.quorum == nil { - return RaftStatusOK - } - - if rp.options.ID != leave.ID { - return RaftStatusOK - } - - // check if it came from a quorum member - if rp.isQuorumMember(m.Pid) == false { - return RaftStatusOK - } - - // QUODBG fmt.Println(rp.Self(), "QUO got leave from", m.Pid, "due to", leave.DueToPid) - rp.quorumCandidates.SetOffline(rp, leave.DueToPid) - - member := rp.quorum.Member - rp.quorum = nil - rp.handleQuorum() - // only a quorum member can restart quorum building if one of the members has left - if member == true { - rp.quorumChangeStart(false) - } - return RaftStatusOK - - case etf.Atom("$leader_vote"): - vote := &messageRaftLeaderVote{} - if err := etf.TermIntoStruct(m.Command, &vote); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != vote.ID { - lib.Warning("[%s] ignore 'leader vote' message being not a member of the
given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - if rp.quorum == nil { - rp.election = nil - // no quorum - // LDRDBG fmt.Println(rp.Self(), "LDR NO QUO ignore vote from", m.Pid, "round", vote.Round, "for", vote.Leader) - // Seems we have received leader_vote before the quorum_built message. - // Ignore this vote but update its round value to start a new leader election. - // Otherwise, the new election will be started with the same round value but without - // the votes, which have been ignored before the quorum was built. - if vote.Round > rp.round { - rp.round = vote.Round - } - return RaftStatusOK - } - - if rp.quorum.State != RaftQuorumState(vote.State) { - // vote within another quorum. it seems the quorum has been changed during this election. - // ignore it - // LDRDBG fmt.Println(rp.Self(), "LDR ignore vote from", m.Pid, "with another quorum", vote.State, "current quorum", rp.quorum.State) - if vote.Round > rp.round { - rp.round = vote.Round - } - return RaftStatusOK - } - if rp.election != nil && rp.election.round > vote.Round { - // ignore it. the current election round is greater - // LDRDBG fmt.Println(rp.Self(), "LDR ignore vote from", m.Pid, "with round", vote.Round, "current election round", rp.election.round) - return RaftStatusOK - } - if rp.round > vote.Round { - // newbie is trying to start a new election :) - // LDRDBG fmt.Println(rp.Self(), "LDR ignore vote from newbie", m.Pid, "with round", vote.Round, "current round", rp.round) - return RaftStatusOK - } - - // check if m.Pid belongs to the quorum - belongs := false - for _, pid := range rp.quorum.Peers { - if pid == m.Pid { - belongs = true - break - } - } - - if belongs == false { - // there might be a case where we got the vote message before quorum_built - lib.Warning("[%s] ignore vote from the peer %s, which doesn't belong to the quorum", rp.Self(), m.Pid) - if vote.Round > rp.round { - rp.round = vote.Round - } - return RaftStatusOK - } - - // start new election - new_election := false - switch { - case rp.election == nil: - new_election = true - case rp.election != nil: - // TODO case with an existing leader within this quorum. if one of the quorum members - // gets a leader heartbeat timeout it starts a new election, but this process has no problem - // with the leader. - if vote.Round > rp.election.round { - // overwrite the election if it has a greater round number - rp.election.cancel() - new_election = true - } - } - if new_election { - // LDRDBG fmt.Println(rp.Self(), "LDR accept election from", m.Pid, "round", vote.Round, " with vote for:", vote.Leader) - rp.election = &leaderElection{ - votes: make(map[etf.Pid]etf.Pid), - results: make(map[etf.Pid]bool), - round: vote.Round, - } - rp.election.cancel = rp.CastAfter(rp.Self, messageRaftElectionClean{round: vote.Round}, cleanLeaderVoteTimeout) - rp.handleElectionVote() - } - - if _, exist := rp.election.votes[m.Pid]; exist { - lib.Warning("[%s] ignore duplicate vote for %s from %s during %d round", rp.Self(), - vote.Leader, m.Pid, vote.Round) - return RaftStatusOK - } - - rp.election.votes[m.Pid] = vote.Leader - // LDRDBG fmt.Println(rp.Self(), "LDR got vote from", m.Pid, "for", vote.Leader, "round", vote.Round, "quorum", vote.State) - if len(rp.quorum.Peers) != len(rp.election.votes) { - // make sure we got all votes - return RaftStatusOK - } - if len(rp.election.votes) != len(rp.quorum.Peers) { - // waiting for all votes from the quorum members - return RaftStatusOK - } - - // got all votes.
count them to get the quorum leader - countVotes := make(map[etf.Pid]int) - for _, vote_for := range rp.election.votes { - c, _ := countVotes[vote_for] - countVotes[vote_for] = c + 1 - } - leaderPid := etf.Pid{} - leaderVoted := 0 - leaderSplit := false - for leader, voted := range countVotes { - if leaderVoted == voted { - leaderSplit = true - continue - } - if leaderVoted < voted { - leaderVoted = voted - leaderPid = leader - leaderSplit = false - } - } - // LDRDBG fmt.Println(rp.Self(), "LDR got all votes. round", vote.Round, "quorum", vote.State) - if leaderSplit { - // LDRDBG fmt.Println(rp.Self(), "LDR got split votes. round", vote.Round, "quorum", vote.State) - // got more than one leader - // start new leader election with round++ - rp.handleElectionStart(vote.Round + 1) - return RaftStatusOK - } - - noLeader := etf.Pid{} - if rp.election.leader == noLeader { - rp.election.leader = leaderPid - rp.election.voted = leaderVoted - } else { - if rp.election.leader != leaderPid || rp.election.voted != leaderVoted { - // our result differs from the others we have already received - // start new leader election with round++ - lib.Warning("[%s] got different result from %s. cheating detected", rp.Self(), m.Pid) - rp.handleElectionStart(vote.Round + 1) - return RaftStatusOK - } - } - - // LDRDBG fmt.Println(rp.Self(), "LDR election done. round", rp.election.round, "Leader", leaderPid, "with", leaderVoted, "votes", "quorum", vote.State) - rp.election.results[rp.Self()] = true - - // send our choice to all quorum members - elected := etf.Tuple{ - etf.Atom("$leader_elected"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - leaderPid, - leaderVoted, - rp.election.round, - }, - } - for _, pid := range rp.quorum.Peers { - if pid == rp.Self() { - continue - } - rp.Cast(pid, elected) - // LDRDBG fmt.Println(rp.Self(), "LDR elected", leaderPid, "sent result to", pid, "wait the others") - } - - if len(rp.election.votes) != len(rp.election.results) { - // we should wait for results from all the election members - return RaftStatusOK - } - - // leader has been elected - // LDRDBG fmt.Println(rp.Self(), "LDR finished. leader", rp.election.leader, "round", rp.election.round, "quorum", rp.quorum.State) - rp.round = rp.election.round - rp.election.cancel() - if rp.leader != rp.election.leader { - rp.leader = rp.election.leader - l := rp.Leader() - rp.election = nil - return rp.behavior.HandleLeader(rp, l) - } - rp.election = nil - return RaftStatusOK - - case etf.Atom("$leader_elected"): - elected := &messageRaftLeaderElected{} - if err := etf.TermIntoStruct(m.Command, &elected); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != elected.ID { - lib.Warning("[%s] ignore 'leader elected' message being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - if rp.quorum == nil { - rp.election = nil - // no quorum - // LDRDBG fmt.Println(rp.Self, "LDR NO QUO ignore election result", elected, "from", m.Pid) - return RaftStatusOK - } - - if rp.election == nil { - lib.Warning("[%s] ignore election result from %s. no election on this peer", rp.Self(), m.Pid) - return RaftStatusOK - } - - if elected.Round != rp.election.round { - // round value must be the same.
it seems another election has started - lib.Warning("[%s] ignore election result from %s with another round value %d (current election round %d)", rp.Self(), m.Pid, elected.Round, rp.election.round) - if elected.Round > rp.round { - // update round value to the greatest one - rp.round = elected.Round - } - return RaftStatusOK - } - - noLeader := etf.Pid{} - if rp.election.leader == noLeader { - rp.election.leader = elected.Leader - rp.election.voted = elected.Voted - } else { - if rp.election.leader != elected.Leader || rp.election.voted != elected.Voted { - // elected leader must be the same in all election results - lib.Warning("[%s] ignore election result from %s with a different leader (it must be the same)", rp.Self(), m.Pid) - return RaftStatusOK - } - } - - if _, exist := rp.election.results[m.Pid]; exist { - // duplicate - lib.Warning("[%s] ignore duplicate election result from %s during %d round", rp.Self(), - m.Pid, elected.Round) - return RaftStatusOK - } - - if _, exist := rp.election.votes[m.Pid]; exist == false { - // Got election result before the vote from m.Pid - // Check if m.Pid belongs to the quorum - if rp.election.round > rp.round { - rp.round = rp.election.round - } - belongs := false - for _, pid := range rp.quorum.Peers { - if pid == m.Pid { - belongs = true - break - } - } - if belongs == false { - // got from unknown peer - lib.Warning("[%s] ignore election result from %s which doesn't belong to this quorum", rp.Self(), m.Pid) - return RaftStatusOK - } - - // keep it and wait for the vote from this peer - rp.election.results[m.Pid] = true - return RaftStatusOK - } - rp.election.results[m.Pid] = true - - if len(rp.quorum.Peers) != len(rp.election.votes) { - // make sure we got all votes - return RaftStatusOK - } - - if len(rp.election.votes) != len(rp.election.results) { - // we should wait for results from all the election members - return RaftStatusOK - } - - // leader has been elected - // LDRDBG fmt.Println(rp.Self(), "LDR finished. leader", rp.election.leader, "round", rp.election.round, "quorum", rp.quorum.State) - rp.election.cancel() // cancel timer - rp.round = rp.election.round - if rp.leader != rp.election.leader { - rp.leader = rp.election.leader - rp.election = nil - l := rp.Leader() - return rp.behavior.HandleLeader(rp, l) - } - rp.election = nil - return RaftStatusOK - - case etf.Atom("$request_get"): - requestGet := &messageRaftRequestGet{} - if err := etf.TermIntoStruct(m.Command, &requestGet); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != requestGet.ID { - lib.Warning("[%s] got 'get' request being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - if rp.quorum == nil { - // no quorum - return RaftStatusOK - } - - if rp.quorum.Member == false { - // not a quorum member. can't handle this request - lib.Warning("[%s] got 'get' request being not a member of the quorum (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - //fmt.Println(rp.Self(), "GET request", requestGet.Ref, "from", m.Pid, "serial", requestGet.Serial) - - key, value, status := rp.behavior.HandleGet(rp, requestGet.Serial) - if status != RaftStatusOK { - // do nothing - return status - } - if value == nil { - // not found. - if m.Pid != requestGet.Origin { - // it's an already forwarded request.
just ignore it - return RaftStatusOK - } - - // forward this request to another quorum member - forwardGet := etf.Tuple{ - etf.Atom("$request_get"), - rp.Self(), - etf.Tuple{ - requestGet.ID, - requestGet.Ref, - requestGet.Origin, - requestGet.Serial, - }, - } - - // get a random quorum member excluding m.Pid and requestGet.Origin - peers := []etf.Pid{} - for _, pid := range rp.quorum.Peers { - if pid == m.Pid { - continue - } - if pid == requestGet.Origin { - continue - } - if pid == rp.Self() { - continue - } - peers = append(peers, pid) - } - - if len(peers) == 0 { - return RaftStatusOK - } - - n := 0 - if len(peers) > 1 { - n = rand.Intn(len(peers) - 1) - } - peer := peers[n] - //fmt.Println(rp.Self(), "GET forward", requestGet.Ref, "to", peer, "serial", requestGet.Serial) - rp.Cast(peer, forwardGet) - return RaftStatusOK - } - - requestReply := etf.Tuple{ - etf.Atom("$request_reply"), - rp.Self(), - etf.Tuple{ - requestGet.ID, - requestGet.Ref, - requestGet.Serial, - key, - value, - }, - } - rp.Cast(requestGet.Origin, requestReply) - - // update the serial of this peer - if c := rp.quorumCandidates.GetOnline(requestGet.Origin); c != nil { - if c.serial < requestGet.Serial { - c.serial = requestGet.Serial - } - } else { - rp.quorumCandidates.Set(rp, requestGet.Origin) - } - return RaftStatusOK - - case etf.Atom("$request_reply"): - requestReply := &messageRaftRequestReply{} - if err := etf.TermIntoStruct(m.Command, &requestReply); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != requestReply.ID { - lib.Warning("[%s] got 'reply' being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - cancel, exist := rp.requests[requestReply.Ref] - if exist == false { - // might be timed out already. do nothing - return RaftStatusOK - } - // cancel timer - cancel() - if rp.options.Serial < requestReply.Serial { - rp.options.Serial = requestReply.Serial - } - // call HandleSerial - return rp.behavior.HandleSerial(rp, requestReply.Ref, requestReply.Serial, - requestReply.Key, requestReply.Value) - - case etf.Atom("$request_append"): - requestAppend := &messageRaftRequestAppend{} - if err := etf.TermIntoStruct(m.Command, &requestAppend); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != requestAppend.ID { - lib.Warning("[%s] got 'append' request being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - if rp.quorum == nil { - // no quorum. ignore it - return RaftStatusOK - } - - // - // There are 3 options: - // - - // 1) This process is a leader -> handleAppendLeader() - // a) increment serial. send this request to all quorum members (except the origin peer) - // b) wait for the request_append_ready from the quorum peers - // c) call the callback HandleAppend - // d) send request_append_commit(serial) to all quorum members (including the origin peer) - if rp.leader == rp.Self() { - return rp.handleAppendLeader(m.Pid, requestAppend) - } - - // 2) This process is not a leader, is a quorum member, and the request has - // been received from the leader -> handleAppendQuorum() - // a) accept this request and reply with request_append_ready - // b) wait for the request_append_commit - // c) call the callback HandleAppend - // d) send request_append to the peers that are not in the quorum - if rp.quorum.Member == true && m.Pid == rp.leader { - return rp.handleAppendQuorum(requestAppend) - } - - // 3) This process is neither a leader nor a quorum member.
- // Or this process is a quorum member but the request was received not from - // the leader of this quorum. - // It could also happen if the quorum changed while this request was being delivered. - - // Forward this request to a quorum member (if this process is not a quorum member) - // or to the leader (if this process is a quorum member) - - forwardAppend := etf.Tuple{ - etf.Atom("$request_append"), - rp.Self(), - etf.Tuple{ - requestAppend.ID, - requestAppend.Ref, - requestAppend.Origin, - requestAppend.Key, - requestAppend.Value, - requestAppend.Deadline, - }, - } - - if rp.quorum.Member == true { - // DBGAPN fmt.Println(rp.Self(), "DPGAPN forward $request_append", requestAppend.Ref, "to the leader", rp.leader) - noLeader := etf.Pid{} - if rp.leader == noLeader { - // no leader in this quorum yet. ignore this request - return RaftStatusOK - } - // This request was received not from the quorum leader. - // Forward this request to the leader - rp.Cast(rp.leader, forwardAppend) - return RaftStatusOK - } - - // exclude requestAppend.Origin and m.Pid - peers := []etf.Pid{} - for _, pid := range rp.quorum.Peers { - if pid == m.Pid { - continue - } - if pid == requestAppend.Origin { - continue - } - peers = append(peers, pid) - } - n := rand.Intn(len(peers) - 1) - peer := peers[n] - // DBGAPN fmt.Println(rp.Self(), "DPGAPN forward $request_append", requestAppend.Ref, "to the quorum member", peer) - rp.Cast(peer, forwardAppend) - return RaftStatusOK - - case etf.Atom("$request_append_ready"): - appendReady := &messageRaftAppendReady{} - if err := etf.TermIntoStruct(m.Command, &appendReady); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != appendReady.ID { - lib.Warning("[%s] got 'append_ready' message being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - if rp.quorum == nil { - // no quorum. ignore it - return RaftStatusOK - } - - requestAppend, exist := rp.requestsAppend[appendReady.Key] - if exist == false { - // a timeout might have happened. ignore this message - return RaftStatusOK - } - - if requestAppend.ref != appendReady.Ref { - // a timeout might have happened for the previous append request for this key, - // and another append request arrived while it was being handled - return RaftStatusOK - } - - if rp.leader != rp.Self() { - // I'm not a leader. it seems a leader election happened while this request was being handled - requestAppend.cancel() - delete(rp.requestsAppend, appendReady.Key) - return RaftStatusOK - } - requestAppend.peers[m.Pid] = true - commit := true - for _, confirmed := range requestAppend.peers { - if confirmed { - continue - } - commit = false - break - } - - if commit == false { - return RaftStatusOK - } - - // received confirmations from all the peers involved in this append handling.
- // call HandleAppend - status := rp.behavior.HandleAppend(rp, requestAppend.ref, rp.options.Serial+1, - appendReady.Key, requestAppend.value) - switch status { - case RaftStatusOK: - rp.options.Serial++ - // send them $request_append_commit including the origin - request := etf.Tuple{ - etf.Atom("$request_append_commit"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - requestAppend.ref, - appendReady.Key, - rp.options.Serial, - requestAppend.from, - }, - } - for pid, _ := range requestAppend.peers { - if pid == rp.Self() { - continue - } - rp.Cast(pid, request) - // DBGAPN fmt.Println(rp.Self(), "DBGAPN sent append_commit to", pid, "with serial", rp.options.Serial) - if c := rp.quorumCandidates.GetOnline(pid); c != nil { - if c.serial < rp.options.Serial { - c.serial = rp.options.Serial - } - } - } - requestAppend.cancel() - delete(rp.requestsAppend, appendReady.Key) - if requestAppend.from == rp.Self() { - rp.handleBroadcastCommit(appendReady.Key, requestAppend, rp.options.Serial) - } - if len(rp.requestsAppendQueue) == 0 { - return RaftStatusOK - } - - // handle queued append request - handled := 0 - for i := range rp.requestsAppendQueue { - handled = i - queued := rp.requestsAppendQueue[i] - if queued.request.Deadline < time.Now().UnixMilli() { - // expired request - lib.Warning("[%s] append request %s has expired", rp.Self(), queued.request.Ref) - continue - } - rp.handleAppendLeader(queued.from, queued.request) - break - } - rp.requestsAppendQueue = rp.requestsAppendQueue[handled+1:] - if len(rp.requestsAppendQueue) == 0 { - rp.requestsAppendQueue = nil - } - return RaftStatusOK - - case RaftStatusDiscard: - requestAppend.cancel() - delete(rp.requestsAppend, appendReady.Key) - return RaftStatusOK - } - - return status - - case etf.Atom("$request_append_commit"): - appendCommit := &messageRaftAppendCommit{} - if err := etf.TermIntoStruct(m.Command, &appendCommit); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != appendCommit.ID { - lib.Warning("[%s] got 'append_commit' message being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - requestAppend, exist := rp.requestsAppend[appendCommit.Key] - if exist == false { - // it seems a timeout happened and this request was cleaned up - return RaftStatusOK - } - requestAppend.cancel() - delete(rp.requestsAppend, appendCommit.Key) - - if rp.options.Serial >= appendCommit.Serial { - lib.Warning("[%s] got append commit with serial (%d) less than or equal to ours (%d). fork happened.
stopping this process", rp.Self(), appendCommit.Serial, rp.options.Serial) - return fmt.Errorf("raft fork happened") - } - - rp.options.Serial = appendCommit.Serial - status := rp.behavior.HandleAppend(rp, requestAppend.ref, appendCommit.Serial, - appendCommit.Key, requestAppend.value) - if status == RaftStatusDiscard { - lib.Warning("[%s] RaftStatusDiscard can be used by a leader only", rp.Self()) - status = RaftStatusOK - } - if appendCommit.Broadcast != rp.Self() { - return status - } - - rp.handleBroadcastCommit(appendCommit.Key, requestAppend, appendCommit.Serial) - return status - - case etf.Atom("$request_append_broadcast"): - broadcast := &messageRaftAppendBroadcast{} - if err := etf.TermIntoStruct(m.Command, &broadcast); err != nil { - return lib.ErrUnsupportedRequest - } - - if rp.options.ID != broadcast.ID { - lib.Warning("[%s] got 'append_broadcast' message being not a member of the given raft cluster (from %s)", rp.Self(), m.Pid) - return RaftStatusOK - } - - rp.options.Serial = broadcast.Serial - return rp.behavior.HandleAppend(rp, broadcast.Ref, broadcast.Serial, - broadcast.Key, broadcast.Value) - - } - - return lib.ErrUnsupportedRequest -} - -func (rp *RaftProcess) handleElectionStart(round int) { - if rp.quorum == nil { - // no quorum. can't start election - return - } - if rp.quorum.Member == false { - // not a quorum member - return - } - if rp.election != nil { - if rp.election.round >= round { - // already in progress - return - } - rp.election.cancel() - } - if rp.round > round { - round = rp.round - } - // LDRDBG fmt.Println(rp.Self(), "LDR start. round", round, "Q", rp.quorum.State) - rp.election = &leaderElection{ - votes: make(map[etf.Pid]etf.Pid), - results: make(map[etf.Pid]bool), - round: round, - } - rp.handleElectionVote() - cancel := rp.CastAfter(rp.Self, messageRaftElectionClean{round: round}, cleanLeaderVoteTimeout) - rp.election.cancel = cancel -} - -func (rp *RaftProcess) handleElectionVote() { - if rp.quorum == nil || rp.election == nil { - return - } - - mapPeers := make(map[etf.Pid]bool) - for _, p := range rp.quorum.Peers { - mapPeers[p] = true - } - - voted_for := etf.Pid{} - c := rp.quorumCandidates.List() // ordered by serial in desc order - for _, pid := range c { - // check if this candidate is a member of the quorum - if _, exist := mapPeers[pid]; exist == false { - continue - } - // get the first member since it has the biggest serial - voted_for = pid - break - } - - // LDRDBG fmt.Println(rp.Self(), "LDR voted for:", voted_for, "quorum", rp.quorum.State) - leaderVote := etf.Tuple{ - etf.Atom("$leader_vote"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - int(rp.quorum.State), - voted_for, - rp.election.round, - }, - } - for _, pid := range rp.quorum.Peers { - if pid == rp.Self() { - continue - } - // LDRDBG fmt.Println(rp.Self(), "LDR sent vote for", voted_for, "to", pid, "round", rp.election.round, "quorum", rp.quorum.State) - rp.Cast(pid, leaderVote) - } - rp.election.votes[rp.Self()] = voted_for -} - -func (rp *RaftProcess) handleBroadcastCommit(key string, request *requestAppend, serial uint64) { - // DBGAPN fmt.Println(rp.Self(), "broadcasting", request.ref) - // the origin process is in charge of broadcasting this result among - // the peers who aren't quorum members.
- commit := etf.Tuple{ - etf.Atom("$request_append_broadcast"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - request.ref, - serial, - key, - request.value, - }, - } - allPeers := rp.quorumCandidates.List() - for _, pid := range allPeers { - if _, exist := request.peers[pid]; exist { - continue - } - if pid == rp.Self() { - continue - } - rp.Cast(pid, commit) - // DBGAPN fmt.Println(rp.Self(), "DBGAPN sent $request_append_broadcast", request.ref, "to", pid) - c := rp.quorumCandidates.GetOnline(pid) - if c != nil && c.serial < serial { - c.serial = serial - } - } -} - -func (rp *RaftProcess) handleAppendLeader(from etf.Pid, request *messageRaftRequestAppend) RaftStatus { - // DBGAPN fmt.Println(rp.Self(), "DBGAPN handle append", request.Ref, "on leader.", request.Key, request.Value) - if _, exist := rp.requestsAppend[request.Key]; exist { - // another append request with this key is still in progress. append to the queue - queued := requestAppendQueued{ - from: from, - request: request, - } - rp.requestsAppendQueue = append(rp.requestsAppendQueue, queued) - lq := len(rp.requestsAppendQueue) - if lq > 10 { - lib.Warning("[%s] append request queue is getting long. queued requests: %d", rp.Self(), lq) - } - return RaftStatusOK - } - now := time.Now().UnixMilli() - if now >= request.Deadline { - // the deadline has passed. ignore this request - return RaftStatusOK - } - - sendRequestAppend := etf.Tuple{ - etf.Atom("$request_append"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - request.Ref, - request.Origin, - request.Key, - request.Value, - request.Deadline, - }, - } - - peers := make(map[etf.Pid]bool) - for _, pid := range rp.quorum.Peers { - if pid == rp.Self() { - continue - } - peers[pid] = false - if pid == request.Origin { - peers[pid] = true // do not wait append_ready for the Origin - continue - } - rp.Cast(pid, sendRequestAppend) - } - - // if 'from' is not a quorum member the leader is in charge of broadcasting - if _, exist := peers[from]; exist == false { - from = rp.Self() - } - - after := time.Duration(request.Deadline-now) * time.Millisecond - clean := messageRaftAppendClean{key: request.Key, ref: request.Ref} - cancel := rp.CastAfter(rp.Self(), clean, after) - requestAppend := &requestAppend{ - ref: request.Ref, - from: from, - origin: request.Origin, - value: request.Value, - peers: peers, - cancel: cancel, - } - rp.requestsAppend[request.Key] = requestAppend - - return RaftStatusOK -} - -func (rp *RaftProcess) handleAppendQuorum(request *messageRaftRequestAppend) RaftStatus { - // DBGAPN fmt.Println(rp.Self(), "DBGAPN handle append", request.Ref, "on a quorum member.", request.Key, request.Value) - if r, exist := rp.requestsAppend[request.Key]; exist { - r.cancel() - delete(rp.requestsAppend, request.Key) - } - - ready := etf.Tuple{ - etf.Atom("$request_append_ready"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - request.Ref, - request.Key, - }, - } - rp.Cast(rp.leader, ready) - clean := messageRaftAppendClean{key: request.Key, ref: request.Ref} - after := time.Duration(DefaultRaftAppendTimeout) * time.Second - if d := time.Duration(request.Deadline-time.Now().UnixMilli()) * time.Millisecond; d > after { - after = d - } - cancel := rp.CastAfter(rp.Self, clean, after) - - peers := make(map[etf.Pid]bool) - for _, pid := range rp.quorum.Peers { - peers[pid] = true - } - - requestAppend := &requestAppend{ - ref: request.Ref, - origin: request.Origin, - value: request.Value, - peers: peers, - cancel: cancel, - } - rp.requestsAppend[request.Key] = requestAppend - return RaftStatusOK
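To summarize the append flow spelled out in the three numbered options earlier, the routing decision on a receiving process reduces to a few checks. A minimal sketch, assuming only the RaftProcess fields shown in this file (the helper itself is illustrative, not part of the original source):

// appendDispatch mirrors the $request_append routing described above:
// the leader handles the request, a quorum member forwards it to the leader,
// and anyone else forwards it to a random quorum member.
func appendDispatch(rp *RaftProcess, from etf.Pid) string {
	noLeader := etf.Pid{}
	switch {
	case rp.leader == rp.Self():
		return "handleAppendLeader"
	case rp.quorum.Member && from == rp.leader:
		return "handleAppendQuorum"
	case rp.quorum.Member && rp.leader != noLeader:
		return "forward to the leader"
	default:
		return "forward to a random quorum member"
	}
}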
-} - -func (rp *RaftProcess) quorumChangeStart(nextAttempt bool) { - if rp.quorumChangeDefer == false { - if nextAttempt { - // increase the timeout for the next attempt to build a new quorum - rp.quorumChangeAttempt++ - } else { - rp.quorumChangeAttempt = 1 - } - maxTime := rp.quorumChangeAttempt * quorumChangeDeferMaxTime - after := time.Duration(50+rand.Intn(maxTime)) * time.Millisecond - rp.CastAfter(rp.Self(), messageRaftQuorumChange{}, after) - rp.quorumChangeDefer = true - } -} - -func (rp *RaftProcess) quorumChange() RaftStatus { - l := len(rp.quorumCandidates.List()) - - candidateRaftQuorumState := RaftQuorumState3 - switch { - case l > 9: - if rp.quorum != nil && rp.quorum.State == RaftQuorumState11 { - // do nothing - return RaftStatusOK - } - candidateRaftQuorumState = RaftQuorumState11 - l = 10 // to create a quorum of 11 we need 10 candidates + itself - - case l > 7: - if rp.quorum != nil && rp.quorum.State == RaftQuorumState9 { - // do nothing - return RaftStatusOK - } - candidateRaftQuorumState = RaftQuorumState9 - l = 8 // quorum of 9 => 8 candidates + itself - case l > 5: - if rp.quorum != nil && rp.quorum.State == RaftQuorumState7 { - // do nothing - return RaftStatusOK - } - candidateRaftQuorumState = RaftQuorumState7 - l = 6 // quorum of 7 => 6 candidates + itself - case l > 3: - if rp.quorum != nil && rp.quorum.State == RaftQuorumState5 { - // do nothing - return RaftStatusOK - } - candidateRaftQuorumState = RaftQuorumState5 - l = 4 // quorum of 5 => 4 candidates + itself - case l > 1: - if rp.quorum != nil && rp.quorum.State == RaftQuorumState3 { - // do nothing - return RaftStatusOK - } - candidateRaftQuorumState = RaftQuorumState3 - l = 2 // quorum of 3 => 2 candidates + itself - default: - // not enough candidates to create a quorum - if rp.quorum != nil { - rp.quorum = nil - return rp.handleQuorum() - } - // QUODBG fmt.Println(rp.Name(), "QUO VOTE. NOT ENO CAND", rp.quorumCandidates.List()) - - // try sending cluster_join again to receive an updated peer list - rp.CastAfter(rp.Self(), messageRaftClusterInit{}, 5*time.Second) - return RaftStatusOK - } - - if _, exist := rp.quorumVotes[candidateRaftQuorumState]; exist { - // voting for this state is already in progress - return RaftStatusOK - } - - quorumCandidates := make([]etf.Pid, 0, l+1) - quorumCandidates = append(quorumCandidates, rp.Self()) - candidates := rp.quorumCandidates.List() - quorumCandidates = append(quorumCandidates, candidates[:l]...)
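The switch above reduces to a simple rule: a quorum of size N requires N-1 online candidates plus the process itself, and the largest supported size wins. A standalone distillation of that rule (illustrative only, not part of the original source):

// quorumSizeFor returns the quorum size (3, 5, 7, 9 or 11) that the given
// number of online candidates allows, or 0 when there are not enough candidates.
func quorumSizeFor(candidates int) int {
	switch {
	case candidates > 9:
		return 11
	case candidates > 7:
		return 9
	case candidates > 5:
		return 7
	case candidates > 3:
		return 5
	case candidates > 1:
		return 3
	default:
		return 0
	}
}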
- // QUODBG fmt.Println(rp.Name(), "QUO VOTE INIT", candidateRaftQuorumState, quorumCandidates) - - // send quorumVote to all candidates (except itself) - quorum := &quorum{ - votes: make(map[etf.Pid]int), - origin: rp.Self(), - } - quorum.State = candidateRaftQuorumState - quorum.Peers = quorumCandidates - rp.quorumVotes[candidateRaftQuorumState] = quorum - rp.quorumSendVote(quorum) - rp.CastAfter(rp.Self(), messageRaftQuorumCleanVote{state: quorum.State}, cleanVoteTimeout) - return RaftStatusOK -} - -func (rp *RaftProcess) quorumSendVote(q *quorum) bool { - empty := etf.Pid{} - if q.origin == empty { - // do not send its vote until the origin vote is received - return false - } - - allVoted := true - quorumVote := etf.Tuple{ - etf.Atom("$quorum_vote"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - rp.options.Serial, - int(q.State), - q.Peers, - }, - } - - for _, pid := range q.Peers { - if pid == rp.Self() { - continue // do not send to itself - } - - if pid == q.origin { - continue - } - v, _ := q.votes[pid] - - // check if we have already sent a vote to this peer - if v&1 == 0 { - // QUODBG fmt.Println(rp.Name(), "SEND VOTE to", pid, q.Peers) - rp.Cast(pid, quorumVote) - // mark as sent - v |= 1 - q.votes[pid] = v - } - - if v != 3 { // 2(010) - recv, 1(001) - sent, 3(011) - recv & sent - allVoted = false - } - } - - if allVoted == true && q.origin != rp.Self() { - // send vote to origin - // QUODBG fmt.Println(rp.Name(), "SEND VOTE to origin", q.origin, q.Peers) - rp.Cast(q.origin, quorumVote) - } - - return allVoted -} - -func (rp *RaftProcess) quorumVote(from etf.Pid, vote *messageRaftQuorumVote) RaftStatus { - if vote.State != len(vote.Candidates) { - lib.Warning("[%s] quorum state and number of candidates mismatch", rp.Self()) - rp.quorumCandidates.SetOffline(rp, from) - return RaftStatusOK - } - - if c := rp.quorumCandidates.GetOnline(from); c == nil { - // there is a race condition case when we receive a vote before - // the cluster_join_reply message. just ignore it. they will start - // another round of quorum forming - return RaftStatusOK - } else { - c.heartbeat = time.Now().Unix() - c.failures = 0 - } - candidatesRaftQuorumState := RaftQuorumState3 - switch vote.State { - case 3: - candidatesRaftQuorumState = RaftQuorumState3 - case 5: - candidatesRaftQuorumState = RaftQuorumState5 - case 7: - candidatesRaftQuorumState = RaftQuorumState7 - case 9: - candidatesRaftQuorumState = RaftQuorumState9 - case 11: - candidatesRaftQuorumState = RaftQuorumState11 - default: - lib.Warning("[%s] wrong number of candidates in the request. removing %s from quorum candidates list", rp.Self(), from) - rp.quorumCandidates.SetOffline(rp, from) - return RaftStatusOK - } - - // do not vote if the requested quorum is less than the existing one - if rp.quorum != nil && candidatesRaftQuorumState <= rp.quorum.State { - // There is a case when a peer is involved in more than one voting, - // and this peer just sent a vote for another voting process which - // is still in progress.
- // Do not send the $quorum_voted message if this peer is already a member - // of the accepted quorum - member := false - for _, pid := range rp.quorum.Peers { - if pid == from { - member = true - break - } - } - if member == true { - return RaftStatusOK - } - - // QUODBG fmt.Println(rp.Name(), "SKIP VOTE from", from, candidatesRaftQuorumState, rp.quorum.State) - built := etf.Tuple{ - etf.Atom("$quorum_built"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - int(rp.quorum.State), - rp.round, - rp.quorum.Peers, - }, - } - rp.Cast(from, built) - return RaftStatusOK - } - - q, exist := rp.quorumVotes[candidatesRaftQuorumState] - if exist == false { - // - // Received the first vote - // - if len(rp.quorumVotes) > 5 { - // can't be more than 5 (there could be only votes for 3,5,7,9,11) - lib.Warning("[%s] too many votes %#v", rp.Self(), rp.quorumVotes) - return RaftStatusOK - } - - q = &quorum{} - q.State = candidatesRaftQuorumState - q.Peers = vote.Candidates - - if from == vote.Candidates[0] { - // Origin vote (received from the peer that initiated this voting process). - // Otherwise keep this field empty, which means this quorum - // will be overwritten if we get another vote from the peer - // that initiated that voting (with a different set/order of peers) - q.origin = from - } - - if rp.quorumValidateVote(from, q, vote) == false { - // do not create this voting if those peers aren't valid (haven't registered yet) - return RaftStatusOK - } - q.lastVote = time.Now().UnixMilli() - // QUODBG fmt.Println(rp.Name(), "QUO VOTE (NEW)", from, vote) - rp.quorumVotes[candidatesRaftQuorumState] = q - rp.CastAfter(rp.Self(), messageRaftQuorumCleanVote{state: q.State}, cleanVoteTimeout) - - } else { - empty := etf.Pid{} - if q.origin == empty && from == vote.Candidates[0] { - // got origin vote. - q.origin = from - - // check if this vote has the same set of peers - same := true - for i := range q.Peers { - if vote.Candidates[i] != q.Peers[i] { - same = false - break - } - } - // if it differs, overwrite the quorum with the new vote - if same == false { - q.Peers = vote.Candidates - q.votes = nil - } - } - - if rp.quorumValidateVote(from, q, vote) == false { - return RaftStatusOK - } - q.lastVote = time.Now().UnixMilli() - // QUODBG fmt.Println(rp.Name(), "QUO VOTE", from, vote) - } - - // returns true if we got votes from all the peers within this quorum - if rp.quorumSendVote(q) == true { - // - // Quorum built - // - // QUODBG fmt.Println(rp.Name(), "QUO BUILT", q.State, q.Peers) - if rp.quorum == nil { - rp.quorum = &RaftQuorum{} - } - rp.quorum.Member = true - rp.quorum.State = q.State - rp.quorum.Peers = q.Peers - delete(rp.quorumVotes, q.State) - - // all candidates who don't belong to this quorum should be notified that the quorum has been built - mapPeers := make(map[etf.Pid]bool) - for _, peer := range rp.quorum.Peers { - mapPeers[peer] = true - } - allCandidates := rp.quorumCandidates.List() - for _, peer := range allCandidates { - if _, exist := mapPeers[peer]; exist { - // this peer belongs to the quorum.
skip it - continue - } - built := etf.Tuple{ - etf.Atom("$quorum_built"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - int(rp.quorum.State), - rp.round, - rp.quorum.Peers, - }, - } - rp.Cast(peer, built) - - } - - rp.handleElectionStart(rp.round + 1) - return rp.handleQuorum() - } - - return RaftStatusOK -} - -func (rp *RaftProcess) clusterHeal() { - for _, pid := range rp.quorumCandidates.ListOffline() { - // c can't be nil here - c := rp.quorumCandidates.Get(pid) - if c.heartbeat == 0 { - continue - } - diff := time.Now().Unix() - c.heartbeat - switch { - case diff < 0: - // heartbeat was set in the future - continue - case diff > 300: // > 5 min - rp.Join(pid) - // the next attempt will be in an hour - c.heartbeat = time.Now().Unix() + 3600 - } - } -} - -func (rp *RaftProcess) handleQuorum() RaftStatus { - q := rp.Quorum() - if status := rp.behavior.HandleQuorum(rp, q); status != RaftStatusOK { - return status - } - - noLeader := etf.Pid{} - if rp.leader != noLeader { - rp.leader = noLeader - if status := rp.behavior.HandleLeader(rp, nil); status != RaftStatusOK { - return status - } - } - - if q == nil || q.Member == false { - return RaftStatusOK - } - - if rp.election == nil { - rp.handleElectionStart(rp.round + 1) - } - - return RaftStatusOK -} - -func (rp *RaftProcess) handleHeartbeat() { - if rp.heartbeatCancel != nil { - rp.heartbeatCancel() - rp.heartbeatCancel = nil - } - - defer func() { - after := DefaultRaftHeartbeat * time.Second - cancel := rp.CastAfter(rp.Self(), messageRaftHeartbeat{}, after) - rp.heartbeatCancel = cancel - rp.clusterHeal() - }() - - if rp.quorum == nil || rp.quorum.Member == false { - return - } - - noLeader := etf.Pid{} - if rp.leader == noLeader { - // leader election is still in progress. do nothing for now. - return - } - - if rp.leader == rp.Self() { - // send a heartbeat to all quorum members if this process is a leader of this quorum - heartbeat := etf.Tuple{ - etf.Atom("$leader_heartbeat"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - rp.options.Serial, - }, - } - for _, pid := range rp.quorum.Peers { - if pid == rp.Self() { - continue - } - rp.Cast(pid, heartbeat) - } - return - } - - // check the leader's heartbeat - c := rp.quorumCandidates.GetOnline(rp.leader) - if c != nil { - diff := time.Now().Unix() - c.heartbeat - if c.heartbeat == 0 { - diff = 0 - } - - if diff < DefaultRaftHeartbeat*3 { - return - } - - // no heartbeats from the leader for a long time - c.joined = false - rp.quorumCandidates.SetOffline(rp, rp.leader) - } - - // HRTDBG fmt.Println(rp.Self(), "HRT lost leader", rp.leader) - leave := etf.Tuple{ - etf.Atom("$quorum_leave"), - rp.Self(), - etf.Tuple{ - rp.options.ID, - rp.leader, - }, - } - - // tell everyone in the raft cluster - for _, peer := range rp.quorumCandidates.List() { - rp.Cast(peer, leave) - } - rp.quorum = nil - rp.handleQuorum() - rp.quorumChangeStart(false) -} - -func (rp *RaftProcess) isQuorumMember(pid etf.Pid) bool { - if rp.quorum == nil { - return false - } - for _, peer := range rp.quorum.Peers { - if pid == peer { - return true - } - } - return false -} - -func (rp *RaftProcess) quorumValidateVote(from etf.Pid, q *quorum, vote *messageRaftQuorumVote) bool { - duplicates := make(map[etf.Pid]bool) - validFrom := false - validTo := false - validSerial := false - candidatesMatch := true - newVote := false - if q.votes == nil { - q.votes = make(map[etf.Pid]int) - newVote = true - } - - empty := etf.Pid{} - if q.origin != empty && newVote == true && vote.Candidates[0] != from { - return false - } - - for i, pid :=
range vote.Candidates { - if pid == rp.Self() { - validTo = true - continue - } - - // quorum peers must match the vote's candidates - if q.Peers[i] != vote.Candidates[i] { - candidatesMatch = false - } - - // check if the received vote has the same set of peers. - // if this is the first vote for the given q.State the pid - // will be added to the vote map - _, exist := q.votes[pid] - if exist == false { - if newVote { - q.votes[pid] = 0 - } else { - candidatesMatch = false - } - } - - if _, exist := duplicates[pid]; exist { - lib.Warning("[%s] got vote with duplicates from %s", rp.Name(), from) - rp.quorumCandidates.SetOffline(rp, from) - return false - } - duplicates[pid] = false - - c := rp.quorumCandidates.GetOnline(pid) - if c == nil { - candidatesMatch = false - rp.quorumCandidates.Set(rp, pid) - continue - } - if pid == from { - if c.serial > vote.Serial { - // invalid serial - continue - } - c.serial = vote.Serial - validFrom = true - validSerial = true - } - } - - if candidatesMatch == false { - // can't accept this vote - // QUODBG fmt.Println(rp.Name(), "QUO CAND MISMATCH", from, vote.Candidates) - return false - } - - if validSerial == false { - lib.Warning("[%s] got vote from %s with invalid serial", rp.Name(), from) - rp.quorumCandidates.SetOffline(rp, from) - return false - } - - if validFrom == false || validTo == false { - lib.Warning("[%s] got vote from %s with invalid data", rp.Name(), from) - rp.quorumCandidates.SetOffline(rp, from) - return false - } - - // mark as received - v, _ := q.votes[from] - q.votes[from] = v | 2 - - return true -} - -// -// Server callbacks -// - -func (r *Raft) Init(process *ServerProcess, args ...etf.Term) error { - var options RaftOptions - - behavior, ok := process.Behavior().(RaftBehavior) - if !ok { - return fmt.Errorf("Raft: not a RaftBehavior") - } - - raftProcess := &RaftProcess{ - ServerProcess: *process, - behavior: behavior, - quorumCandidates: createQuorumCandidates(), - quorumVotes: make(map[RaftQuorumState]*quorum), - requests: make(map[etf.Ref]CancelFunc), - requestsAppend: make(map[string]*requestAppend), - } - - // do not inherit the parent State - raftProcess.State = nil - options, err := behavior.InitRaft(raftProcess, args...)
- if err != nil { - return err - } - - raftProcess.options = options - process.State = raftProcess - - process.Cast(process.Self(), messageRaftClusterInit{}) - //process.SetTrapExit(true) - raftProcess.handleHeartbeat() - return nil -} - -// HandleCall -func (r *Raft) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - rp := process.State.(*RaftProcess) - return rp.behavior.HandleRaftCall(rp, from, message) -} - -// HandleCast -func (r *Raft) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - var mRaft messageRaft - var status RaftStatus - - rp := process.State.(*RaftProcess) - switch m := message.(type) { - case messageRaftClusterInit: - if rp.quorum != nil { - return ServerStatusOK - } - if len(rp.quorumVotes) > 0 { - return ServerStatusOK - } - for _, peer := range rp.options.Peers { - rp.Join(peer) - } - return ServerStatusOK - - case messageRaftQuorumCleanVote: - q, exist := rp.quorumVotes[m.state] - if exist == true && q.lastVote > 0 { - diff := time.Duration(time.Now().UnixMilli()-q.lastVote) * time.Millisecond - // if voting is still in progress cast itself again with a shifted timeout - // according to cleanVoteTimeout - if cleanVoteTimeout > diff { - nextCleanVoteTimeout := cleanVoteTimeout - diff - rp.CastAfter(rp.Self(), messageRaftQuorumCleanVote{state: q.State}, nextCleanVoteTimeout) - return ServerStatusOK - } - } - - if q != nil { - // QUODBG fmt.Println(rp.Name(), "CLN VOTE", m.state, q.Peers) - delete(rp.quorumVotes, m.state) - for _, peer := range q.Peers { - v, _ := q.votes[peer] - if v&2 > 0 { // vote received - continue - } - // no vote from this peer. there are two options: - // 1. this peer has switched to building another quorum - // 2. something is wrong with this peer (its raft process could be stuck) - c := rp.quorumCandidates.GetOnline(peer) - if c == nil { - // already offline - continue - } - c.failures++ - if c.failures > 10 { - // QUODBG fmt.Println(rp.Self(), "too many failures with", peer) - rp.quorumCandidates.SetOffline(rp, peer) - } - } - } - if len(rp.quorumVotes) == 0 { - // make another attempt to build a new quorum - rp.quorumChangeStart(true) - } - case messageRaftQuorumChange: - rp.quorumChangeDefer = false - status = rp.quorumChange() - - case messageRaftRequestClean: - delete(rp.requests, m.ref) - status = rp.behavior.HandleCancel(rp, m.ref, "timeout") - - case messageRaftAppendClean: - request, exist := rp.requestsAppend[m.key] - if exist == false { - // do nothing - return ServerStatusOK - } - if request.ref != m.ref { - return ServerStatusOK - } - if request.origin == rp.Self() { - status = rp.behavior.HandleCancel(rp, request.ref, "timeout") - break - } - delete(rp.requestsAppend, m.key) - return ServerStatusOK - case messageRaftElectionClean: - if rp.quorum == nil { - return ServerStatusOK - } - if rp.election == nil && rp.quorum.Member { - // restart election - rp.handleElectionStart(rp.round + 1) - return ServerStatusOK - } - if m.round != rp.election.round { - // a new election round happened - // LDRDBG fmt.Println(rp.Self(), "LDR clean election. skip. new election round", rp.election.round) - return ServerStatusOK - } - // LDRDBG fmt.Println(rp.Self(), "LDR clean election.
round", rp.election.round) - rp.election = nil - return ServerStatusOK - - case messageRaftHeartbeat: - rp.handleHeartbeat() - return ServerStatusOK - - default: - if err := etf.TermIntoStruct(message, &mRaft); err != nil { - status = rp.behavior.HandleRaftInfo(rp, message) - break - } - if mRaft.Pid == process.Self() { - lib.Warning("[%s] got raft command from itself %#v", process.Self(), mRaft) - return ServerStatusOK - } - status = rp.handleRaftRequest(mRaft) - if status == lib.ErrUnsupportedRequest { - status = rp.behavior.HandleRaftCast(rp, message) - } - } - - switch status { - case nil, RaftStatusOK: - return ServerStatusOK - case RaftStatusStop: - return ServerStatusStop - case lib.ErrUnsupportedRequest: - return rp.behavior.HandleRaftInfo(rp, message) - default: - return ServerStatus(status) - } - -} - -// HandleInfo -func (r *Raft) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - var status RaftStatus - - rp := process.State.(*RaftProcess) - switch m := message.(type) { - case MessageDown: - can := rp.quorumCandidates.GetOnline(m.Pid) - if can == nil { - break - } - if can.monitor != m.Ref { - status = rp.behavior.HandleRaftInfo(rp, message) - break - } - rp.quorumCandidates.SetOffline(rp, m.Pid) - if rp.quorum == nil { - return ServerStatusOK - } - for _, peer := range rp.quorum.Peers { - // check if this pid belongs to the quorum - if peer != m.Pid { - continue - } - - // start to build new quorum - // QUODBG fmt.Println(rp.Name(), "QUO PEER DOWN", m.Pid) - rp.handleQuorum() - rp.quorumChangeStart(false) - break - } - return ServerStatusOK - - default: - status = rp.behavior.HandleRaftInfo(rp, message) - } - - switch status { - case nil, RaftStatusOK: - return ServerStatusOK - case RaftStatusStop: - return ServerStatusStop - default: - return ServerStatus(status) - } -} - -// -// default Raft callbacks -// - -// HandleQuorum -func (r *Raft) HandleQuorum(process *RaftProcess, quorum *RaftQuorum) RaftStatus { - return RaftStatusOK -} - -// HandleLeader -func (r *Raft) HandleLeader(process *RaftProcess, leader *RaftLeader) RaftStatus { - return RaftStatusOK -} - -// HandlePeer -func (r *Raft) HandlePeer(process *RaftProcess, peer etf.Pid, serial uint64) RaftStatus { - return RaftStatusOK -} - -// HandleSerial -func (r *Raft) HandleSerial(process *RaftProcess, ref etf.Ref, serial uint64, key string, value etf.Term) RaftStatus { - lib.Warning("HandleSerial: unhandled key-value message with ref %s and serial %d", ref, serial) - return RaftStatusOK -} - -// HandleCancel -func (r *Raft) HandleCancel(process *RaftProcess, ref etf.Ref, reason string) RaftStatus { - lib.Warning("HandleCancel: unhandled cancel with ref %s and reason %q", ref, reason) - return RaftStatusOK -} - -// HandleRaftCall -func (r *Raft) HandleRaftCall(process *RaftProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleRaftCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleRaftCast -func (r *Raft) HandleRaftCast(process *RaftProcess, message etf.Term) ServerStatus { - lib.Warning("HandleRaftCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleRaftInfo -func (r *Raft) HandleRaftInfo(process *RaftProcess, message etf.Term) ServerStatus { - lib.Warning("HandleRaftInfo: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleRaftDirect -func (r *Raft) HandleRaftDirect(process *RaftProcess, message interface{}) (interface{}, error) { - return nil, 
lib.ErrUnsupportedRequest -} - -// -// internals -// - -func createQuorumCandidates() *quorumCandidates { - qc := &quorumCandidates{ - candidates: make(map[etf.Pid]*candidate), - } - return qc -} - -func (qc *quorumCandidates) Set(rp *RaftProcess, peer etf.Pid) { - c, exist := qc.candidates[peer] - if exist == true { - diff := time.Now().Unix() - c.heartbeat - if diff > DefaultRaftHeartbeat { - rp.Join(peer) - } - return - } - c = &candidate{ - heartbeat: time.Now().Unix(), - } - qc.candidates[peer] = c - rp.Join(peer) -} - -func (qc *quorumCandidates) SetOnline(rp *RaftProcess, peer etf.Pid, serial uint64) bool { - c, exist := qc.candidates[peer] - if exist == false { - return false - } - mon := rp.MonitorProcess(peer) - c.serial = serial - c.monitor = mon - c.joined = true - c.heartbeat = time.Now().Unix() - c.failures = 0 - return true -} - -func (qc *quorumCandidates) SetOffline(rp *RaftProcess, peer etf.Pid) { - c, exist := qc.candidates[peer] - if exist == false { - return - } - // QUODBG fmt.Println(rp.Self(), "peer", peer, "has left") - emptyRef := etf.Ref{} - if c.monitor != emptyRef { - rp.DemonitorProcess(c.monitor) - c.monitor = emptyRef - } - c.joined = false - c.failures = 0 - c.heartbeat = time.Now().Unix() - return -} - -func (qc *quorumCandidates) GetOnline(peer etf.Pid) *candidate { - c, exist := qc.candidates[peer] - if exist && c.joined == false { - return nil - } - return c -} -func (qc *quorumCandidates) Get(peer etf.Pid) *candidate { - c, exist := qc.candidates[peer] - if exist == false { - return nil - } - return c -} - -// List returns the list of online peers -func (qc *quorumCandidates) List() []etf.Pid { - type c struct { - pid etf.Pid - serial uint64 - } - list := []c{} - for k, v := range qc.candidates { - if v.joined == false { - continue - } - list = append(list, c{pid: k, serial: v.serial}) - } - - // sort candidates by serial number in desc order - sort.Slice(list, func(a, b int) bool { return list[a].serial > list[b].serial }) - pids := []etf.Pid{} - for i := range list { - pids = append(pids, list[i].pid) - } - return pids -} - -func (qc *quorumCandidates) ListOffline() []etf.Pid { - list := []etf.Pid{} - for pid, c := range qc.candidates { - if c.joined == true { - continue - } - list = append(list, pid) - } - return list -} diff --git a/gen/registrar.go b/gen/registrar.go new file mode 100644 index 00000000..ed416332 --- /dev/null +++ b/gen/registrar.go @@ -0,0 +1,137 @@ +package gen + +// Registrar interface +type Registrar interface { + // Register is invoked on network start + Register(node NodeRegistrar, routes RegisterRoutes) (StaticRoutes, error) + + // Resolver returns the gen.Resolver interface + Resolver() Resolver + + // RegisterProxy allows registering this node as a proxy for the given node on the registrar + // (if the registrar does support this feature) + RegisterProxy(to Atom) error + // UnregisterProxy unregisters this node as a proxy to the given node on the registrar + // (if the registrar does support this feature) + UnregisterProxy(to Atom) error + + // RegisterApplicationRoute registers the application on the registrar + // (if the registrar does support this feature). + RegisterApplicationRoute(route ApplicationRoute) error + // UnregisterApplicationRoute unregisters the route of the given application + // (if the registrar does support this feature).
+ UnregisterApplicationRoute(name Atom) error + + // Nodes returns a list of the nodes registered on the registrar + Nodes() ([]Atom, error) + // Config returns config received from the registrar + // (if the registrar does support this feature) + Config(items ...string) (map[string]any, error) + // ConfigItem returns the value from the config for the given name + // (if the registrar does support this feature) + ConfigItem(item string) (any, error) + // Event returns the event you may want to Link/Monitor + // (if the registrar does support this feature) + Event() (Event, error) + // Info returns short information about the registrar + Info() RegistrarInfo + + // Terminate is invoked on network stop. + Terminate() + + Version() Version +} + +// Resolver interface +type Resolver interface { + // Resolve resolves the routes for the given node name + Resolve(node Atom) ([]Route, error) + // ResolveProxy resolves the proxy routes for the given node name + ResolveProxy(node Atom) ([]ProxyRoute, error) + // ResolveApplication resolves the application routes for the given application. + // This information allows you to know where the given application is running + // or loaded. + ResolveApplication(name Atom) ([]ApplicationRoute, error) +} + +type RegistrarInfo struct { + Server string + EmbeddedServer bool + SupportRegisterProxy bool + SupportRegisterApplication bool + SupportConfig bool + SupportEvent bool + Version Version +} + +type AcceptorInfo struct { + Interface string + MaxMessageSize int + Flags NetworkFlags + TLS bool + CustomRegistrar bool + RegistrarServer string + RegistrarVersion Version + HandshakeVersion Version + ProtoVersion Version +} + +type RegisterRoutes struct { + Routes []Route + ApplicationRoutes []ApplicationRoute + ProxyRoutes []ProxyRoute // if Proxy was enabled +} + +type RegistrarConfig struct { + LastUpdate int64 // timestamp + Config map[string]any +} + +type Route struct { + Host string + Port uint16 + TLS bool + HandshakeVersion Version + ProtoVersion Version +} + +type ProxyRoute struct { + To Atom // to + Proxy Atom // via +} + +type ApplicationRoute struct { + Node Atom + Name Atom + Weight int + Mode ApplicationMode + State ApplicationState +} + +type StaticRoutes struct { + Routes map[string]NetworkRoute // match string => network route + Proxies map[string]NetworkProxyRoute // match string => proxy route +} + +type RouteInfo struct { + Match string + Weight int + UseResolver bool + UseCustomCookie bool + UseCustomCert bool + Flags NetworkFlags + HandshakeVersion Version + ProtoVersion Version + Host string + Port uint16 +} + +type ProxyRouteInfo struct { + Match string + Weight int + UseResolver bool + UseCustomCookie bool + Flags NetworkProxyFlags + MaxHop int + Proxy Atom +} diff --git a/gen/saga.go b/gen/saga.go deleted file mode 100644 index 8334d721..00000000 --- a/gen/saga.go +++ /dev/null @@ -1,1347 +0,0 @@ -package gen - -import ( - "fmt" - "math" - "sync" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -// SagaBehavior interface -type SagaBehavior interface { - ServerBehavior - - // - // Mandatory callbacks - // - - // InitSaga - InitSaga(process *SagaProcess, args ...etf.Term) (SagaOptions, error) - - // HandleTxNew is invoked when this saga receives a new TX.
- HandleTxNew(process *SagaProcess, id SagaTransactionID, value interface{}) SagaStatus - - // HandleTxResult invoked on a receiving result from the next saga - HandleTxResult(process *SagaProcess, id SagaTransactionID, from SagaNextID, result interface{}) SagaStatus - - // HandleTxCancel invoked on a request of transaction cancelation. - HandleTxCancel(process *SagaProcess, id SagaTransactionID, reason string) SagaStatus - - // - // Optional callbacks - // - - // HandleTxDone invoked when the transaction is done on a saga where it was created. - // It returns the final result and SagaStatus. The commit message will deliver the final - // result to all participants of this transaction (if it has enabled the TwoPhaseCommit option). - // Otherwise the final result will be ignored. - HandleTxDone(process *SagaProcess, id SagaTransactionID, result interface{}) (interface{}, SagaStatus) - - // HandleTxInterim invoked if received interim result from the next hop - HandleTxInterim(process *SagaProcess, id SagaTransactionID, from SagaNextID, interim interface{}) SagaStatus - - // HandleTxCommit invoked if TwoPhaseCommit option is enabled for the given TX. - // All sagas involved in this TX receive a commit message with final value and invoke this callback. - // The final result has a value returned by HandleTxDone on a Saga created this TX. - HandleTxCommit(process *SagaProcess, id SagaTransactionID, final interface{}) SagaStatus - - // - // Callbacks to handle result/interim from the worker(s) - // - - // HandleJobResult - HandleJobResult(process *SagaProcess, id SagaTransactionID, from SagaJobID, result interface{}) SagaStatus - // HandleJobInterim - HandleJobInterim(process *SagaProcess, id SagaTransactionID, from SagaJobID, interim interface{}) SagaStatus - // HandleJobFailed - HandleJobFailed(process *SagaProcess, id SagaTransactionID, from SagaJobID, reason string) SagaStatus - - // - // Server's callbacks - // - - // HandleStageCall this callback is invoked on ServerProcess.Call. This method is optional - // for the implementation - HandleSagaCall(process *SagaProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - // HandleStageCast this callback is invoked on ServerProcess.Cast. This method is optional - // for the implementation - HandleSagaCast(process *SagaProcess, message etf.Term) ServerStatus - // HandleStageInfo this callback is invoked on Process.Send. This method is optional - // for the implementation - HandleSagaInfo(process *SagaProcess, message etf.Term) ServerStatus - // HandleSagaDirect this callback is invoked on Process.Direct. 
This method is optional - // for the implementation - HandleSagaDirect(process *SagaProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) -} - -const ( - defaultHopLimit = math.MaxUint16 - defaultLifespan = 60 -) - -// SagaStatus -type SagaStatus error - -var ( - SagaStatusOK SagaStatus // nil - SagaStatusStop SagaStatus = fmt.Errorf("stop") - - // internal - - ErrSagaTxEndOfLifespan = fmt.Errorf("End of TX lifespan") - ErrSagaTxNextTimeout = fmt.Errorf("Next saga timeout") - ErrSagaUnknown = fmt.Errorf("Unknown saga") - ErrSagaJobUnknown = fmt.Errorf("Unknown job") - ErrSagaTxUnknown = fmt.Errorf("Unknown TX") - ErrSagaTxCanceled = fmt.Errorf("Tx is canceled") - ErrSagaTxInProgress = fmt.Errorf("Tx is still in progress") - ErrSagaResultAlreadySent = fmt.Errorf("Result is already sent") - ErrSagaNotAllowed = fmt.Errorf("Operation is not allowed") -) - -// Saga -type Saga struct { - Server -} - -// SagaTransactionOptions -type SagaTransactionOptions struct { - // HopLimit defines a number of hop within the transaction. Default limit - // is 0 (no limit). - HopLimit uint - // Lifespan defines a lifespan for the transaction in seconds. Default is 60. - Lifespan uint - - // TwoPhaseCommit enables 2PC for the transaction. This option makes all - // Sagas involved in this transaction invoke HandleCommit callback on them and - // invoke HandleCommitJob callback on Worker processes once the transaction is finished. - TwoPhaseCommit bool -} - -// SagaOptions -type SagaOptions struct { - // MaxTransactions defines the limit for the number of active transactions. Default: 0 (unlimited) - MaxTransactions uint - // Worker - Worker SagaWorkerBehavior -} - -// SagaProcess -type SagaProcess struct { - ServerProcess - options SagaOptions - behavior SagaBehavior - - // running transactions - txs map[SagaTransactionID]*SagaTransaction - mutexTXS sync.Mutex - - // next sagas where txs were sent - next map[SagaNextID]*SagaTransaction - mutexNext sync.Mutex - - // running jobs - jobs map[etf.Pid]*SagaJob - mutexJobs sync.Mutex -} - -// SagaTransactionID -type SagaTransactionID etf.Ref - -// String -func (id SagaTransactionID) String() string { - r := etf.Ref(id) - return fmt.Sprintf("TX#%d.%d.%d", r.ID[0], r.ID[1], r.ID[2]) -} - -// SagaTransaction -type SagaTransaction struct { - sync.Mutex - id SagaTransactionID - options SagaTransactionOptions - origin SagaNextID // next id on a saga it came from - monitor etf.Ref // monitor parent saga - next map[SagaNextID]*SagaNext // where were sent - jobs map[SagaJobID]etf.Pid - arrival int64 // when it arrived on this saga - parents []etf.Pid // sagas trace - - done bool // do not allow send result more than once if 2PC is set - cancelTimer CancelFunc -} - -// SagaNextID -type SagaNextID etf.Ref - -// String -func (id SagaNextID) String() string { - r := etf.Ref(id) - return fmt.Sprintf("Next#%d.%d.%d", r.ID[0], r.ID[1], r.ID[2]) -} - -// SagaNext -type SagaNext struct { - // Saga etf.Pid, string (for the locally registered process), gen.ProcessID{process, node} (for the remote process) - Saga interface{} - // Value a value for the invoking HandleTxNew on a next hop. - Value interface{} - // Timeout how long this Saga will be waiting for the result from the next hop. Default - 10 seconds - Timeout uint - // TrapCancel if the next saga fails, it will transform the cancel signal into the regular message gen.MessageSagaCancel, and HandleSagaInfo callback will be invoked. 
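For illustration, a hedged sketch (not from this patch) of dispatching a transaction to the next saga with the SagaNext options documented above. The forwardOrder helper and the registered name "billing" are hypothetical; Next is defined further down in this file.

func forwardOrder(sp *SagaProcess, txID SagaTransactionID, order interface{}) error {
	next := SagaNext{
		Saga:       "billing", // locally registered saga name; a Pid or gen.ProcessID works too
		Value:      order,     // delivered to HandleTxNew on the next hop
		Timeout:    15,        // wait up to 15 seconds for the result (default is 10)
		TrapCancel: true,      // deliver a cancel as gen.MessageSagaCancel instead of canceling this saga
	}
	if _, err := sp.Next(txID, next); err != nil {
		return err // e.g. ErrSagaTxUnknown, exceeded hop limit or lifespan
	}
	return nil
}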
- TrapCancel bool - - // internal - done bool // for 2PC case - cancelTimer CancelFunc -} - -// SagaJobID -type SagaJobID etf.Ref - -// String -func (id SagaJobID) String() string { - r := etf.Ref(id) - return fmt.Sprintf("Job#%d.%d.%d", r.ID[0], r.ID[1], r.ID[2]) -} - -// SagaJob -type SagaJob struct { - ID SagaJobID - TransactionID SagaTransactionID - Value interface{} - - // internal - options SagaJobOptions - saga etf.Pid - commit bool - worker Process - done bool - cancelTimer CancelFunc -} - -// SagaJobOptions -type SagaJobOptions struct { - Timeout uint -} - -type messageSaga struct { - Request etf.Atom - Pid etf.Pid - Command interface{} -} - -type messageSagaNext struct { - TransactionID etf.Ref - Origin etf.Ref - Value interface{} - Parents []etf.Pid - Options map[string]interface{} -} - -type messageSagaResult struct { - TransactionID etf.Ref - Origin etf.Ref - Result interface{} -} - -type messageSagaCancel struct { - TransactionID etf.Ref - Origin etf.Ref - Reason string -} - -type messageSagaCommit struct { - TransactionID etf.Ref - Origin etf.Ref - Final interface{} -} - -// MessageSagaCancel -type MessageSagaCancel struct { - TransactionID SagaTransactionID - NextID SagaNextID - Reason string -} - -// MessageSagaError -type MessageSagaError struct { - TransactionID SagaTransactionID - NextID SagaNextID - Error string - Details string -} - -// -// Saga API -// - -type sagaSetMaxTransactions struct { - max uint -} - -// SetMaxTransactions set maximum transactions fo the saga -func (gs *Saga) SetMaxTransactions(process Process, max uint) error { - if !process.IsAlive() { - return lib.ErrServerTerminated - } - message := sagaSetMaxTransactions{ - max: max, - } - _, err := process.Direct(message) - return err -} - -// -// SagaProcess methods -// - -// StartTransaction -func (sp *SagaProcess) StartTransaction(options SagaTransactionOptions, value interface{}) SagaTransactionID { - id := sp.MakeRef() - - if options.HopLimit == 0 { - options.HopLimit = defaultHopLimit - } - if options.Lifespan == 0 { - options.Lifespan = defaultLifespan - } - - message := etf.Tuple{ - etf.Atom("$saga_next"), - sp.Self(), - etf.Tuple{ - id, // tx id - etf.Ref{}, // origin. empty value. (parent's next id) - value, // tx value - []etf.Pid{}, // parents - etf.Map{ // tx options - "HopLimit": options.HopLimit, - "Lifespan": options.Lifespan, - "TwoPhaseCommit": options.TwoPhaseCommit, - }, - }, - } - - sp.Send(sp.Self(), message) - return SagaTransactionID(id) -} - -// Next -func (sp *SagaProcess) Next(id SagaTransactionID, next SagaNext) (SagaNextID, error) { - sp.mutexTXS.Lock() - tx, ok := sp.txs[id] - sp.mutexTXS.Unlock() - if !ok { - return SagaNextID{}, ErrSagaTxUnknown - } - - if len(tx.next) > int(tx.options.HopLimit) { - return SagaNextID{}, fmt.Errorf("exceeded hop limit") - } - - nextLifespan := int64(tx.options.Lifespan) - (time.Now().Unix() - tx.arrival) - if nextLifespan < 1 { - sp.CancelTransaction(id, "exceeded lifespan") - return SagaNextID{}, fmt.Errorf("exceeded lifespan. 
transaction canceled") - } - - if next.Timeout > 0 && int64(next.Timeout) > nextLifespan { - return SagaNextID{}, fmt.Errorf("requested timeout exceed lifespan") - } - - if next.Timeout > 0 { - nextLifespan = int64(next.Timeout) - } - - ref := sp.MonitorProcess(next.Saga) - next_id := SagaNextID(ref) - message := etf.Tuple{ - etf.Atom("$saga_next"), - sp.Self(), - etf.Tuple{ - etf.Ref(tx.id), // tx id - ref, // next id (tx origin on the next saga) - next.Value, - tx.parents, - etf.Map{ - "HopLimit": tx.options.HopLimit, - "Lifespan": nextLifespan, - "TwoPhaseCommit": tx.options.TwoPhaseCommit, - }, - }, - } - - sp.Send(next.Saga, message) - - cancelMessage := etf.Tuple{ - etf.Atom("$saga_cancel"), - etf.Pid{}, // do not send sp.Self() to be able TrapCancel work - etf.Tuple{ - etf.Ref(tx.id), // tx id - ref, - "lifespan", - }, - } - timeout := time.Duration(nextLifespan) * time.Second - next.cancelTimer = sp.SendAfter(sp.Self(), cancelMessage, timeout) - - tx.Lock() - tx.next[next_id] = &next - tx.Unlock() - - sp.mutexNext.Lock() - sp.next[next_id] = tx - sp.mutexNext.Unlock() - - return next_id, nil -} - -// StartJob -func (sp *SagaProcess) StartJob(id SagaTransactionID, options SagaJobOptions, value interface{}) (SagaJobID, error) { - - if sp.options.Worker == nil { - return SagaJobID{}, fmt.Errorf("This saga has no worker") - } - sp.mutexTXS.Lock() - tx, ok := sp.txs[id] - sp.mutexTXS.Unlock() - - if !ok { - return SagaJobID{}, ErrSagaTxUnknown - } - - jobLifespan := int64(tx.options.Lifespan) - (time.Now().Unix() - tx.arrival) - if options.Timeout > 0 && int64(options.Timeout) > jobLifespan { - return SagaJobID{}, fmt.Errorf("requested timeout exceed lifespan") - } - if options.Timeout > 0 { - jobLifespan = int64(options.Timeout) - } - - workerOptions := ProcessOptions{} - worker, err := sp.Spawn("", workerOptions, sp.options.Worker) - if err != nil { - return SagaJobID{}, err - } - sp.Link(worker.Self()) - - job := SagaJob{ - ID: SagaJobID(sp.MakeRef()), - TransactionID: id, - Value: value, - commit: tx.options.TwoPhaseCommit, - saga: sp.Self(), - worker: worker, - } - - sp.mutexJobs.Lock() - sp.jobs[worker.Self()] = &job - sp.mutexJobs.Unlock() - - m := messageSagaJobStart{ - job: job, - } - tx.Lock() - tx.jobs[job.ID] = worker.Self() - tx.Unlock() - - sp.Cast(worker.Self(), m) - - // terminate worker process via handleSagaExit - exitMessage := MessageExit{ - Pid: worker.Self(), - Reason: "lifespan", - } - - timeout := time.Duration(jobLifespan) * time.Second - job.cancelTimer = sp.SendAfter(sp.Self(), exitMessage, timeout) - - return job.ID, nil -} - -// SendResult -func (sp *SagaProcess) SendResult(id SagaTransactionID, result interface{}) error { - sp.mutexTXS.Lock() - tx, ok := sp.txs[id] - sp.mutexTXS.Unlock() - if !ok { - return ErrSagaTxUnknown - } - - if len(tx.parents) == 0 { - // SendResult was called right after CreateTransaction call. 
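For illustration, a hypothetical HandleTxNew implementation (not from this patch) that delegates the work to a worker via StartJob above; it assumes SagaOptions.Worker was set in InitSaga, and MySaga embeds gen.Saga.

type MySaga struct {
	Saga
}

func (s *MySaga) HandleTxNew(process *SagaProcess, id SagaTransactionID, value interface{}) SagaStatus {
	// cap the job at 30 seconds; the job timeout may not exceed the TX lifespan
	if _, err := process.StartJob(id, SagaJobOptions{Timeout: 30}, value); err != nil {
		return SagaStatus(err)
	}
	return SagaStatusOK
}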
- return ErrSagaNotAllowed - } - - if tx.done { - return ErrSagaResultAlreadySent - } - - if sp.checkTxDone(tx) == false { - return ErrSagaTxInProgress - } - - message := etf.Tuple{ - etf.Atom("$saga_result"), - sp.Self(), - etf.Tuple{ - etf.Ref(tx.id), - etf.Ref(tx.origin), - result, - }, - } - - // send message to the parent saga - if err := sp.Send(tx.parents[0], message); err != nil { - return err - } - - // tx handling is done on this saga - tx.done = true - - // do not remove TX if we send result to itself - if tx.parents[0] == sp.Self() { - return nil - } - - // do not remove TX if 2PC is enabled - if tx.options.TwoPhaseCommit { - return nil - } - - sp.mutexTXS.Lock() - delete(sp.txs, id) - sp.mutexTXS.Unlock() - - return nil -} - -// SendInterim -func (sp *SagaProcess) SendInterim(id SagaTransactionID, interim interface{}) error { - sp.mutexTXS.Lock() - tx, ok := sp.txs[id] - sp.mutexTXS.Unlock() - if !ok { - return ErrSagaTxUnknown - } - - message := etf.Tuple{ - etf.Atom("$saga_interim"), - sp.Self(), - etf.Tuple{ - etf.Ref(tx.id), - etf.Ref(tx.origin), - interim, - }, - } - - // send message to the parent saga - if err := sp.Send(tx.parents[0], message); err != nil { - return err - } - - return nil -} - -// CancelTransaction -func (sp *SagaProcess) CancelTransaction(id SagaTransactionID, reason string) error { - sp.mutexTXS.Lock() - tx, ok := sp.txs[id] - sp.mutexTXS.Unlock() - if !ok { - return ErrSagaTxUnknown - } - - message := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), - etf.Tuple{etf.Ref(tx.id), etf.Ref(tx.origin), reason}, - } - sp.Send(sp.Self(), message) - return nil -} - -// CancelJob -func (sp *SagaProcess) CancelJob(id SagaTransactionID, job SagaJobID, reason string) error { - sp.mutexTXS.Lock() - tx, ok := sp.txs[id] - sp.mutexTXS.Unlock() - if !ok { - return ErrSagaTxUnknown - } - tx.Lock() - defer tx.Unlock() - return nil -} - -func (sp *SagaProcess) checkTxDone(tx *SagaTransaction) bool { - if tx.options.TwoPhaseCommit == false { // 2PC is disabled - if len(tx.next) > 0 { // haven't received all results from the "next" sagas - return false - } - if len(tx.jobs) > 0 { // tx has running jobs - return false - } - return true - } - - // 2PC is enabled. 
check whether received all results from sagas - // and workers have finished their jobs - - tx.Lock() - // check results from sagas - for _, next := range tx.next { - if next.done == false { - tx.Unlock() - return false - } - } - - if len(tx.jobs) == 0 { - tx.Unlock() - return true - } - - // gen list of running workers - jobs := []etf.Pid{} - for _, pid := range tx.jobs { - jobs = append(jobs, pid) - } - tx.Unlock() - - // check the job states of them - sp.mutexJobs.Lock() - for _, pid := range jobs { - job := sp.jobs[pid] - if job.done == false { - sp.mutexJobs.Unlock() - return false - } - } - sp.mutexJobs.Unlock() - return true -} - -func (sp *SagaProcess) handleSagaRequest(m messageSaga) error { - - switch m.Request { - case etf.Atom("$saga_next"): - nextMessage := messageSagaNext{} - - if err := etf.TermIntoStruct(m.Command, &nextMessage); err != nil { - return lib.ErrUnsupportedRequest - } - - // Check if exceed the number of transaction on this saga - if sp.options.MaxTransactions > 0 && len(sp.txs)+1 > int(sp.options.MaxTransactions) { - cancel := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), - etf.Tuple{ - nextMessage.TransactionID, - nextMessage.Origin, - "exceed_tx_limit", - }, - } - sp.Send(m.Pid, cancel) - return nil - } - - // Check for the loop - transactionID := SagaTransactionID(nextMessage.TransactionID) - sp.mutexTXS.Lock() - tx, ok := sp.txs[transactionID] - sp.mutexTXS.Unlock() - if ok { - // loop detected. send cancel message - cancel := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), - etf.Tuple{ - nextMessage.TransactionID, - nextMessage.Origin, - "loop_detected", - }, - } - sp.Send(m.Pid, cancel) - return nil - } - - txOptions := SagaTransactionOptions{ - HopLimit: defaultHopLimit, - Lifespan: defaultLifespan, - } - if value, ok := nextMessage.Options["HopLimit"]; ok { - if hoplimit, ok := value.(int64); ok { - txOptions.HopLimit = uint(hoplimit) - } - } - if value, ok := nextMessage.Options["Lifespan"]; ok { - if lifespan, ok := value.(int64); ok && lifespan > 0 { - txOptions.Lifespan = uint(lifespan) - } - } - if value, ok := nextMessage.Options["TwoPhaseCommit"]; ok { - txOptions.TwoPhaseCommit, _ = value.(bool) - } - - tx = &SagaTransaction{ - id: transactionID, - options: txOptions, - origin: SagaNextID(nextMessage.Origin), - next: make(map[SagaNextID]*SagaNext), - jobs: make(map[SagaJobID]etf.Pid), - arrival: time.Now().Unix(), - parents: append([]etf.Pid{m.Pid}, nextMessage.Parents...), - } - sp.mutexTXS.Lock() - sp.txs[transactionID] = tx - sp.mutexTXS.Unlock() - - // do not monitor itself (they are equal if its came from the StartTransaction call) - if m.Pid != sp.Self() { - tx.monitor = sp.MonitorProcess(m.Pid) - } - - // tx lifespan timer - cancelMessage := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), // can't be trapped (ignored) - etf.Tuple{ - nextMessage.TransactionID, - nextMessage.Origin, - "lifespan", - }, - } - timeout := time.Duration(txOptions.Lifespan) * time.Second - tx.cancelTimer = sp.SendAfter(sp.Self(), cancelMessage, timeout) - - return sp.behavior.HandleTxNew(sp, transactionID, nextMessage.Value) - - case "$saga_cancel": - cancel := messageSagaCancel{} - if err := etf.TermIntoStruct(m.Command, &cancel); err != nil { - return lib.ErrUnsupportedRequest - } - - tx, exist := sp.txs[SagaTransactionID(cancel.TransactionID)] - if !exist { - // unknown tx, just ignore it - return nil - } - - // check where it came from. 
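Continuing the hypothetical MySaga sketch above: a middle saga forwarding a received result to its parent with SendResult. While other next-sagas or jobs of this TX are still running, SendResult returns ErrSagaTxInProgress, which such a saga can treat as "not done yet".

func (s *MySaga) HandleTxResult(process *SagaProcess, id SagaTransactionID, from SagaNextID, result interface{}) SagaStatus {
	err := process.SendResult(id, result)
	if err != nil && err != ErrSagaTxInProgress {
		return SagaStatus(err)
	}
	return SagaStatusOK
}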
- if tx.parents[0] == m.Pid { - // came from parent saga or from itself via CancelTransaction - // can't be ignored - sp.cancelTX(m.Pid, cancel, tx) - return sp.behavior.HandleTxCancel(sp, tx.id, cancel.Reason) - } - - // this cancel came from one of the next sagas - // or from itself (being in the middle of transaction graph) - next_id := SagaNextID(cancel.Origin) - tx.Lock() - next, ok := tx.next[next_id] - tx.Unlock() - - if ok && next.TrapCancel { - // clean the next saga stuff - next.cancelTimer() - sp.DemonitorProcess(cancel.Origin) - tx.Lock() - delete(tx.next, next_id) - tx.Unlock() - sp.mutexNext.Lock() - delete(sp.next, next_id) - sp.mutexNext.Unlock() - - // came from the next saga and TrapCancel was enabled - cm := MessageSagaCancel{ - TransactionID: tx.id, - NextID: next_id, - Reason: cancel.Reason, - } - sp.Send(sp.Self(), cm) - return SagaStatusOK - } - - sp.cancelTX(m.Pid, cancel, tx) - return sp.behavior.HandleTxCancel(sp, tx.id, cancel.Reason) - - case etf.Atom("$saga_result"): - result := messageSagaResult{} - if err := etf.TermIntoStruct(m.Command, &result); err != nil { - return lib.ErrUnsupportedRequest - } - - transactionID := SagaTransactionID(result.TransactionID) - sp.mutexTXS.Lock() - tx, ok := sp.txs[transactionID] - sp.mutexTXS.Unlock() - if !ok { - // ignore unknown TX - return nil - } - - next_id := SagaNextID(result.Origin) - empty_next_id := SagaNextID{} - // next id is empty if we got result on a saga created this TX - if next_id != empty_next_id { - sp.mutexNext.Lock() - _, ok := sp.next[next_id] - sp.mutexNext.Unlock() - if !ok { - // ignore unknown result - return nil - } - sp.mutexNext.Lock() - delete(sp.next, next_id) - sp.mutexNext.Unlock() - - tx.Lock() - next := tx.next[next_id] - if tx.options.TwoPhaseCommit == false { - next.cancelTimer() - sp.DemonitorProcess(result.Origin) - delete(tx.next, next_id) - } else { - next.done = true - } - tx.Unlock() - - return sp.behavior.HandleTxResult(sp, tx.id, next_id, result.Result) - } - - final, status := sp.behavior.HandleTxDone(sp, tx.id, result.Result) - if status == SagaStatusOK { - sp.commitTX(tx, final) - } - - return status - - case etf.Atom("$saga_interim"): - interim := messageSagaResult{} - if err := etf.TermIntoStruct(m.Command, &interim); err != nil { - return lib.ErrUnsupportedRequest - } - next_id := SagaNextID(interim.Origin) - sp.mutexNext.Lock() - tx, ok := sp.next[next_id] - sp.mutexNext.Unlock() - if !ok { - // ignore unknown interim result and send cancel message to the sender - message := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), - etf.Tuple{ - interim.TransactionID, - interim.Origin, - "unknown or canceled tx", - }, - } - sp.Send(m.Pid, message) - return nil - } - return sp.behavior.HandleTxInterim(sp, tx.id, next_id, interim.Result) - - case etf.Atom("$saga_commit"): - // propagate Commit signal if 2PC is enabled - commit := messageSagaCommit{} - if err := etf.TermIntoStruct(m.Command, &commit); err != nil { - return lib.ErrUnsupportedRequest - } - transactionID := SagaTransactionID(commit.TransactionID) - sp.mutexTXS.Lock() - tx, ok := sp.txs[transactionID] - sp.mutexTXS.Unlock() - if !ok { - // ignore unknown TX - return nil - } - // clean up and send commit message before we invoke callback - sp.commitTX(tx, commit.Final) - // make sure if 2PC was enabled on this TX - if tx.options.TwoPhaseCommit { - return sp.behavior.HandleTxCommit(sp, tx.id, commit.Final) - } - return SagaStatusOK - } - return lib.ErrUnsupportedRequest -} - -func (sp *SagaProcess) cancelTX(from 
etf.Pid, cancel messageSagaCancel, tx *SagaTransaction) { - tx.cancelTimer() - - // stop workers - tx.Lock() - cancelJobs := []etf.Pid{} - for _, pid := range tx.jobs { - sp.Unlink(pid) - sp.Cast(pid, messageSagaJobCancel{reason: cancel.Reason}) - cancelJobs = append(cancelJobs, pid) - } - tx.Unlock() - - sp.mutexJobs.Lock() - for i := range cancelJobs { - job, ok := sp.jobs[cancelJobs[i]] - if ok { - delete(sp.jobs, cancelJobs[i]) - job.cancelTimer() - } - } - sp.mutexJobs.Unlock() - - // remove monitor from parent saga - if tx.parents[0] != sp.Self() { - sp.DemonitorProcess(tx.monitor) - - // do not send to the parent saga if it came from there - // and cancelation reason caused by lifespan timer - if tx.parents[0] != from && cancel.Reason != "lifespan" { - cm := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), - etf.Tuple{ - cancel.TransactionID, - etf.Ref(tx.origin), - cancel.Reason, - }, - } - sp.Send(tx.parents[0], cm) - } - } - - // send cancel to all next sagas except the saga this cancel came from - sp.mutexNext.Lock() - for nxtid, nxt := range tx.next { - ref := etf.Ref(nxtid) - // remove monitor from the next saga - sp.DemonitorProcess(ref) - delete(sp.next, nxtid) - nxt.cancelTimer() - - if cancel.Reason == "lifespan" { - // do not send if the cancelation caused by lifespan timer - continue - } - if ref == cancel.Origin { - // do not send to the parent if it came from there - continue - } - - cm := etf.Tuple{ - etf.Atom("$saga_cancel"), - sp.Self(), - etf.Tuple{ - cancel.TransactionID, - ref, - cancel.Reason, - }, - } - if err := sp.Send(nxt.Saga, cm); err != nil { - errmessage := MessageSagaError{ - TransactionID: tx.id, - NextID: nxtid, - Error: "can't send cancel message", - Details: err.Error(), - } - sp.Send(sp.Self(), errmessage) - } - } - sp.mutexNext.Unlock() - - // remove tx from this saga - sp.mutexTXS.Lock() - delete(sp.txs, tx.id) - sp.mutexTXS.Unlock() -} - -func (sp *SagaProcess) commitTX(tx *SagaTransaction, final interface{}) { - tx.cancelTimer() - // remove tx from this saga - sp.mutexTXS.Lock() - delete(sp.txs, tx.id) - sp.mutexTXS.Unlock() - - // send commit message to all workers - for _, pid := range tx.jobs { - // unlink before this worker stopped - sp.Unlink(pid) - // do nothing if 2PC option is disabled - if tx.options.TwoPhaseCommit == false { - continue - } - // send commit message - sp.Cast(pid, messageSagaJobCommit{final: final}) - } - // remove monitor from parent saga - sp.DemonitorProcess(tx.monitor) - - sp.mutexNext.Lock() - for nxtid, nxt := range tx.next { - ref := etf.Ref(nxtid) - // remove monitor from the next saga - sp.DemonitorProcess(ref) - - delete(sp.next, nxtid) - nxt.cancelTimer() - // send commit message - if tx.options.TwoPhaseCommit == false { - continue - } - cm := etf.Tuple{ - etf.Atom("$saga_commit"), - sp.Self(), - etf.Tuple{ - etf.Ref(tx.id), // tx id - ref, // origin (next_id) - final, // final result - }, - } - if err := sp.Send(nxt.Saga, cm); err != nil { - errmessage := MessageSagaError{ - TransactionID: tx.id, - NextID: nxtid, - Error: "can't send commit message", - Details: err.Error(), - } - sp.Send(sp.Self(), errmessage) - } - } - sp.mutexNext.Unlock() - -} - -func (sp *SagaProcess) handleSagaExit(exit MessageExit) error { - sp.mutexJobs.Lock() - job, ok := sp.jobs[exit.Pid] - sp.mutexJobs.Unlock() - if !ok { - // passthrough this message to HandleSagaInfo callback - return ErrSagaJobUnknown - } - - if exit.Reason == "lifespan" { - sp.Unlink(job.worker.Self()) - job.worker.Exit(exit.Reason) - } else { - 
job.cancelTimer() - } - - // remove it from saga job list - sp.mutexJobs.Lock() - delete(sp.jobs, exit.Pid) - sp.mutexJobs.Unlock() - - // check if this tx is still alive - sp.mutexTXS.Lock() - tx, ok := sp.txs[job.TransactionID] - sp.mutexTXS.Unlock() - if !ok { - // seems it was already canceled - return SagaStatusOK - } - - // remove it from the tx job list - tx.Lock() - delete(tx.jobs, job.ID) - tx.Unlock() - - // if this job is done, don't care about the termination reason - if job.done { - return SagaStatusOK - } - - if exit.Reason != "normal" { - return sp.behavior.HandleJobFailed(sp, job.TransactionID, job.ID, exit.Reason) - } - - // seems no result received from this worker - return sp.behavior.HandleJobFailed(sp, job.TransactionID, job.ID, "no result") -} - -func (sp *SagaProcess) handleSagaDown(down MessageDown) error { - - sp.mutexNext.Lock() - tx, ok := sp.next[SagaNextID(down.Ref)] - sp.mutexNext.Unlock() - if ok { - // got DOWN message from the next saga - empty := etf.Pid{} - reason := fmt.Sprintf("next saga %s is down", down.Pid) - if down.Pid == empty { - // monitored by name - reason = fmt.Sprintf("next saga %s is down", down.ProcessID) - } - message := etf.Tuple{ - etf.Atom("$saga_cancel"), - down.Pid, - etf.Tuple{etf.Ref(tx.id), down.Ref, reason}, - } - sp.Send(sp.Self(), message) - return nil - } - - sp.mutexTXS.Lock() - for _, tx := range sp.txs { - if down.Ref != tx.monitor { - continue - } - - // got DOWN message from the parent saga - reason := fmt.Sprintf("parent saga %s is down", down.Pid) - message := etf.Tuple{ - etf.Atom("$saga_cancel"), - down.Pid, - etf.Tuple{etf.Ref(tx.id), down.Ref, reason}, - } - sp.Send(sp.Self(), message) - sp.mutexTXS.Unlock() - return nil - } - sp.mutexTXS.Unlock() - - // down.Ref is unknown. Return ErrSagaUnknown to passthrough - // this message to HandleSagaInfo callback - return ErrSagaUnknown -} - -// -// Server callbacks -// - -// Init -func (gs *Saga) Init(process *ServerProcess, args ...etf.Term) error { - var options SagaOptions - - behavior, ok := process.Behavior().(SagaBehavior) - if !ok { - return fmt.Errorf("Saga: not a SagaBehavior") - } - - sagaProcess := &SagaProcess{ - ServerProcess: *process, - txs: make(map[SagaTransactionID]*SagaTransaction), - next: make(map[SagaNextID]*SagaTransaction), - behavior: behavior, - } - // do not inherit parent State - sagaProcess.State = nil - - options, err := behavior.InitSaga(sagaProcess, args...) 
- if err != nil { - return err - } - - sagaProcess.options = options - process.State = sagaProcess - - if options.Worker != nil { - sagaProcess.jobs = make(map[etf.Pid]*SagaJob) - } - - process.SetTrapExit(true) - - return nil -} - -// HandleCall -func (gs *Saga) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - sp := process.State.(*SagaProcess) - return sp.behavior.HandleSagaCall(sp, from, message) -} - -// HandleDirect -func (gs *Saga) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - sp := process.State.(*SagaProcess) - switch m := message.(type) { - case sagaSetMaxTransactions: - sp.options.MaxTransactions = m.max - return nil, DirectStatusOK - default: - return sp.behavior.HandleSagaDirect(sp, ref, message) - } -} - -// HandleCast -func (gs *Saga) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - var status SagaStatus - - sp := process.State.(*SagaProcess) - - switch m := message.(type) { - case messageSagaJobResult: - sp.mutexJobs.Lock() - job, ok := sp.jobs[m.pid] - sp.mutexJobs.Unlock() - if !ok { - // kill this process - if worker := process.ProcessByPid(m.pid); worker != nil { - process.Unlink(worker.Self()) - worker.Kill() - } - status = SagaStatusOK - break - } - job.done = true - job.cancelTimer() - - sp.mutexTXS.Lock() - tx, ok := sp.txs[job.TransactionID] - sp.mutexTXS.Unlock() - - if !ok { - // tx is already canceled. kill this worker if its still alive (tx might have had - // 2PC enabled, and the worker is waiting for the commit message) - process.Unlink(job.worker.Self()) - job.worker.Kill() - status = SagaStatusOK - break - } - - // remove this job from the tx job list, but do not remove - // from the sp.jobs (will be removed once worker terminated) - if tx.options.TwoPhaseCommit == false { - tx.Lock() - delete(tx.jobs, job.ID) - tx.Unlock() - } - - status = sp.behavior.HandleJobResult(sp, job.TransactionID, job.ID, m.result) - - case messageSagaJobInterim: - sp.mutexJobs.Lock() - job, ok := sp.jobs[m.pid] - sp.mutexJobs.Unlock() - if !ok { - // kill this process - if worker := process.ProcessByPid(m.pid); worker != nil { - process.Unlink(worker.Self()) - worker.Kill() - } - // tx was canceled. 
just ignore it - status = SagaStatusOK - break - } - status = sp.behavior.HandleJobInterim(sp, job.TransactionID, job.ID, m.interim) - - default: - status = sp.behavior.HandleSagaCast(sp, message) - } - - switch status { - case SagaStatusOK: - return ServerStatusOK - case SagaStatusStop: - return ServerStatusStop - default: - return ServerStatus(status) - } -} - -// HandleInfo -func (gs *Saga) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - var mSaga messageSaga - - sp := process.State.(*SagaProcess) - switch m := message.(type) { - case MessageExit: - // handle worker exit message - err := sp.handleSagaExit(m) - if err == ErrSagaJobUnknown { - return sp.behavior.HandleSagaInfo(sp, m) - } - return ServerStatus(err) - - case MessageDown: - // handle saga's down message - err := sp.handleSagaDown(m) - if err == ErrSagaUnknown { - return sp.behavior.HandleSagaInfo(sp, m) - } - return ServerStatus(err) - } - - if err := etf.TermIntoStruct(message, &mSaga); err != nil { - return sp.behavior.HandleSagaInfo(sp, message) - } - - status := sp.handleSagaRequest(mSaga) - switch status { - case nil, SagaStatusOK: - return ServerStatusOK - case SagaStatusStop: - return ServerStatusStop - case lib.ErrUnsupportedRequest: - return sp.behavior.HandleSagaInfo(sp, message) - default: - return ServerStatus(status) - } -} - -// -// default Saga callbacks -// - -// HandleTxInterim -func (gs *Saga) HandleTxInterim(process *SagaProcess, id SagaTransactionID, from SagaNextID, interim interface{}) SagaStatus { - lib.Warning("HandleTxInterim: [%v %v] unhandled message %#v", id, from, interim) - return ServerStatusOK -} - -// HandleTxCommit -func (gs *Saga) HandleTxCommit(process *SagaProcess, id SagaTransactionID, final interface{}) SagaStatus { - lib.Warning("HandleTxCommit: [%v] unhandled message", id) - return ServerStatusOK -} - -// HandleTxDone -func (gs *Saga) HandleTxDone(process *SagaProcess, id SagaTransactionID, result interface{}) (interface{}, SagaStatus) { - return nil, fmt.Errorf("Saga [%v:%v] has no implementation of HandleTxDone method", process.Self(), process.Name()) -} - -// HandleSagaCall -func (gs *Saga) HandleSagaCall(process *SagaProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleSagaCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleSagaCast -func (gs *Saga) HandleSagaCast(process *SagaProcess, message etf.Term) ServerStatus { - lib.Warning("HandleSagaCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleSagaInfo -func (gs *Saga) HandleSagaInfo(process *SagaProcess, message etf.Term) ServerStatus { - lib.Warning("HandleSagaInfo: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleSagaDirect -func (gs *Saga) HandleSagaDirect(process *SagaProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - return nil, lib.ErrUnsupportedRequest -} - -// HandleJobResult -func (gs *Saga) HandleJobResult(process *SagaProcess, id SagaTransactionID, from SagaJobID, result interface{}) SagaStatus { - lib.Warning("HandleJobResult: [%v %v] unhandled message %#v", id, from, result) - return SagaStatusOK -} - -// HandleJobInterim -func (gs *Saga) HandleJobInterim(process *SagaProcess, id SagaTransactionID, from SagaJobID, interim interface{}) SagaStatus { - lib.Warning("HandleJobInterim: [%v %v] unhandled message %#v", id, from, interim) - return SagaStatusOK -} - -// HandleJobFailed -func (gs *Saga) HandleJobFailed(process 
*SagaProcess, id SagaTransactionID, from SagaJobID, reason string) SagaStatus { - lib.Warning("HandleJobFailed: [%v %v] unhandled message. reason %q", id, from, reason) - return nil -} diff --git a/gen/saga_worker.go b/gen/saga_worker.go deleted file mode 100644 index cce94db2..00000000 --- a/gen/saga_worker.go +++ /dev/null @@ -1,235 +0,0 @@ -package gen - -import ( - "fmt" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -// SagaWorkerBehavior -type SagaWorkerBehavior interface { - ServerBehavior - // Mandatory callbacks - - // HandleJobStart invoked on a worker start - HandleJobStart(process *SagaWorkerProcess, job SagaJob) error - // HandleJobCancel invoked if transaction was canceled before the termination. - HandleJobCancel(process *SagaWorkerProcess, reason string) - - // Optional callbacks - - // HandleJobCommit invoked if this job was a part of the transaction - // with enabled TwoPhaseCommit option. All workers involved in this TX - // handling are receiving this call. Callback invoked before the termination. - HandleJobCommit(process *SagaWorkerProcess, final interface{}) - - // HandleWorkerInfo this callback is invoked on Process.Send. This method is optional - // for the implementation - HandleWorkerInfo(process *SagaWorkerProcess, message etf.Term) ServerStatus - // HandleWorkerCast this callback is invoked on ServerProcess.Cast. This method is optional - // for the implementation - HandleWorkerCast(process *SagaWorkerProcess, message etf.Term) ServerStatus - // HandleWorkerCall this callback is invoked on ServerProcess.Call. This method is optional - // for the implementation - HandleWorkerCall(process *SagaWorkerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - // HandleWorkerDirect this callback is invoked on Process.Direct. This method is optional - // for the implementation - HandleWorkerDirect(process *SagaWorkerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) - - // HandleWorkerTerminate this callback invoked on a process termination - HandleWorkerTerminate(process *SagaWorkerProcess, reason string) -} - -// SagaWorker -type SagaWorker struct { - Server -} - -// SagaWorkerProcess -type SagaWorkerProcess struct { - ServerProcess - - behavior SagaWorkerBehavior - job SagaJob - done bool - cancel bool -} - -type messageSagaJobStart struct { - job SagaJob -} -type messageSagaJobDone struct{} -type messageSagaJobCancel struct { - reason string -} -type messageSagaJobCommit struct { - final interface{} -} -type messageSagaJobInterim struct { - pid etf.Pid - interim interface{} -} -type messageSagaJobResult struct { - pid etf.Pid - result interface{} -} -type messageSagaJobLifespan struct { - pid etf.Pid -} - -// -// SagaWorkerProcess methods -// - -// SendResult sends the result and terminates this worker if 2PC is disabled. Otherwise, -// will be waiting for cancel/commit signal. 
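For illustration, a hypothetical worker (not from this patch) built on the SagaWorkerBehavior above; MyWorker embeds gen.SagaWorker, does its work in HandleJobStart, and reports back with SendResult (defined just below).

type MyWorker struct {
	SagaWorker
}

func (w *MyWorker) HandleJobStart(process *SagaWorkerProcess, job SagaJob) error {
	result := fmt.Sprintf("processed %v", job.Value) // stand-in for real work
	return process.SendResult(result)
}

func (w *MyWorker) HandleJobCancel(process *SagaWorkerProcess, reason string) {
	// compensate/roll back partial work here; reason is "lifespan" on timeout
}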
-func (wp *SagaWorkerProcess) SendResult(result interface{}) error { - if wp.done { - return ErrSagaResultAlreadySent - } - if wp.cancel { - return ErrSagaTxCanceled - } - message := messageSagaJobResult{ - pid: wp.Self(), - result: result, - } - err := wp.Cast(wp.job.saga, message) - if err != nil { - return err - } - wp.done = true - - // if 2PC is enable do not terminate this worker - if wp.job.commit { - return nil - } - - wp.Cast(wp.Self(), messageSagaJobDone{}) - return nil -} - -// SendInterim -func (wp *SagaWorkerProcess) SendInterim(interim interface{}) error { - if wp.done { - return ErrSagaResultAlreadySent - } - if wp.cancel { - return ErrSagaTxCanceled - } - message := messageSagaJobInterim{ - pid: wp.Self(), - interim: interim, - } - return wp.Cast(wp.job.saga, message) -} - -// Server callbacks - -// Init -func (w *SagaWorker) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(SagaWorkerBehavior) - if !ok { - return fmt.Errorf("Not a SagaWorkerBehavior") - } - workerProcess := &SagaWorkerProcess{ - ServerProcess: *process, - behavior: behavior, - } - process.State = workerProcess - return nil -} - -// HandleCast -func (w *SagaWorker) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - wp := process.State.(*SagaWorkerProcess) - switch m := message.(type) { - case messageSagaJobStart: - wp.job = m.job - err := wp.behavior.HandleJobStart(wp, wp.job) - if err != nil { - return err - } - - // if job is done and 2PC is disabled - // stop this worker with 'normal' as a reason - if wp.done && !wp.job.commit { - return ServerStatusStop - } - return ServerStatusOK - case messageSagaJobDone: - return ServerStatusStop - case messageSagaJobCommit: - wp.behavior.HandleJobCommit(wp, m.final) - return ServerStatusStop - case messageSagaJobCancel: - wp.cancel = true - wp.behavior.HandleJobCancel(wp, m.reason) - return ServerStatusStop - default: - return wp.behavior.HandleWorkerCast(wp, message) - } -} - -// HandleCall -func (w *SagaWorker) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - p := process.State.(*SagaWorkerProcess) - return p.behavior.HandleWorkerCall(p, from, message) -} - -// HandleDirect -func (w *SagaWorker) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - p := process.State.(*SagaWorkerProcess) - return p.behavior.HandleWorkerDirect(p, ref, message) -} - -// HandleInfo -func (w *SagaWorker) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - p := process.State.(*SagaWorkerProcess) - return p.behavior.HandleWorkerInfo(p, message) -} - -// Terminate -func (w *SagaWorker) Terminate(process *ServerProcess, reason string) { - p := process.State.(*SagaWorkerProcess) - p.behavior.HandleWorkerTerminate(p, reason) - return -} - -// default callbacks - -// HandleJobCommit -func (w *SagaWorker) HandleJobCommit(process *SagaWorkerProcess, final interface{}) { - lib.Warning("HandleJobCommit: unhandled message %#v", final) - return -} - -// HandleWorkerInfo -func (w *SagaWorker) HandleWorkerInfo(process *SagaWorkerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleWorkerInfo: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleWorkerCast -func (w *SagaWorker) HandleWorkerCast(process *SagaWorkerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleWorkerCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleWorkerCall -func (w *SagaWorker) 
HandleWorkerCall(process *SagaWorkerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleWorkerCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleWorkerDirect -func (w *SagaWorker) HandleWorkerDirect(process *SagaWorkerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - lib.Warning("HandleWorkerDirect: unhandled message %#v", message) - return nil, DirectStatusOK -} - -// HandleWorkerTerminate -func (w *SagaWorker) HandleWorkerTerminate(process *SagaWorkerProcess, reason string) { - return -} diff --git a/gen/server.go b/gen/server.go deleted file mode 100644 index 6b8fca35..00000000 --- a/gen/server.go +++ /dev/null @@ -1,585 +0,0 @@ -package gen - -import ( - "fmt" - "runtime" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -const ( - DefaultCallTimeout = 5 -) - -// ServerBehavior interface -type ServerBehavior interface { - ProcessBehavior - - // methods below are optional - - // Init invoked on a start Server - Init(process *ServerProcess, args ...etf.Term) error - - // HandleCast invoked if Server received message sent with ServerProcess.Cast. - // Return ServerStatusStop to stop server with "normal" reason. Use ServerStatus(error) - // for the custom reason - HandleCast(process *ServerProcess, message etf.Term) ServerStatus - - // HandleCall invoked if Server got sync request using ServerProcess.Call - HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - - // HandleDirect invoked on a direct request made with Process.Direct - HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) - - // HandleInfo invoked if Server received message sent with Process.Send. - HandleInfo(process *ServerProcess, message etf.Term) ServerStatus - - // Terminate invoked on a termination process. ServerProcess.State is not locked during - // this callback. - Terminate(process *ServerProcess, reason string) -} - -// ServerStatus -type ServerStatus error -type DirectStatus error - -var ( - ServerStatusOK ServerStatus = nil - ServerStatusStop ServerStatus = fmt.Errorf("stop") - ServerStatusIgnore ServerStatus = fmt.Errorf("ignore") - - DirectStatusOK DirectStatus = nil - DirectStatusIgnore DirectStatus = fmt.Errorf("ignore") -) - -// ServerStatusStopWithReason -func ServerStatusStopWithReason(s string) ServerStatus { - return ServerStatus(fmt.Errorf(s)) -} - -// Server is implementation of ProcessBehavior interface for Server objects -type Server struct { - ServerBehavior -} - -// ServerFrom -type ServerFrom struct { - Pid etf.Pid - Ref etf.Ref - ReplyByAlias bool -} - -// ServerState state of the Server process. 
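For illustration, a minimal hypothetical Server implementation (not from this patch) against the ServerBehavior above; MyServer embeds gen.Server and keeps a counter in process.State.

type MyServer struct {
	Server
}

func (s *MyServer) Init(process *ServerProcess, args ...etf.Term) error {
	process.State = 0 // message counter kept in the process state
	return nil
}

func (s *MyServer) HandleCast(process *ServerProcess, message etf.Term) ServerStatus {
	if message == etf.Atom("stop") {
		return ServerStatusStop // terminate with reason "normal"
	}
	process.State = process.State.(int) + 1
	return ServerStatusOK
}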
-type ServerProcess struct { - ProcessState - - behavior ServerBehavior - counter uint64 // total number of processed messages from mailBox - currentFunction string - - mailbox <-chan ProcessMailboxMessage - original <-chan ProcessMailboxMessage - deferred chan ProcessMailboxMessage - - waitReply *etf.Ref - callbackWaitReply chan *etf.Ref - stop chan string -} - -type handleCallMessage struct { - from ServerFrom - message etf.Term -} - -type handleCastMessage struct { - message etf.Term -} - -type handleInfoMessage struct { - message etf.Term -} - -// CastAfter a simple wrapper for Process.SendAfter to send a message in fashion of 'gen_server:cast' -func (sp *ServerProcess) CastAfter(to interface{}, message etf.Term, after time.Duration) CancelFunc { - msg := etf.Term(etf.Tuple{etf.Atom("$gen_cast"), message}) - return sp.SendAfter(to, msg, after) -} - -// Cast sends a message in fashion of 'gen_server:cast'. 'to' can be a Pid, registered local name -// or gen.ProcessID{RegisteredName, NodeName} -func (sp *ServerProcess) Cast(to interface{}, message etf.Term) error { - msg := etf.Term(etf.Tuple{etf.Atom("$gen_cast"), message}) - return sp.Send(to, msg) -} - -// Call makes outgoing sync request in fashion of 'gen_server:call'. -// 'to' can be Pid, registered local name or gen.ProcessID{RegisteredName, NodeName}. -func (sp *ServerProcess) Call(to interface{}, message etf.Term) (etf.Term, error) { - return sp.CallWithTimeout(to, message, DefaultCallTimeout) -} - -// CallWithTimeout makes outgoing sync request in fashiod of 'gen_server:call' with given timeout. -func (sp *ServerProcess) CallWithTimeout(to interface{}, message etf.Term, timeout int) (etf.Term, error) { - ref := sp.MakeRef() - from := etf.Tuple{sp.Self(), ref} - msg := etf.Term(etf.Tuple{etf.Atom("$gen_call"), from, message}) - - sp.PutSyncRequest(ref) - if err := sp.Send(to, msg); err != nil { - sp.CancelSyncRequest(ref) - return nil, err - } - sp.callbackWaitReply <- &ref - value, err := sp.WaitSyncReply(ref, timeout) - return value, err - -} - -// SendReply sends a reply message to the sender made ServerProcess.Call request. -// Useful for the case with dispatcher and pool of workers: Dispatcher process -// forwards Call requests (asynchronously) within a HandleCall callback to the worker(s) -// using ServerProcess.Cast or ServerProcess.Send but returns ServerStatusIgnore -// instead of ServerStatusOK; Worker process sends result using ServerProcess.SendReply -// method with 'from' value received from the Dispatcher. -func (sp *ServerProcess) SendReply(from ServerFrom, reply etf.Term) error { - var fromTag etf.Term - var to etf.Term - if from.ReplyByAlias { - // Erlang gen_server:call uses improper list for the reply ['alias'|Ref] - fromTag = etf.ListImproper{etf.Atom("alias"), from.Ref} - to = etf.Alias(from.Ref) - } else { - fromTag = from.Ref - to = from.Pid - } - - if reply != nil { - rep := etf.Tuple{fromTag, reply} - return sp.Send(to, rep) - } - rep := etf.Tuple{fromTag, etf.Atom("nil")} - return sp.Send(to, rep) -} - -// Reply the handling process.Direct(...) calls can be done asynchronously -// using gen.DirectStatusIgnore as a returning status in the HandleDirect callback. -// In this case, you must reply manualy using gen.ServerProcess.Reply method in any other -// callback. 
If a caller has canceled this request due to timeout it returns lib.ErrReferenceUnknown -func (sp *ServerProcess) Reply(ref etf.Ref, reply etf.Term, err error) error { - return sp.PutSyncReply(ref, reply, err) -} - -// MessageCounter returns the total number of messages handled by Server callbacks: HandleCall, -// HandleCast, HandleInfo, HandleDirect -func (sp *ServerProcess) MessageCounter() uint64 { - return sp.counter -} - -// ProcessInit -func (gs *Server) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) { - behavior, ok := p.Behavior().(ServerBehavior) - if !ok { - return ProcessState{}, fmt.Errorf("ProcessInit: not a ServerBehavior") - } - ps := ProcessState{ - Process: p, - } - - sp := &ServerProcess{ - ProcessState: ps, - behavior: behavior, - - // callbackWaitReply must be defined here, otherwise making a Call request - // will not be able in the inherited object (locks on trying to send - // a message to the nil channel) - callbackWaitReply: make(chan *etf.Ref), - } - - err := behavior.Init(sp, args...) - if err != nil { - return ProcessState{}, err - } - ps.State = sp - return ps, nil -} - -// ProcessLoop -func (gs *Server) ProcessLoop(ps ProcessState, started chan<- bool) string { - sp, ok := ps.State.(*ServerProcess) - if !ok { - return "ProcessLoop: not a ServerBehavior" - } - - channels := ps.ProcessChannels() - sp.mailbox = channels.Mailbox - sp.original = channels.Mailbox - sp.deferred = make(chan ProcessMailboxMessage, cap(channels.Mailbox)) - sp.currentFunction = "Server:loop" - sp.stop = make(chan string, 2) - - defer func() { - if sp.waitReply == nil { - return - } - // there is running callback goroutine that waiting for a reply. to get rid - // of infinity lock (of this callback goroutine) we must provide a reader - // for the callbackWaitReply channel (it writes a nil value to this channel - // on exit) - go sp.waitCallbackOrDeferr(nil) - }() - - started <- true - for { - var message etf.Term - var fromPid etf.Pid - - select { - case ex := <-channels.GracefulExit: - if sp.TrapExit() == false { - sp.behavior.Terminate(sp, ex.Reason) - return ex.Reason - } - // Enabled trap exit message. Transform exit signal - // into MessageExit and send it to itself as a regular message - // keeping the processing order right. - // We should process this message after the others we got earlier - // from the died process. 
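For illustration, a hypothetical asynchronous Direct flow (not from this patch) following the Reply comment above: HandleDirect returns DirectStatusIgnore now, and the reply is sent later from another callback. Stashing a single ref in process.State is a simplification; real code would keep a map of pending refs.

type MyAsyncServer struct {
	Server
}

func (s *MyAsyncServer) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) {
	process.State = ref // remember which request to answer (simplified)
	return nil, DirectStatusIgnore
}

func (s *MyAsyncServer) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus {
	if ref, ok := process.State.(etf.Ref); ok {
		// deliver the deferred reply; returns lib.ErrReferenceUnknown
		// if the caller has already timed out
		process.Reply(ref, message, nil)
	}
	return ServerStatusOK
}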
- message = MessageExit{ - Pid: ex.From, - Reason: ex.Reason, - } - // We can't write this message to the mailbox directly so use - // the common way to send it to itself - ps.Send(ps.Self(), message) - continue - - case reason := <-sp.stop: - sp.behavior.Terminate(sp, reason) - return reason - - case msg := <-sp.mailbox: - sp.mailbox = sp.original - fromPid = msg.From - message = msg.Message - - case <-sp.Context().Done(): - sp.behavior.Terminate(sp, "kill") - return "kill" - - case direct := <-channels.Direct: - sp.waitCallbackOrDeferr(direct) - continue - case sp.waitReply = <-sp.callbackWaitReply: - continue - } - - lib.Log("[%s] GEN_SERVER %s got message from %s", sp.NodeName(), sp.Self(), fromPid) - - switch m := message.(type) { - case etf.Tuple: - - switch mtag := m.Element(1).(type) { - case etf.Ref: - // check if we waiting for reply - if len(m) != 2 { - break - } - sp.PutSyncReply(mtag, m.Element(2), nil) - if sp.waitReply != nil && *sp.waitReply == mtag { - sp.waitReply = nil - // continue read sp.callbackWaitReply channel - // to wait for the exit from the callback call - sp.waitCallbackOrDeferr(nil) - continue - } - - case etf.Atom: - switch mtag { - case etf.Atom("$gen_call"): - - var from ServerFrom - var ok bool - if len(m) != 3 { - // wrong $gen_call message. ignore it - break - } - - fromTuple, ok := m.Element(2).(etf.Tuple) - if !ok || len(fromTuple) != 2 { - // not a tuple or has wrong value - break - } - - from.Pid, ok = fromTuple.Element(1).(etf.Pid) - if !ok { - // wrong Pid value - break - } - - correct := false - switch v := fromTuple.Element(2).(type) { - case etf.Ref: - from.Ref = v - correct = true - case etf.List: - var ok bool - // was sent with "alias" [etf.Atom("alias"), etf.Ref] - if len(v) != 2 { - // wrong value - break - } - if alias, ok := v.Element(1).(etf.Atom); !ok || alias != etf.Atom("alias") { - // wrong value - break - } - from.Ref, ok = v.Element(2).(etf.Ref) - if !ok { - // wrong value - break - } - from.ReplyByAlias = true - correct = true - } - - if correct == false { - break - } - - callMessage := handleCallMessage{ - from: from, - message: m.Element(3), - } - sp.waitCallbackOrDeferr(callMessage) - continue - - case etf.Atom("$gen_cast"): - if len(m) != 2 { - // wrong $gen_cast message. ignore it - break - } - castMessage := handleCastMessage{ - message: m.Element(2), - } - sp.waitCallbackOrDeferr(castMessage) - continue - } - } - - lib.Log("[%s] GEN_SERVER %#v got simple message %#v", sp.NodeName(), sp.Self(), message) - infoMessage := handleInfoMessage{ - message: message, - } - sp.waitCallbackOrDeferr(infoMessage) - - case handleCallMessage: - sp.waitCallbackOrDeferr(message) - case handleCastMessage: - sp.waitCallbackOrDeferr(message) - case handleInfoMessage: - sp.waitCallbackOrDeferr(message) - case ProcessDirectMessage: - sp.waitCallbackOrDeferr(message) - - default: - lib.Log("m: %#v", m) - infoMessage := handleInfoMessage{ - message: m, - } - sp.waitCallbackOrDeferr(infoMessage) - } - } -} - -// ServerProcess handlers - -func (sp *ServerProcess) waitCallbackOrDeferr(message interface{}) { - if sp.waitReply != nil { - // already waiting for reply. deferr this message - deferred := ProcessMailboxMessage{ - Message: message, - } - select { - case sp.deferred <- deferred: - // do nothing - default: - lib.Warning("deferred mailbox of %s[%q] is full. 
dropped message %v", - sp.Self(), sp.Name(), message) - } - - return - } - - switch m := message.(type) { - case handleCallMessage: - go func() { - sp.counter++ - sp.handleCall(m) - sp.callbackWaitReply <- nil - }() - case handleCastMessage: - go func() { - sp.counter++ - sp.handleCast(m) - sp.callbackWaitReply <- nil - }() - case handleInfoMessage: - go func() { - sp.counter++ - sp.handleInfo(m) - sp.callbackWaitReply <- nil - }() - case ProcessDirectMessage: - go func() { - sp.counter++ - sp.handleDirect(m) - sp.callbackWaitReply <- nil - }() - case nil: - // it was called just to read the channel sp.callbackWaitReply - - default: - lib.Warning("unknown message type in waitCallbackOrDeferr: %#v", message) - return - } - - select { - - //case <-sp.Context().Done(): - // do not read the context state. otherwise the goroutine with running callback - // might lock forever on exit (or on making a Call request) as nobody read - // the callbackWaitReply channel. - - case sp.waitReply = <-sp.callbackWaitReply: - // not nil value means callback made a Call request and waiting for reply - if sp.waitReply == nil && len(sp.deferred) > 0 { - sp.mailbox = sp.deferred - } - return - } -} - -func (sp *ServerProcess) panicHandler() { - if r := recover(); r != nil { - pc, fn, line, _ := runtime.Caller(2) - lib.Warning("Server terminated %s[%q]. Panic reason: %#v at %s[%s:%d]", - sp.Self(), sp.Name(), r, runtime.FuncForPC(pc).Name(), fn, line) - sp.stop <- "panic" - } -} - -func (sp *ServerProcess) handleDirect(direct ProcessDirectMessage) { - if lib.CatchPanic() { - defer sp.panicHandler() - } - - cf := sp.currentFunction - sp.currentFunction = "Server:HandleDirect" - reply, status := sp.behavior.HandleDirect(sp, direct.Ref, direct.Message) - sp.currentFunction = cf - switch status { - case DirectStatusIgnore: - return - default: - sp.PutSyncReply(direct.Ref, reply, status) - } -} - -func (sp *ServerProcess) handleCall(m handleCallMessage) { - if lib.CatchPanic() { - defer sp.panicHandler() - } - - cf := sp.currentFunction - sp.currentFunction = "Server:HandleCall" - reply, status := sp.behavior.HandleCall(sp, m.from, m.message) - sp.currentFunction = cf - switch status { - case ServerStatusOK: - sp.SendReply(m.from, reply) - case ServerStatusIgnore: - return - case ServerStatusStop: - sp.stop <- "normal" - - default: - sp.stop <- status.Error() - } -} - -func (sp *ServerProcess) handleCast(m handleCastMessage) { - if lib.CatchPanic() { - defer sp.panicHandler() - } - - cf := sp.currentFunction - sp.currentFunction = "Server:HandleCast" - status := sp.behavior.HandleCast(sp, m.message) - sp.currentFunction = cf - - switch status { - case ServerStatusOK, ServerStatusIgnore: - return - case ServerStatusStop: - sp.stop <- "normal" - default: - sp.stop <- status.Error() - } -} - -func (sp *ServerProcess) handleInfo(m handleInfoMessage) { - if lib.CatchPanic() { - defer sp.panicHandler() - } - - cf := sp.currentFunction - sp.currentFunction = "Server:HandleInfo" - status := sp.behavior.HandleInfo(sp, m.message) - sp.currentFunction = cf - switch status { - case ServerStatusOK, ServerStatusIgnore: - return - case ServerStatusStop: - sp.stop <- "normal" - default: - sp.stop <- status.Error() - } -} - -// -// default callbacks for Server interface -// - -// Init -func (gs *Server) Init(process *ServerProcess, args ...etf.Term) error { - return nil -} - -// HanldeCast -func (gs *Server) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - lib.Warning("Server [%s] HandleCast: unhandled message 
%#v", process.Name(), message) - return ServerStatusOK -} - -// HandleInfo -func (gs *Server) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("Server [%s] HandleCall: unhandled message %#v from %#v", process.Name(), message, from) - return "ok", ServerStatusOK -} - -// HandleDirect -func (gs *Server) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - return nil, lib.ErrUnsupportedRequest -} - -// HandleInfo -func (gs *Server) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - lib.Warning("Server [%s] HandleInfo: unhandled message %#v", process.Name(), message) - return ServerStatusOK -} - -// Terminate -func (gs *Server) Terminate(process *ServerProcess, reason string) { - return -} diff --git a/gen/stage.go b/gen/stage.go deleted file mode 100644 index 95b4bb31..00000000 --- a/gen/stage.go +++ /dev/null @@ -1,992 +0,0 @@ -package gen - -import ( - "fmt" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" - //"github.com/ergo-services/ergo/lib" -) - -type StageCancelMode uint - -// StageOptions defines the producer configuration using Init callback. It will be ignored -// if it acts as a consumer only. -type StageOptions struct { - - // DisableDemandHandle. the demand is always handling using the HandleDemand callback. - // When this options is set to 'true', demands are accumulated until mode is - // set back to 'false' using SetDemandHandle(true) method - DisableDemandHandle bool - - // BufferSize the size of the buffer to store events without demand. - // default value = defaultDispatcherBufferSize - BufferSize uint - - // BufferKeepLast defines whether the first or last entries should be - // kept on the buffer in case the buffer size is exceeded. - BufferKeepLast bool - - Dispatcher StageDispatcherBehavior -} - -type StageStatus error - -const ( - StageCancelPermanent StageCancelMode = 0 - StageCancelTransient StageCancelMode = 1 - StageCancelTemporary StageCancelMode = 2 - - defaultDispatcherBufferSize = 10000 -) - -var ( - StageStatusOK StageStatus = nil - StageStatusStop StageStatus = fmt.Errorf("stop") - StageStatusUnsupported StageStatus = fmt.Errorf("unsupported") - StageStatusNotAProducer StageStatus = fmt.Errorf("not a producer") -) - -// StageBehavior interface for the Stage inmplementation -type StageBehavior interface { - ServerBehavior - - // InitStage - InitStage(process *StageProcess, args ...etf.Term) (StageOptions, error) - - // HandleDemand this callback is invoked on a producer stage - // The producer that implements this callback must either store the demand, or return the amount of requested events. - HandleDemand(process *StageProcess, subscription StageSubscription, count uint) (etf.List, StageStatus) - - // HandleEvents this callback is invoked on a consumer stage. - HandleEvents(process *StageProcess, subscription StageSubscription, events etf.List) StageStatus - - // HandleSubscribe This callback is invoked on a producer stage. - HandleSubscribe(process *StageProcess, subscription StageSubscription, options StageSubscribeOptions) StageStatus - - // HandleSubscribed this callback is invoked as a confirmation for the subscription request - // Returning false means that demand must be sent to producers explicitly using Ask method. - // Returning true means the stage implementation will take care of automatically sending. 
-	HandleSubscribed(process *StageProcess, subscription StageSubscription, opts StageSubscribeOptions) (bool, StageStatus)
-
-	// HandleCancel
-	// Invoked when a consumer is no longer subscribed to a producer (invoked on a producer stage).
-	// The cancelReason will be a {Cancel: "cancel", Reason: _} if the reason for cancellation
-	// was a Stage.Cancel call. Any other value means the cancellation reason was
-	// due to an EXIT.
-	HandleCancel(process *StageProcess, subscription StageSubscription, reason string) StageStatus
-
-	// HandleCanceled
-	// Invoked when a consumer is no longer subscribed to a producer (invoked on a consumer stage).
-	// Termination of this stage depends on the cancel mode of the given subscription. With the cancel mode
-	// StageCancelPermanent, this stage terminates right after this callback returns.
-	// With the cancel mode StageCancelTransient, termination depends on the reason of the subscription cancellation.
-	// The cancel mode StageCancelTemporary keeps this stage alive regardless of the reason.
-	HandleCanceled(process *StageProcess, subscription StageSubscription, reason string) StageStatus
-
-	// HandleStageCall this callback is invoked on ServerProcess.Call. This method is optional
-	// for the implementation
-	HandleStageCall(process *StageProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus)
-	// HandleStageDirect this callback is invoked on Process.Direct. This method is optional
-	// for the implementation
-	HandleStageDirect(process *StageProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus)
-	// HandleStageCast this callback is invoked on ServerProcess.Cast. This method is optional
-	// for the implementation
-	HandleStageCast(process *StageProcess, message etf.Term) ServerStatus
-	// HandleStageInfo this callback is invoked on Process.Send. This method is optional
-	// for the implementation
-	HandleStageInfo(process *StageProcess, message etf.Term) ServerStatus
-	// HandleStageTerminate this callback is invoked on process termination
-	HandleStageTerminate(process *StageProcess, reason string)
-}
-
-type StageSubscription struct {
-	Pid etf.Pid
-	ID  etf.Ref
-}
-
-type subscriptionInternal struct {
-	Producer     etf.Term
-	Subscription StageSubscription
-	options      StageSubscribeOptions
-	Monitor      etf.Ref
-	// number of event requests (demands) made as a consumer
-	count uint
-}
-
-type StageSubscribeOptions struct {
-	MinDemand uint `etf:"min_demand"`
-	MaxDemand uint `etf:"max_demand"`
-	// The stage implementation will take care of automatically sending
-	// demand to the producer (as a default behavior). You can disable it
-	// by setting ManualDemand to true
-	ManualDemand bool `etf:"manual"`
-	// Cancel defines what happens to the consumer if the producer terminates:
-	// StageCancelPermanent the consumer exits when the producer cancels or exits.
-	// StageCancelTransient the consumer exits only if reason is not "normal",
-	// "shutdown", or {"shutdown", _}
-	// StageCancelTemporary the consumer never exits
-	Cancel StageCancelMode `etf:"cancel"`
-
-	// Partition defines the number of the partition this subscription belongs to.
-	// This option is used by the DispatcherPartition
-	Partition uint `etf:"partition"`
-
-	// Extra is intended to be a custom set of options for a custom implementation
-	// of StageDispatcherBehavior
-	Extra etf.Term `etf:"extra"`
-}
-
-type StageCancelReason struct {
-	Cancel string
-	Reason string
-}
-
-type Stage struct {
-	Server
-}
-
-type StageProcess struct {
-	ServerProcess
-
-	options         StageOptions
-	demandBuffer    []demandRequest
-	dispatcherState interface{}
-	// keep our subscriptions
-	producers map[etf.Ref]*subscriptionInternal
-	// keep our subscribers
-	consumers map[etf.Pid]*subscriptionInternal
-	//
-	behavior StageBehavior
-}
-
-type stageRequestCommand struct {
-	Cmd  etf.Atom
-	Opt1 interface{}
-	Opt2 interface{}
-}
-
-type stageMessage struct {
-	Request      etf.Atom
-	Subscription StageSubscription
-	Command      interface{}
-}
-
-type setManualDemand struct {
-	subscription StageSubscription
-	enable       bool
-}
-
-type setCancelMode struct {
-	subscription StageSubscription
-	cancel       StageCancelMode
-}
-
-type setForwardDemand struct {
-	forward bool
-}
-
-type demandRequest struct {
-	subscription StageSubscription
-	count        uint
-}
-
-// SetCancelMode defines how the consumer handles termination of the producer. There are 3 modes:
-// StageCancelPermanent (default) - the consumer exits when the producer cancels or exits
-// StageCancelTransient - the consumer exits only if reason is not normal, shutdown, or {shutdown, reason}
-// StageCancelTemporary - never exits
-func (s *Stage) SetCancelMode(p Process, subscription StageSubscription, cancel StageCancelMode) error {
-	message := setCancelMode{
-		subscription: subscription,
-		cancel:       cancel,
-	}
-
-	_, err := p.Direct(message)
-	return err
-}
-
-//
-// StageProcess methods
-//
-
-// SetAutoDemand setting this option to false means that demand must be sent to producers
-// explicitly using the Ask method. This mode can be used when a special behavior is desired.
-// Setting this option to true enables the auto demand mode (this is the default mode for the consumer)
-func (p *StageProcess) SetAutoDemand(subscription StageSubscription, autodemand bool) error {
-	subInternal, ok := p.producers[subscription.ID]
-	if !ok {
-		return fmt.Errorf("unknown subscription")
-	}
-	subInternal.options.ManualDemand = autodemand == false
-	if subInternal.count < subInternal.options.MinDemand && subInternal.options.ManualDemand == false {
-		cnt := subInternal.options.MaxDemand - subInternal.count
-		p.sendDemand(subInternal.Producer, subscription, cnt)
-		subInternal.count += cnt
-	}
-	return nil
-}
-
-// AutoDemand returns the value of the auto demand option
-func (p *StageProcess) AutoDemand(subscription StageSubscription) (bool, error) {
-	subInternal, ok := p.producers[subscription.ID]
-	if !ok {
-		return false, fmt.Errorf("unknown subscription")
-	}
-	return subInternal.options.ManualDemand == false, nil
-}
-
-// SetDemandHandle setting this option to false disables handling of demand requests on a producer stage.
-// This is useful as a synchronization mechanism, where the demand is accumulated until
-// all consumers are subscribed. By default this option is true.
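A minimal sketch of the accumulate-and-release pattern that this option and the SetDemandHandle method below enable. MyProducer, expectedConsumers and the subscription counter are hypothetical, and HandleDemand plus the rest of the producer are omitted; only StageOptions.DisableDemandHandle, SetDemandHandle and HandleSubscribe come from the API in this (removed) file:

package example

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// hypothetical number of consumers the producer waits for
const expectedConsumers = 3

type MyProducer struct {
	gen.Stage
	subscribed int
}

// start with demand handling disabled: incoming demands are buffered
func (mp *MyProducer) InitStage(p *gen.StageProcess, args ...etf.Term) (gen.StageOptions, error) {
	return gen.StageOptions{DisableDemandHandle: true}, nil
}

// count subscriptions; once the last expected consumer has subscribed,
// release the buffered demands
func (mp *MyProducer) HandleSubscribe(p *gen.StageProcess, sub gen.StageSubscription, opts gen.StageSubscribeOptions) gen.StageStatus {
	mp.subscribed++
	if mp.subscribed == expectedConsumers {
		p.SetDemandHandle(true)
	}
	return gen.StageStatusOK
}

Demands that arrive before the last consumer subscribes are buffered and replayed once SetDemandHandle(true) is called, as the implementation below shows.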
-func (p *StageProcess) SetDemandHandle(enable bool) {
-	p.options.DisableDemandHandle = enable == false
-	if enable == true {
-		// create a demand with count = 0, which will be ignored but starts
-		// the processing of the buffered demands
-		msg := etf.Tuple{
-			etf.Atom("$gen_producer"),
-			etf.Tuple{etf.Pid{}, etf.Ref{}},
-			etf.Tuple{etf.Atom("ask"), 0},
-		}
-		p.Send(p.Self(), msg)
-	}
-}
-
-// DemandHandle reports whether handling of demand requests is enabled.
-func (p *StageProcess) DemandHandle() bool {
-	return p.options.DisableDemandHandle == false
-}
-
-// SetCancelMode defines how the consumer handles termination of the producer. There are 3 modes:
-// StageCancelPermanent (default) - the consumer exits when the producer cancels or exits
-// StageCancelTransient - the consumer exits only if reason is not normal, shutdown, or {shutdown, reason}
-// StageCancelTemporary - never exits
-func (p *StageProcess) SetCancelMode(subscription StageSubscription, mode StageCancelMode) error {
-	subInternal, ok := p.producers[subscription.ID]
-	if !ok {
-		return fmt.Errorf("unknown subscription")
-	}
-
-	subInternal.options.Cancel = mode
-	return nil
-}
-
-// CancelMode returns the current cancel mode for the consumer
-func (p *StageProcess) CancelMode(subscription StageSubscription) (StageCancelMode, error) {
-	subInternal, ok := p.producers[subscription.ID]
-	if !ok {
-		return 0, fmt.Errorf("unknown subscription")
-	}
-
-	return subInternal.options.Cancel, nil
-}
-
-// Subscribe subscribes to the given producer. The HandleSubscribed callback is invoked
-// on the consumer stage once the subscription request has been sent. If something goes wrong
-// on the producer side, the HandleCanceled callback is invoked with the cancellation reason.
-func (p *StageProcess) Subscribe(producer etf.Term, opts StageSubscribeOptions) (StageSubscription, error) {
-	var subscription StageSubscription
-	switch producer.(type) {
-	case string:
-	case etf.Pid:
-	case ProcessID:
-	default:
-		return subscription, fmt.Errorf("allowed type for producer: etf.Pid, string, gen.ProcessID")
-	}
-
-	subscription_id := p.MonitorProcess(producer)
-	subscription.Pid = p.Self()
-	subscription.ID = subscription_id
-
-	subscribe_opts := etf.List{
-		etf.Tuple{
-			etf.Atom("min_demand"),
-			opts.MinDemand,
-		},
-		etf.Tuple{
-			etf.Atom("max_demand"),
-			opts.MaxDemand,
-		},
-		etf.Tuple{
-			etf.Atom("cancel"),
-			int(opts.Cancel), // custom types couldn't be handled by etf.Encode
-		},
-		etf.Tuple{
-			etf.Atom("manual"),
-			opts.ManualDemand,
-		},
-		etf.Tuple{
-			etf.Atom("partition"),
-			opts.Partition,
-		},
-	}
-
-	// To avoid a race condition, we send this message before sending 'subscribe'
-	// to the producer process, making sure this subscription is registered before
-	// a MessageDown or MessageExit message arrives in case something goes wrong.
-	msg := etf.Tuple{
-		etf.Atom("$gen_consumer"),
-		etf.Tuple{p.Self(), subscription_id},
-		etf.Tuple{etf.Atom("subscribed"), producer, subscribe_opts},
-	}
-	p.Send(p.Self(), msg)
-
-	msg = etf.Tuple{
-		etf.Atom("$gen_producer"),
-		etf.Tuple{p.Self(), subscription_id},
-		etf.Tuple{etf.Atom("subscribe"), etf.Atom("nil"), subscribe_opts},
-	}
-	p.Send(producer, msg)
-
-	return subscription, nil
-}
-
-// SendEvents sends events to the subscribers
-func (p *StageProcess) SendEvents(events etf.List) error {
-	var deliver []StageDispatchItem
-	// dispatch to the subscribers
-	if len(p.consumers) == 0 {
-		return fmt.Errorf("no subscribers")
-	}
-	deliver = p.options.Dispatcher.Dispatch(p.dispatcherState, events)
-	if len(deliver) == 0 {
-		return fmt.Errorf("no demand")
-	}
-	for d := range deliver {
-		msg := etf.Tuple{
-			etf.Atom("$gen_consumer"),
-			etf.Tuple{deliver[d].subscription.Pid, deliver[d].subscription.ID},
-			deliver[d].events,
-		}
-		p.Send(deliver[d].subscription.Pid, msg)
-	}
-	return nil
-}
-
-// Ask makes a demand request for the given subscription. This function must only be
-// used when the consumer has put the subscription into manual mode using SetAutoDemand(subscription, false)
-func (p *StageProcess) Ask(subscription StageSubscription, count uint) error {
-	subInternal, ok := p.producers[subscription.ID]
-	if ok == false {
-		return fmt.Errorf("unknown subscription")
-	}
-	if subInternal.options.ManualDemand == false {
-		return fmt.Errorf("auto demand")
-	}
-
-	p.sendDemand(subInternal.Producer, subInternal.Subscription, count)
-	subInternal.count += count
-	return nil
-}
-
-// Cancel
-func (p *StageProcess) Cancel(subscription StageSubscription, reason string) error {
-	// if we act as a consumer with this subscription
-	if subInternal, ok := p.producers[subscription.ID]; ok {
-		msg := etf.Tuple{
-			etf.Atom("$gen_producer"),
-			etf.Tuple{subscription.Pid, subscription.ID},
-			etf.Tuple{etf.Atom("cancel"), reason},
-		}
-		p.Send(subInternal.Producer, msg)
-		cmd := stageRequestCommand{
-			Cmd:  etf.Atom("cancel"),
-			Opt1: "normal",
-		}
-		if _, err := p.handleConsumer(subInternal.Subscription, cmd); err != nil {
-			return err
-		}
-		return nil
-	}
-	// if we act as a producer within this subscription
-	if subInternal, ok := p.consumers[subscription.Pid]; ok {
-		msg := etf.Tuple{
-			etf.Atom("$gen_consumer"),
-			etf.Tuple{subscription.Pid, subscription.ID},
-			etf.Tuple{etf.Atom("cancel"), reason},
-		}
-		p.Send(subscription.Pid, msg)
-		p.DemonitorProcess(subInternal.Monitor)
-		cmd := stageRequestCommand{
-			Cmd:  etf.Atom("cancel"),
-			Opt1: "normal",
-		}
-		if _, err := p.handleProducer(subInternal.Subscription, cmd); err != nil {
-			return err
-		}
-		return nil
-	}
-	return fmt.Errorf("unknown subscription")
-}
-
-// gen.Server callbacks
-func (gst *Stage) Init(process *ServerProcess, args ...etf.Term) error {
-	stageProcess := &StageProcess{
-		ServerProcess: *process,
-		producers:     make(map[etf.Ref]*subscriptionInternal),
-		consumers:     make(map[etf.Pid]*subscriptionInternal),
-	}
-	// do not inherit the parent State
-	stageProcess.State = nil
-
-	behavior, ok := process.Behavior().(StageBehavior)
-	if !ok {
-		return fmt.Errorf("Stage: not a StageBehavior")
-	}
-	stageProcess.behavior = behavior
-
-	stageOpts, err := behavior.InitStage(stageProcess, args...)
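-	// InitStage provides the producer options (they are ignored for a consumer-only
-	// stage); a non-nil error aborts the start of this process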
-	if err != nil {
-		return err
-	}
-
-	if stageOpts.BufferSize == 0 {
-		stageOpts.BufferSize = defaultDispatcherBufferSize
-	}
-
-	// if a dispatcher wasn't specified, create the default one (StageDispatcherDemand)
-	if stageOpts.Dispatcher == nil {
-		stageOpts.Dispatcher = CreateStageDispatcherDemand()
-	}
-
-	stageProcess.dispatcherState = stageOpts.Dispatcher.Init(stageOpts)
-	stageProcess.options = stageOpts
-
-	process.State = stageProcess
-	return nil
-}
-
-func (gst *Stage) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
-	stageProcess := process.State.(*StageProcess)
-	return stageProcess.behavior.HandleStageCall(stageProcess, from, message)
-}
-
-func (gst *Stage) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) {
-	stageProcess := process.State.(*StageProcess)
-	return stageProcess.behavior.HandleStageDirect(stageProcess, ref, message)
-}
-
-func (gst *Stage) HandleCast(process *ServerProcess, message etf.Term) ServerStatus {
-	stageProcess := process.State.(*StageProcess)
-	return stageProcess.behavior.HandleStageCast(stageProcess, message)
-}
-
-func (gst *Stage) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus {
-	var r stageMessage
-
-	stageProcess := process.State.(*StageProcess)
-
-	// check if we got a MessageDown
-	if d, isDown := IsMessageDown(message); isDown {
-		if err := stageProcess.handleStageDown(d); err != nil {
-			return err
-		}
-		return ServerStatusOK
-	}
-
-	if err := etf.TermIntoStruct(message, &r); err != nil {
-		reply := stageProcess.behavior.HandleStageInfo(stageProcess, message)
-		return reply
-	}
-
-	_, err := stageProcess.handleStageRequest(r)
-
-	switch err {
-	case nil:
-		return ServerStatusOK
-	case StageStatusStop:
-		return ServerStatusStop
-	case StageStatusUnsupported:
-		status := stageProcess.behavior.HandleStageInfo(stageProcess, message)
-		return status
-	default:
-		return err
-	}
-}
-
-func (gst *Stage) Terminate(process *ServerProcess, reason string) {
-	stageProcess := process.State.(*StageProcess)
-	stageProcess.behavior.HandleStageTerminate(stageProcess, reason)
-}
-
-// default callbacks
-
-// InitStage
-func (gst *Stage) InitStage(process *StageProcess, args ...etf.Term) (StageOptions, error) {
-	return StageOptions{}, nil
-}
-
-// HandleStageCall
-func (gst *Stage) HandleStageCall(process *StageProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) {
-	// default callback if it wasn't implemented
-	lib.Warning("HandleStageCall: unhandled message (from %#v) %#v", from, message)
-	return etf.Atom("ok"), ServerStatusOK
-}
-
-// HandleStageDirect
-func (gst *Stage) HandleStageDirect(process *StageProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) {
-	// default callback if it wasn't implemented
-	return nil, lib.ErrUnsupportedRequest
-}
-
-// HandleStageCast
-func (gst *Stage) HandleStageCast(process *StageProcess, message etf.Term) ServerStatus {
-	// default callback if it wasn't implemented
-	lib.Warning("HandleStageCast: unhandled message %#v", message)
-	return ServerStatusOK
-}
-
-// HandleStageInfo
-func (gst *Stage) HandleStageInfo(process *StageProcess, message etf.Term) ServerStatus {
-	// default callback if it wasn't implemented
-	lib.Warning("HandleStageInfo: unhandled message %#v", message)
-	return ServerStatusOK
-}
-
-func (gst *Stage) HandleStageTerminate(process *StageProcess, reason string) {
-	return
-}
-
-// HandleSubscribe
-func (gst *Stage) HandleSubscribe(process *StageProcess, subscription StageSubscription, options StageSubscribeOptions) StageStatus {
-	return StageStatusNotAProducer
-}
-
-// HandleSubscribed
-func (gst *Stage) HandleSubscribed(process *StageProcess, subscription StageSubscription, opts StageSubscribeOptions) (bool, StageStatus) {
-	return opts.ManualDemand, StageStatusOK
-}
-
-// HandleCancel
-func (gst *Stage) HandleCancel(process *StageProcess, subscription StageSubscription, reason string) StageStatus {
-	// default callback if it wasn't implemented
-	return StageStatusOK
-}
-
-// HandleCanceled
-func (gst *Stage) HandleCanceled(process *StageProcess, subscription StageSubscription, reason string) StageStatus {
-	// default callback if it wasn't implemented
-	return StageStatusOK
-}
-
-// HandleEvents
-func (gst *Stage) HandleEvents(process *StageProcess, subscription StageSubscription, events etf.List) StageStatus {
-	lib.Warning("Stage HandleEvents: unhandled subscription (%#v) events %#v", subscription, events)
-	return StageStatusOK
-}
-
-// HandleDemand
-func (gst *Stage) HandleDemand(process *StageProcess, subscription StageSubscription, count uint) (etf.List, StageStatus) {
-	lib.Warning("Stage HandleDemand: unhandled subscription (%#v) demand %#v", subscription, count)
-	return nil, StageStatusOK
-}
-
-// private functions
-
-func (p *StageProcess) handleStageRequest(m stageMessage) (etf.Term, StageStatus) {
-	var command stageRequestCommand
-	switch m.Request {
-	case "$gen_consumer":
-		// We'd prefer {events, [...]} for the events message (in the
-		// fashion of the other messages), but the original authors made it
-		// this way, so we have to use this little hack in order
-		// to use the same handler
-		if cmd, ok := m.Command.(etf.List); ok {
-			command.Cmd = etf.Atom("events")
-			command.Opt1 = cmd
-			return p.handleConsumer(m.Subscription, command)
-		}
-		if err := etf.TermIntoStruct(m.Command, &command); err != nil {
-			return nil, StageStatusUnsupported
-		}
-		return p.handleConsumer(m.Subscription, command)
-	case "$gen_producer":
-		if err := etf.TermIntoStruct(m.Command, &command); err != nil {
-			return nil, StageStatusUnsupported
-		}
-		return p.handleProducer(m.Subscription, command)
-	}
-	return nil, StageStatusUnsupported
-}
-
-func (p *StageProcess) handleConsumer(subscription StageSubscription, cmd stageRequestCommand) (etf.Term, error) {
-	var subscriptionOpts StageSubscribeOptions
-	var err error
-
-	switch cmd.Cmd {
-	case etf.Atom("events"):
-		events := cmd.Opt1.(etf.List)
-		numEvents := len(events)
-
-		subInternal, ok := p.producers[subscription.ID]
-		if !ok {
-			lib.Warning("consumer got %d events for unknown subscription %#v", numEvents, subscription)
-			return etf.Atom("ok"), nil
-		}
-		// subInternal.count is unsigned, so check before subtracting
-		if uint(numEvents) > subInternal.count {
-			return nil, fmt.Errorf("got %d events which haven't been requested", numEvents)
-		}
-		subInternal.count -= uint(numEvents)
-		if numEvents < int(subInternal.options.MinDemand) {
-			return nil, fmt.Errorf("got %d events which is less than min %d", numEvents, subInternal.options.MinDemand)
-		}
-		if numEvents > int(subInternal.options.MaxDemand) {
-			return nil, fmt.Errorf("got %d events which is more than max %d", numEvents, subInternal.options.MaxDemand)
-		}
-
-		err = p.behavior.HandleEvents(p, subscription, events)
-		if err != nil {
-			return nil, err
-		}
-
-		// if the subscription has auto demand we should request yet another
-		// bunch of events
-		if subInternal.count < subInternal.options.MinDemand && subInternal.options.ManualDemand == false {
-			cnt := subInternal.options.MaxDemand - subInternal.count
-			p.sendDemand(subInternal.Producer, 
subscription, cnt) - subInternal.count += cnt - } - return etf.Atom("ok"), nil - - case etf.Atom("subscribed"): - if err := etf.TermProplistIntoStruct(cmd.Opt2, &subscriptionOpts); err != nil { - return nil, err - } - - manualDemand, status := p.behavior.HandleSubscribed(p, subscription, subscriptionOpts) - - if status != StageStatusOK { - return nil, status - } - subscriptionOpts.ManualDemand = manualDemand - - producer := cmd.Opt1 - subInternal := &subscriptionInternal{ - Subscription: subscription, - Producer: producer, - options: subscriptionOpts, - } - p.producers[subscription.ID] = subInternal - - if manualDemand == false { - p.sendDemand(producer, subscription, subInternal.options.MaxDemand) - subInternal.count = subInternal.options.MaxDemand - } - - return etf.Atom("ok"), nil - - case etf.Atom("retry-cancel"): - // if "subscribed" message hasn't still arrived then just ignore it - if _, ok := p.producers[subscription.ID]; !ok { - return etf.Atom("ok"), nil - } - fallthrough - case etf.Atom("cancel"): - // the subscription was canceled - reason, ok := cmd.Opt1.(string) - if !ok { - return nil, fmt.Errorf("Cancel reason is not a string") - } - - subInternal, ok := p.producers[subscription.ID] - if !ok { - // There might be a case when "cancel" message arrives before - // the "subscribed" message due to async nature of messaging, - // so we should wait a bit and try to handle it one more time - // using "retry-cancel" message. - // I got this problem with GOMAXPROCS=1 - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, subscription.ID}, - etf.Tuple{etf.Atom("retry-cancel"), reason}, - } - // handle it in a second - p.SendAfter(p.Self(), msg, 200*time.Millisecond) - return etf.Atom("ok"), nil - } - - // if we already handle MessageDown skip it - if reason != "noconnection" { - p.DemonitorProcess(subscription.ID) - } - delete(p.producers, subscription.ID) - - err = p.behavior.HandleCanceled(p, subscription, reason) - if err != nil { - return nil, err - } - - switch subInternal.options.Cancel { - case StageCancelTemporary: - return etf.Atom("ok"), nil - case StageCancelTransient: - if reason == "normal" || reason == "shutdown" { - return etf.Atom("ok"), nil - } - return nil, fmt.Errorf(reason) - default: - // StageCancelPermanent - return nil, fmt.Errorf(reason) - } - } - - return nil, fmt.Errorf("unknown Stage command (HandleCast)") -} - -func (p *StageProcess) handleProducer(subscription StageSubscription, cmd stageRequestCommand) (etf.Term, error) { - var subscriptionOpts StageSubscribeOptions - var err error - - switch cmd.Cmd { - case etf.Atom("subscribe"): - // {subscribe, Cancel, Opts} - if err = etf.TermProplistIntoStruct(cmd.Opt2, &subscriptionOpts); err != nil { - return nil, err - } - - // TODO handle cmd.Opts1 - could be etf.Atom("nil") or list of subscriptions - // for the cancelation - - if subscriptionOpts.MinDemand > subscriptionOpts.MaxDemand { - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, subscription.ID}, - etf.Tuple{etf.Atom("cancel"), fmt.Errorf("MinDemand greater MaxDemand")}, - } - p.Send(subscription.Pid, msg) - return etf.Atom("ok"), nil - } - - err = p.behavior.HandleSubscribe(p, subscription, subscriptionOpts) - - switch err { - case nil: - // cancel current subscription if this consumer has been already subscribed - if s, ok := p.consumers[subscription.Pid]; ok { - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, s.Subscription.ID}, - etf.Tuple{etf.Atom("cancel"), 
"resubscribed"}, - } - p.Send(subscription.Pid, msg) - // notify dispatcher about cancelation the previous subscription - canceledSubscription := StageSubscription{ - Pid: subscription.Pid, - ID: s.Subscription.ID, - } - // cancel current demands - p.options.Dispatcher.Cancel(p.dispatcherState, canceledSubscription) - // notify dispatcher about the new subscription - if err := p.options.Dispatcher.Subscribe(p.dispatcherState, subscription, subscriptionOpts); err != nil { - // dispatcher can't handle this subscription - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, s.Subscription.ID}, - etf.Tuple{etf.Atom("cancel"), err.Error()}, - } - p.Send(subscription.Pid, msg) - return etf.Atom("ok"), nil - } - - s.Subscription = subscription - return etf.Atom("ok"), nil - } - - if err := p.options.Dispatcher.Subscribe(p.dispatcherState, subscription, subscriptionOpts); err != nil { - // dispatcher can't handle this subscription - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, subscription.ID}, - etf.Tuple{etf.Atom("cancel"), err.Error()}, - } - p.Send(subscription.Pid, msg) - return etf.Atom("ok"), nil - } - - // monitor subscriber in order to remove this subscription - // if it terminated unexpectedly - m := p.MonitorProcess(subscription.Pid) - s := &subscriptionInternal{ - Subscription: subscription, - Monitor: m, - options: subscriptionOpts, - } - p.consumers[subscription.Pid] = s - return etf.Atom("ok"), nil - - case StageStatusNotAProducer: - // if it wasnt overloaded - send 'cancel' to the consumer - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, subscription.ID}, - etf.Tuple{etf.Atom("cancel"), err.Error()}, - } - p.Send(subscription.Pid, msg) - return etf.Atom("ok"), nil - - default: - // any other error should terminate this stage - return nil, err - } - case etf.Atom("retry-ask"): - // if the "subscribe" message hasn't still arrived, send a cancelation message - // to the consumer - if _, ok := p.consumers[subscription.Pid]; !ok { - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{subscription.Pid, subscription.ID}, - etf.Tuple{etf.Atom("cancel"), "not subscribed"}, - } - p.Send(subscription.Pid, msg) - return etf.Atom("ok"), nil - } - fallthrough - - case etf.Atom("ask"): - var events etf.List - var deliver []StageDispatchItem - var count uint - switch c := cmd.Opt1.(type) { - case int: - count = uint(c) - case uint: - count = c - default: - return nil, fmt.Errorf("Demand has wrong value %#v. 
Expected positive integer", cmd.Opt1) - } - - // handle buffered demand on exit this function - defer func() { - if p.options.DisableDemandHandle { - return - } - if len(p.demandBuffer) == 0 { - return - } - d := p.demandBuffer[0] - msg := etf.Tuple{ - etf.Atom("$gen_producer"), - etf.Tuple{d.subscription.Pid, d.subscription.ID}, - etf.Tuple{etf.Atom("ask"), d.count}, - } - p.Send(p.Self(), msg) - p.demandBuffer = p.demandBuffer[1:] - }() - - if count == 0 { - // just ignore it - return etf.Atom("ok"), nil - } - - if _, ok := p.consumers[subscription.Pid]; !ok { - // there might be a case when "ask" message arrives before - // the "subscribe" message due to async nature of messaging, - // so we should wait a bit and try to handle it one more time - // using "retry-ask" message - msg := etf.Tuple{ - etf.Atom("$gen_producer"), - etf.Tuple{subscription.Pid, subscription.ID}, - etf.Tuple{etf.Atom("retry-ask"), count}, - } - // handle it in a second - p.SendAfter(p.Self(), msg, 1*time.Second) - return etf.Atom("ok"), nil - } - - if p.options.DisableDemandHandle { - d := demandRequest{ - subscription: subscription, - count: count, - } - // FIXME it would be more effective to use sync.Pool with - // preallocated array behind the slice. - // see how it was made in lib.TakeBuffer - p.demandBuffer = append(p.demandBuffer, d) - return etf.Atom("ok"), nil - } - - events, _ = p.behavior.HandleDemand(p, subscription, count) - - // register this demand and trying to dispatch having events - dispatcher := p.options.Dispatcher - dispatcher.Ask(p.dispatcherState, subscription, count) - deliver = dispatcher.Dispatch(p.dispatcherState, events) - if len(deliver) == 0 { - return etf.Atom("ok"), nil - } - - for d := range deliver { - msg := etf.Tuple{ - etf.Atom("$gen_consumer"), - etf.Tuple{deliver[d].subscription.Pid, deliver[d].subscription.ID}, - deliver[d].events, - } - p.Send(deliver[d].subscription.Pid, msg) - } - - return etf.Atom("ok"), nil - - case etf.Atom("cancel"): - var e error - // handle this cancelation in the dispatcher - dispatcher := p.options.Dispatcher - dispatcher.Cancel(p.dispatcherState, subscription) - reason := cmd.Opt1.(string) - // handle it in a Stage callback - e = p.behavior.HandleCancel(p, subscription, reason) - delete(p.consumers, subscription.Pid) - return etf.Atom("ok"), e - } - - return nil, fmt.Errorf("unknown Stage command (HandleCall)") -} - -func (p *StageProcess) handleStageDown(down MessageDown) error { - // remove subscription for producer and consumer. corner case - two - // processes have subscribed to each other. - - // checking for subscribers (if we act as a producer). 
- // we monitor them by Pid only - if subInternal, ok := p.consumers[down.Pid]; ok { - // producer monitors consumer by the Pid and stores monitor reference - // in the subInternal struct - p.DemonitorProcess(subInternal.Monitor) - cmd := stageRequestCommand{ - Cmd: etf.Atom("cancel"), - Opt1: down.Reason, - } - if _, err := p.handleProducer(subInternal.Subscription, cmd); err != nil { - return err - } - } - - // checking for producers (if we act as a consumer) - if subInternal, ok := p.producers[down.Ref]; ok { - - cmd := stageRequestCommand{ - Cmd: etf.Atom("cancel"), - Opt1: down.Reason, - } - - if _, err := p.handleConsumer(subInternal.Subscription, cmd); err != nil { - return err - } - } - - return nil -} - -// for the consumer side only -func (p *StageProcess) sendDemand(producer etf.Term, subscription StageSubscription, count uint) { - msg := etf.Tuple{ - etf.Atom("$gen_producer"), - etf.Tuple{p.Self(), subscription.ID}, - etf.Tuple{etf.Atom("ask"), count}, - } - p.Send(producer, msg) -} diff --git a/gen/stage_dispatcher.go b/gen/stage_dispatcher.go deleted file mode 100644 index 16659116..00000000 --- a/gen/stage_dispatcher.go +++ /dev/null @@ -1,556 +0,0 @@ -package gen - -import ( - "fmt" - "math/rand" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -// StageDispatcherBehavior defined interface for the dispatcher -// implementation. To create a custom dispatcher you should implement this interface -// and use it in StageOptions as a Dispatcher -type StageDispatcherBehavior interface { - // InitStageDispatcher(opts) - Init(opts StageOptions) (state interface{}) - - // Ask called every time a consumer sends demand - Ask(state interface{}, subscription StageSubscription, count uint) - - // Cancel called every time a subscription is cancelled or the consumer goes down. - Cancel(state interface{}, subscription StageSubscription) - - // Dispatch called every time a producer wants to dispatch an event. - Dispatch(state interface{}, events etf.List) []StageDispatchItem - - // Subscribe called every time the producer gets a new subscriber - Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error -} - -// StageDispatcher -type StageDispatcher int -type dispatcherDemand struct{} -type dispatcherBroadcast struct{} -type dispatcherPartition struct { - n uint - hash func(etf.Term) int -} - -// CreateStageDispatcherDemand creates a dispatcher that sends batches -// to the highest demand. This is the default dispatcher used -// by Stage. In order to avoid greedy consumers, it is recommended -// that all consumers have exactly the same maximum demand. -func CreateStageDispatcherDemand() StageDispatcherBehavior { - return &dispatcherDemand{} -} - -// CreateStageDispatcherBroadcast creates a dispatcher that accumulates -// demand from all consumers before broadcasting events to all of them. -// This dispatcher guarantees that events are dispatched to -// all consumers without exceeding the demand of any given consumer. -// The demand is only sent upstream once all consumers ask for data. -func CreateStageDispatcherBroadcast() StageDispatcherBehavior { - return &dispatcherBroadcast{} -} - -// CreateStageDispatcherPartition creates a dispatcher that sends -// events according to partitions. Number of partitions 'n' must be > 0. -// 'hash' should return number within range [0,n). Value outside of this range -// is discarding event. -// If 'hash' is nil the random partition will be used on every event. 
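As an illustration of the contract above, a producer could wire up a 4-partition dispatcher with a custom hash as sketched below. partitionByKey and the 4-partition layout are hypothetical, while CreateStageDispatcherPartition and StageOptions.Dispatcher are the API defined in this (removed) file; a consumer would then pick its partition via StageSubscribeOptions.Partition when subscribing:

package example

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// partitionByKey routes an event to one of 4 partitions by its first tuple
// element; anything unroutable returns -1, which is out of range and makes
// the dispatcher discard the event
func partitionByKey(event etf.Term) int {
	t, ok := event.(etf.Tuple)
	if !ok || len(t) == 0 {
		return -1
	}
	key, ok := t[0].(int)
	if !ok || key < 0 {
		return -1
	}
	return key % 4
}

// producerOptions would be returned from the producer's InitStage callback
func producerOptions() gen.StageOptions {
	return gen.StageOptions{
		Dispatcher: gen.CreateStageDispatcherPartition(4, partitionByKey),
	}
}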
-func CreateStageDispatcherPartition(n uint, hash func(etf.Term) int) StageDispatcherBehavior {
-	if hash == nil {
-		hash = func(event etf.Term) int {
-			// pick a partition uniformly from the range [0, n)
-			p := rand.Intn(int(n))
-			return p
-		}
-	}
-	return &dispatcherPartition{
-		n:    n,
-		hash: hash,
-	}
-}
-
-type StageDispatchItem struct {
-	subscription StageSubscription
-	events       etf.List
-}
-
-//
-// Dispatcher Demand implementation
-//
-
-type demand struct {
-	subscription StageSubscription
-	minDemand    uint
-	maxDemand    uint
-	count        uint
-	partition    uint
-}
-
-type demandState struct {
-	demands map[etf.Pid]*demand
-	order   []etf.Pid
-	i       int
-	// buffer of events
-	events         chan etf.Term
-	bufferSize     uint
-	bufferKeepLast bool
-}
-
-type partitionState struct {
-	demands map[etf.Pid]*demand
-	// partitioned
-	order  [][]etf.Pid
-	i      []int
-	events []chan etf.Term
-
-	bufferSize     uint
-	bufferKeepLast bool
-}
-
-type broadcastState struct {
-	demands map[etf.Pid]*demand
-	// maxDemand should be the min value of all MaxDemand
-	maxDemand uint
-
-	// minDemand should be the max value of all MinDemand
-	minDemand uint
-
-	// Number of broadcast iterations that can be done.
-	// Computed on every Ask/Cancel call as the minimum value
-	// among all the demands.
-	broadcasts uint
-
-	events         chan etf.Term
-	bufferSize     uint
-	bufferKeepLast bool
-}
-
-// Init
-func (dd *dispatcherDemand) Init(opts StageOptions) interface{} {
-	state := &demandState{
-		demands:        make(map[etf.Pid]*demand),
-		i:              0,
-		events:         make(chan etf.Term, opts.BufferSize),
-		bufferSize:     opts.BufferSize,
-		bufferKeepLast: opts.BufferKeepLast,
-	}
-	return state
-}
-
-// Ask
-func (dd *dispatcherDemand) Ask(state interface{}, subscription StageSubscription, count uint) {
-	st := state.(*demandState)
-	demand, ok := st.demands[subscription.Pid]
-	if !ok {
-		return
-	}
-	demand.count += count
-	return
-}
-
-// Cancel
-func (dd *dispatcherDemand) Cancel(state interface{}, subscription StageSubscription) {
-	st := state.(*demandState)
-	delete(st.demands, subscription.Pid)
-	for i := range st.order {
-		if st.order[i] != subscription.Pid {
-			continue
-		}
-		st.order[i] = st.order[0]
-		st.order = st.order[1:]
-		break
-	}
-	return
-}
-
-// Dispatch
-func (dd *dispatcherDemand) Dispatch(state interface{}, events etf.List) []StageDispatchItem {
-	st := state.(*demandState)
-	// put events into the buffer before we start dispatching
-	for e := range events {
-		select {
-		case st.events <- events[e]:
-			continue
-		default:
-			// buffer is full
-			if st.bufferKeepLast {
-				<-st.events
-				st.events <- events[e]
-				continue
-			}
-		}
-		// don't have enough space to keep these events.
-		lib.Warning("DispatcherDemand. Event buffer is full. 
Discarding event: ", events[e]) - break - } - - // check out whether we have subscribers - if len(st.order) == 0 { - return nil - } - - dispatchItems := []StageDispatchItem{} - nextDemand := st.i - for { - left := uint(0) - for range st.order { - if st.i > len(st.order)-1 { - st.i = 0 - } - - if len(st.events) == 0 { - // have nothing to dispatch - break - } - - pid := st.order[st.i] - demand := st.demands[pid] - st.i++ - - if demand.count < demand.minDemand { - break - } - - if demand.count == 0 || len(st.events) < int(demand.minDemand) { - continue - } - - nextDemand = st.i - item := makeDispatchItem(st.events, demand) - dispatchItems = append(dispatchItems, item) - - demand.count -= uint(len(item.events)) - left += demand.count - - if len(st.events) < int(demand.minDemand) { - continue - } - } - if left > 0 && len(st.events) > 0 { - continue - } - break - } - - st.i = nextDemand - - return dispatchItems -} - -// Subscribe -func (dd *dispatcherDemand) Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error { - st := state.(*demandState) - newDemand := &demand{ - subscription: subscription, - minDemand: opts.MinDemand, - maxDemand: opts.MaxDemand, - } - st.demands[subscription.Pid] = newDemand - st.order = append(st.order, subscription.Pid) - return nil -} - -// -// Dispatcher Broadcast implementation -// - -// Init -func (db *dispatcherBroadcast) Init(opts StageOptions) interface{} { - state := &broadcastState{ - demands: make(map[etf.Pid]*demand), - events: make(chan etf.Term, opts.BufferSize), - bufferSize: opts.BufferSize, - bufferKeepLast: opts.BufferKeepLast, - } - return state -} - -// Ask -func (db *dispatcherBroadcast) Ask(state interface{}, subscription StageSubscription, count uint) { - st := state.(*broadcastState) - demand, ok := st.demands[subscription.Pid] - if !ok { - return - } - demand.count += count - st.broadcasts = minCountDemand(st.demands) - return -} - -// Cancel -func (db *dispatcherBroadcast) Cancel(state interface{}, subscription StageSubscription) { - st := state.(*broadcastState) - delete(st.demands, subscription.Pid) - st.broadcasts = minCountDemand(st.demands) - return -} - -// Dispatch -func (db *dispatcherBroadcast) Dispatch(state interface{}, events etf.List) []StageDispatchItem { - st := state.(*broadcastState) - // put events into the buffer before we start dispatching - for e := range events { - select { - case st.events <- events[e]: - continue - default: - // buffer is full - if st.bufferKeepLast { - <-st.events - st.events <- events[e] - continue - } - } - // dont have enough space to keep these events. - lib.Warning("DispatcherBroadcast. Event buffer is full. 
Discarding event: ", events[e]) - break - } - - demand := &demand{ - minDemand: st.minDemand, - maxDemand: st.maxDemand, - count: st.broadcasts, - } - - items := []StageDispatchItem{} - for { - if st.broadcasts == 0 { - break - } - if len(st.events) < int(st.minDemand) { - break - } - - broadcast_item := makeDispatchItem(st.events, demand) - for _, d := range st.demands { - item := StageDispatchItem{ - subscription: d.subscription, - events: broadcast_item.events, - } - items = append(items, item) - d.count -= uint(len(item.events)) - } - st.broadcasts-- - } - return items -} - -// Subscribe -func (db *dispatcherBroadcast) Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error { - st := state.(*broadcastState) - newDemand := &demand{ - subscription: subscription, - minDemand: opts.MinDemand, - maxDemand: opts.MaxDemand, - } - if len(st.demands) == 0 { - st.minDemand = opts.MinDemand - st.maxDemand = opts.MaxDemand - st.demands[subscription.Pid] = newDemand - return nil - } - - // check if min and max outside of the having range - // defined by the previous subscriptions - if opts.MaxDemand < st.minDemand { - return fmt.Errorf("broadcast dispatcher: MaxDemand (%d) outside of the accepted range (%d..%d)", opts.MaxDemand, st.minDemand, st.maxDemand) - } - if opts.MinDemand > st.maxDemand { - return fmt.Errorf("broadcast dispatcher: MinDemand (%d) outside of the accepted range (%d..%d)", opts.MinDemand, st.minDemand, st.maxDemand) - } - - // adjust the range - if opts.MaxDemand < st.maxDemand { - st.maxDemand = opts.MaxDemand - } - if opts.MinDemand > st.minDemand { - st.minDemand = opts.MinDemand - } - st.demands[subscription.Pid] = newDemand - - // we should stop broadcast events until this subscription make a new demand - st.broadcasts = 0 - return nil -} - -// -// Dispatcher Partition implementation -// - -// Init -func (dp *dispatcherPartition) Init(opts StageOptions) interface{} { - state := &partitionState{ - demands: make(map[etf.Pid]*demand), - order: make([][]etf.Pid, dp.n), - i: make([]int, dp.n), - events: make([]chan etf.Term, dp.n), - bufferSize: opts.BufferSize, - bufferKeepLast: opts.BufferKeepLast, - } - for i := range state.events { - state.events[i] = make(chan etf.Term, state.bufferSize) - } - return state -} - -// Ask -func (dp *dispatcherPartition) Ask(state interface{}, subscription StageSubscription, count uint) { - st := state.(*partitionState) - demand, ok := st.demands[subscription.Pid] - if !ok { - return - } - demand.count += count - return -} - -// Cancel -func (dp *dispatcherPartition) Cancel(state interface{}, subscription StageSubscription) { - st := state.(*partitionState) - demand, ok := st.demands[subscription.Pid] - if !ok { - return - } - delete(st.demands, subscription.Pid) - for i := range st.order[demand.partition] { - if st.order[demand.partition][i] != subscription.Pid { - continue - } - st.order[demand.partition][i] = st.order[demand.partition][0] - st.order[demand.partition] = st.order[demand.partition][1:] - break - } - return -} - -// Dispatch -func (dp *dispatcherPartition) Dispatch(state interface{}, events etf.List) []StageDispatchItem { - st := state.(*partitionState) - // put events into the buffer before we start dispatching - for e := range events { - partition := dp.hash(events[e]) - if partition < 0 || partition > int(dp.n-1) { - // discard this event. 
partition is out of range - continue - } - select { - case st.events[partition] <- events[e]: - continue - default: - // buffer is full - if st.bufferKeepLast { - <-st.events[partition] - st.events[partition] <- events[e] - continue - } - } - // dont have enough space to keep these events. discard the rest of them. - lib.Warning("DispatcherPartition. Event buffer is full. Discarding event: ", events[e]) - break - } - - dispatchItems := []StageDispatchItem{} - for partition := range st.events { - // do we have anything to dispatch? - if len(st.events[partition]) == 0 { - continue - } - - nextDemand := st.i[partition] - for { - countLeft := uint(0) - for range st.order[partition] { - order_index := st.i[partition] - if order_index > len(st.order[partition])-1 { - order_index = 0 - } - if len(st.events[partition]) == 0 { - // have nothing to dispatch - break - } - - pid := st.order[partition][order_index] - demand := st.demands[pid] - st.i[partition] = order_index + 1 - - if demand.count == 0 || len(st.events[partition]) < int(demand.minDemand) { - continue - } - - nextDemand = st.i[partition] - item := makeDispatchItem(st.events[partition], demand) - demand.count -= uint(len(st.events[partition])) - dispatchItems = append(dispatchItems, item) - if len(st.events[partition]) < int(demand.minDemand) { - continue - } - countLeft += demand.count - } - if countLeft > 0 && len(st.events[partition]) > 0 { - continue - } - break - } - - st.i[partition] = nextDemand - } - return dispatchItems -} - -// Subscribe -func (dp *dispatcherPartition) Subscribe(state interface{}, subscription StageSubscription, opts StageSubscribeOptions) error { - st := state.(*partitionState) - if opts.Partition > dp.n-1 { - return fmt.Errorf("unknown partition") - } - newDemand := &demand{ - subscription: subscription, - minDemand: opts.MinDemand, - maxDemand: opts.MaxDemand, - partition: opts.Partition, - } - st.demands[subscription.Pid] = newDemand - st.order[opts.Partition] = append(st.order[opts.Partition], subscription.Pid) - return nil -} - -// helpers - -func makeDispatchItem(events chan etf.Term, d *demand) StageDispatchItem { - item := StageDispatchItem{ - subscription: d.subscription, - } - - for i := uint(0); i < d.count; i++ { - if i == d.maxDemand { - break - } - select { - case e := <-events: - item.events = append(item.events, e) - continue - default: - // we dont have events in the buffer - } - break - } - return item -} - -func minCountDemand(demands map[etf.Pid]*demand) uint { - if len(demands) == 0 { - return uint(0) - } - - minCount := uint(100) - - for _, d := range demands { - if d.count < minCount { - minCount = d.count - } - } - return minCount -} diff --git a/gen/supervisor.go b/gen/supervisor.go deleted file mode 100644 index 7ef09a12..00000000 --- a/gen/supervisor.go +++ /dev/null @@ -1,459 +0,0 @@ -package gen - -import ( - "fmt" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -// SupervisorBehavior interface -type SupervisorBehavior interface { - ProcessBehavior - Init(args ...etf.Term) (SupervisorSpec, error) -} - -// SupervisorStrategy -type SupervisorStrategy struct { - Type SupervisorStrategyType - Intensity uint16 - Period uint16 - Restart SupervisorStrategyRestart -} - -// SupervisorStrategyType -type SupervisorStrategyType = string - -// SupervisorStrategyRestart -type SupervisorStrategyRestart = string - -const ( - // Restart strategies: - - // SupervisorRestartIntensity - SupervisorRestartIntensity = uint16(10) - - // SupervisorRestartPeriod - 
SupervisorRestartPeriod = uint16(10) - - // SupervisorStrategyOneForOne If one child process terminates and is to be restarted, only - // that child process is affected. This is the default restart strategy. - SupervisorStrategyOneForOne = SupervisorStrategyType("one_for_one") - - // SupervisorStrategyOneForAll If one child process terminates and is to be restarted, all other - // child processes are terminated and then all child processes are restarted. - SupervisorStrategyOneForAll = SupervisorStrategyType("one_for_all") - - // SupervisorStrategyRestForOne If one child process terminates and is to be restarted, - // the 'rest' of the child processes (that is, the child - // processes after the terminated child process in the start order) - // are terminated. Then the terminated child process and all - // child processes after it are restarted - SupervisorStrategyRestForOne = SupervisorStrategyType("rest_for_one") - - // SupervisorStrategySimpleOneForOne A simplified one_for_one supervisor, where all - // child processes are dynamically added instances - // of the same process type, that is, running the same code. - SupervisorStrategySimpleOneForOne = SupervisorStrategyType("simple_one_for_one") - - // Restart types: - - // SupervisorStrategyRestartPermanent child process is always restarted - SupervisorStrategyRestartPermanent = SupervisorStrategyRestart("permanent") - - // SupervisorStrategyRestartTemporary child process is never restarted - // (not even when the supervisor restart strategy is rest_for_one - // or one_for_all and a sibling death causes the temporary process - // to be terminated) - SupervisorStrategyRestartTemporary = SupervisorStrategyRestart("temporary") - - // SupervisorStrategyRestartTransient child process is restarted only if - // it terminates abnormally, that is, with an exit reason other - // than normal, shutdown. - SupervisorStrategyRestartTransient = SupervisorStrategyRestart("transient") - - supervisorChildStateStart = 0 - supervisorChildStateRunning = 1 - supervisorChildStateDisabled = -1 -) - -type supervisorChildState int - -// SupervisorSpec -type SupervisorSpec struct { - Name string - Children []SupervisorChildSpec - Strategy SupervisorStrategy - restarts []int64 -} - -// SupervisorChildSpec -type SupervisorChildSpec struct { - Name string - Child ProcessBehavior - Options ProcessOptions - Args []etf.Term - - state supervisorChildState // for internal usage - process Process -} - -// Supervisor is implementation of ProcessBehavior interface -type Supervisor struct{} - -type messageStartChild struct { - name string - args []etf.Term -} - -// ProcessInit -func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) { - behavior, ok := p.Behavior().(SupervisorBehavior) - if !ok { - return ProcessState{}, fmt.Errorf("ProcessInit: not a SupervisorBehavior") - } - spec, err := behavior.Init(args...) 
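-	// the spec returned by Init carries the child specs and the restart strategy;
-	// a non-nil error aborts the start of the supervisor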
- if err != nil { - return ProcessState{}, err - } - lib.Log("[%s] SUPERVISOR %q with restart strategy: %s[%s] ", p.NodeName(), p.Name(), spec.Strategy.Type, spec.Strategy.Restart) - - p.SetTrapExit(true) - return ProcessState{ - Process: p, - State: &spec, - }, nil -} - -// ProcessLoop -func (sv *Supervisor) ProcessLoop(ps ProcessState, started chan<- bool) string { - spec := ps.State.(*SupervisorSpec) - if spec.Strategy.Type != SupervisorStrategySimpleOneForOne { - startChildren(ps, spec) - } - - waitTerminatingProcesses := []etf.Pid{} - chs := ps.ProcessChannels() - - started <- true - for { - select { - case ex := <-chs.GracefulExit: - if ex.From == ps.Self() { - // stop supervisor gracefully - for i := range spec.Children { - p := spec.Children[i].process - if p != nil && p.IsAlive() { - p.Exit(ex.Reason) - } - } - return ex.Reason - } - waitTerminatingProcesses = handleMessageExit(ps, ex, spec, waitTerminatingProcesses) - - case <-ps.Context().Done(): - return "kill" - - case direct := <-chs.Direct: - value, err := handleDirect(ps, spec, direct.Message) - ps.PutSyncReply(direct.Ref, value, err) - - case <-chs.Mailbox: - // do nothing - } - } -} - -// StartChild dynamically starts a child process with given name of child spec which is defined by Init call. -func (sv *Supervisor) StartChild(supervisor Process, name string, args ...etf.Term) (Process, error) { - message := messageStartChild{ - name: name, - args: args, - } - value, err := supervisor.Direct(message) - if err != nil { - return nil, err - } - process, ok := value.(Process) - if !ok { - return nil, fmt.Errorf("internal error: can't start child %#v", value) - } - return process, nil -} - -func startChildren(supervisor Process, spec *SupervisorSpec) { - spec.restarts = append(spec.restarts, time.Now().Unix()) - if len(spec.restarts) > int(spec.Strategy.Intensity) { - period := time.Now().Unix() - spec.restarts[0] - if period <= int64(spec.Strategy.Period) { - lib.Warning("Supervisor %q. Restart intensity is exceeded (%d restarts for %d seconds)", - spec.Name, spec.Strategy.Intensity, spec.Strategy.Period) - supervisor.Kill() - return - } - spec.restarts = spec.restarts[1:] - } - - for i := range spec.Children { - switch spec.Children[i].state { - case supervisorChildStateDisabled: - spec.Children[i].process = nil - case supervisorChildStateRunning: - continue - case supervisorChildStateStart: - spec.Children[i].state = supervisorChildStateRunning - process := startChild(supervisor, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...) - spec.Children[i].process = process - default: - panic("Incorrect supervisorChildState") - } - } -} - -func startChild(supervisor Process, name string, child ProcessBehavior, opts ProcessOptions, args ...etf.Term) Process { - - opts.GroupLeader = supervisor - if leader := supervisor.GroupLeader(); leader != nil { - opts.GroupLeader = leader - } - - // Child process shouldn't ignore supervisor termination (via TrapExit). - // Using the supervisor's Context makes the child terminate if the supervisor is terminated. - opts.Context = supervisor.Context() - - process, err := supervisor.Spawn(name, opts, child, args...) 
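-	// NOTE: a failed Spawn is treated as unrecoverable here; the panic below
-	// takes the supervisor down as well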
- - if err != nil { - panic(err.Error()) - } - - supervisor.Link(process.Self()) - - return process -} - -func handleDirect(supervisor Process, spec *SupervisorSpec, message interface{}) (interface{}, error) { - switch m := message.(type) { - case MessageDirectChildren: - children := []etf.Pid{} - for i := range spec.Children { - if spec.Children[i].process == nil { - continue - } - children = append(children, spec.Children[i].process.Self()) - } - - return children, nil - case messageStartChild: - childSpec, err := lookupSpecByName(m.name, spec.Children) - if err != nil { - return nil, err - } - childSpec.state = supervisorChildStateStart - if len(m.args) > 0 { - childSpec.Args = m.args - } - // Dinamically started child can't be registered with a name. - childSpec.Name = "" - process := startChild(supervisor, childSpec.Name, childSpec.Child, childSpec.Options, childSpec.Args...) - childSpec.process = process - spec.Children = append(spec.Children, childSpec) - return process, nil - - default: - } - - return nil, lib.ErrUnsupportedRequest -} - -func handleMessageExit(p Process, exit ProcessGracefulExitRequest, spec *SupervisorSpec, wait []etf.Pid) []etf.Pid { - - terminated := exit.From - reason := exit.Reason - - isChild := false - // We should make sure if it was an exit message from the supervisor's child - for i := range spec.Children { - child := spec.Children[i].process - if child == nil { - continue - } - if child.Self() == terminated { - isChild = true - break - } - } - - if !isChild && reason != "restart" { - return wait - } - - if len(wait) > 0 { - for i := range wait { - if wait[i] == terminated { - wait[i] = wait[0] - wait = wait[1:] - break - } - } - - if len(wait) == 0 { - // it was the last one. lets restart all terminated children - // which hasn't supervisorChildStateDisabled state - startChildren(p, spec) - } - - return wait - } - - switch spec.Strategy.Type { - - case SupervisorStrategyOneForAll: - for i := range spec.Children { - if spec.Children[i].state != supervisorChildStateRunning { - continue - } - - child := spec.Children[i].process - if child == nil { - continue - } - - spec.Children[i].process = nil - if haveToDisableChild(spec.Strategy.Restart, reason) { - spec.Children[i].state = supervisorChildStateDisabled - break - } - - if spec.Children[i].state == supervisorChildStateDisabled { - continue - } - spec.Children[i].state = supervisorChildStateStart - if child.Self() == terminated { - if len(spec.Children) == i+1 && len(wait) == 0 { - // it was the last one. nothing to waiting for - startChildren(p, spec) - } - continue - } - - child.Exit("restart") - - wait = append(wait, child.Self()) - } - - case SupervisorStrategyRestForOne: - isRest := false - for i := range spec.Children { - child := spec.Children[i].process - if child == nil { - continue - } - if child.Self() == terminated { - isRest = true - spec.Children[i].process = nil - if haveToDisableChild(spec.Strategy.Restart, reason) { - spec.Children[i].state = supervisorChildStateDisabled - break - } else { - spec.Children[i].state = supervisorChildStateStart - } - - if len(spec.Children) == i+1 && len(wait) == 0 { - // it was the last one. 
nothing to waiting for - startChildren(p, spec) - } - - continue - } - - if isRest && spec.Children[i].state == supervisorChildStateRunning { - child.Exit("restart") - spec.Children[i].process = nil - wait = append(wait, child.Self()) - if haveToDisableChild(spec.Strategy.Restart, "restart") { - spec.Children[i].state = supervisorChildStateDisabled - } else { - spec.Children[i].state = supervisorChildStateStart - } - } - } - - case SupervisorStrategyOneForOne: - for i := range spec.Children { - child := spec.Children[i].process - if child == nil { - continue - } - if child.Self() == terminated { - spec.Children[i].process = nil - if haveToDisableChild(spec.Strategy.Restart, reason) { - spec.Children[i].state = supervisorChildStateDisabled - } else { - spec.Children[i].state = supervisorChildStateStart - } - - startChildren(p, spec) - break - } - } - - case SupervisorStrategySimpleOneForOne: - for i := range spec.Children { - child := spec.Children[i].process - if child == nil { - continue - } - if child.Self() == terminated { - - if haveToDisableChild(spec.Strategy.Restart, reason) { - // wont be restarted due to restart strategy - spec.Children[i] = spec.Children[0] - spec.Children = spec.Children[1:] - break - } - - process := startChild(p, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...) - spec.Children[i].process = process - break - } - } - } - // check if all children are disabled. stop this process with reason "normal" - shouldStop := true - for i := range spec.Children { - if spec.Children[i].state == supervisorChildStateDisabled { - continue - } - shouldStop = false - break - } - if shouldStop { - p.Exit("normal") - } - return wait -} - -func haveToDisableChild(strategy SupervisorStrategyRestart, reason string) bool { - switch strategy { - case SupervisorStrategyRestartTransient: - if reason == "shutdown" || reason == "normal" { - return true - } - - case SupervisorStrategyRestartTemporary: - return true - } - - return false -} - -func lookupSpecByName(specName string, spec []SupervisorChildSpec) (SupervisorChildSpec, error) { - for i := range spec { - if spec[i].Name == specName { - return spec[i], nil - } - } - return SupervisorChildSpec{}, fmt.Errorf("unknown child") -} diff --git a/gen/tcp.go b/gen/tcp.go deleted file mode 100644 index 7c7a9c6a..00000000 --- a/gen/tcp.go +++ /dev/null @@ -1,408 +0,0 @@ -package gen - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "net" - "strconv" - "sync/atomic" - "time" - "unsafe" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type TCPBehavior interface { - ServerBehavior - - InitTCP(process *TCPProcess, args ...etf.Term) (TCPOptions, error) - - HandleTCPCall(process *TCPProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - HandleTCPCast(process *TCPProcess, message etf.Term) ServerStatus - HandleTCPInfo(process *TCPProcess, message etf.Term) ServerStatus - - HandleTCPTerminate(process *TCPProcess, reason string) -} - -type TCPStatus error - -var ( - TCPStatusOK TCPStatus - TCPStatusStop TCPStatus = fmt.Errorf("stop") - - defaultDeadlineTimeout int = 3 - defaultDirectTimeout int = 5 -) - -type TCP struct { - Server -} - -type TCPOptions struct { - Host string - Port uint16 - TLS *tls.Config - KeepAlivePeriod int - Handler TCPHandlerBehavior - // QueueLength defines how many parallel requests can be directed to this process. Default value is 10. - QueueLength int - // NumHandlers defines how many handlers will be started. 
diff --git a/gen/tcp.go b/gen/tcp.go deleted file mode 100644 index 7c7a9c6a..00000000 --- a/gen/tcp.go +++ /dev/null @@ -1,408 +0,0 @@ -package gen - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "net" - "strconv" - "sync/atomic" - "time" - "unsafe" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type TCPBehavior interface { - ServerBehavior - - InitTCP(process *TCPProcess, args ...etf.Term) (TCPOptions, error) - - HandleTCPCall(process *TCPProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - HandleTCPCast(process *TCPProcess, message etf.Term) ServerStatus - HandleTCPInfo(process *TCPProcess, message etf.Term) ServerStatus - - HandleTCPTerminate(process *TCPProcess, reason string) -} - -type TCPStatus error - -var ( - TCPStatusOK TCPStatus - TCPStatusStop TCPStatus = fmt.Errorf("stop") - - defaultDeadlineTimeout int = 3 - defaultDirectTimeout int = 5 -) - -type TCP struct { - Server -} - -type TCPOptions struct { - Host string - Port uint16 - TLS *tls.Config - KeepAlivePeriod int - Handler TCPHandlerBehavior - // QueueLength defines how many parallel requests can be directed to this process. Default value is 10. - QueueLength int - // NumHandlers defines how many handlers will be started. Default is 1. - NumHandlers int - // IdleTimeout defines how long (in seconds) a started handler stays alive with no packets. Zero value keeps the handler alive indefinitely. - IdleTimeout int - DeadlineTimeout int - MaxPacketSize int - // ExtraHandlers enables starting new handlers if all handlers in the pool are busy. - ExtraHandlers bool -} - -type TCPProcess struct { - ServerProcess - options TCPOptions - behavior TCPBehavior - - pool []*Process - counter uint64 - listener net.Listener -} - -// Server callbacks -func (tcp *TCP) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(TCPBehavior) - if !ok { - return fmt.Errorf("not a TCPBehavior") - } - - tcpProcess := &TCPProcess{ - ServerProcess: *process, - behavior: behavior, - } - // do not inherit parent State - tcpProcess.State = nil - - options, err := behavior.InitTCP(tcpProcess, args...) - if err != nil { - return err - } - if options.Handler == nil { - return fmt.Errorf("handler must be defined") - } - if options.DeadlineTimeout < 1 { - // we need to check the context if it was canceled to stop - // reading and close the connection socket - options.DeadlineTimeout = defaultDeadlineTimeout - } - - tcpProcess.options = options - if err := tcpProcess.initHandlers(); err != nil { - return err - } - - if options.Port == 0 { - return fmt.Errorf("TCP port must be defined") - } - - lc := net.ListenConfig{} - - if options.KeepAlivePeriod > 0 { - lc.KeepAlive = time.Duration(options.KeepAlivePeriod) * time.Second - } - ctx := process.Context() - hostPort := net.JoinHostPort("", strconv.Itoa(int(options.Port))) - listener, err := lc.Listen(ctx, "tcp", hostPort) - if err != nil { - return err - } - - if options.TLS != nil { - if options.TLS.Certificates == nil && options.TLS.GetCertificate == nil { - return fmt.Errorf("TLS config has no certificates") - } - listener = tls.NewListener(listener, options.TLS) - } - tcpProcess.listener = listener - - // start acceptor - go func() { - var err error - var c net.Conn - defer func() { - if err == nil { - process.Exit("normal") - return - } - process.Exit(err.Error()) - }() - - for { - c, err = listener.Accept() - if err != nil { - if ctx.Err() == nil { - continue - } - return - } - go tcpProcess.serve(ctx, c) - } - }() - - process.State = tcpProcess - return nil -} - -func (tcp *TCP) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - tcpp := process.State.(*TCPProcess) - return tcpp.behavior.HandleTCPCall(tcpp, from, message) -} - -func (tcp *TCP) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - tcpp := process.State.(*TCPProcess) - return tcpp.behavior.HandleTCPCast(tcpp, message) -} - -func (tcp *TCP) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - tcpp := process.State.(*TCPProcess) - return tcpp.behavior.HandleTCPInfo(tcpp, message) -} - -func (tcp *TCP) Terminate(process *ServerProcess, reason string) { - p := process.State.(*TCPProcess) - p.listener.Close() - p.behavior.HandleTCPTerminate(p, reason) -} - -// -// default TCP callbacks -// - -// HandleTCPCall -func (tcp *TCP) HandleTCPCall(process *TCPProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("[gen.TCP] HandleTCPCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleTCPCast -func (tcp *TCP) HandleTCPCast(process *TCPProcess, message etf.Term) ServerStatus { - lib.Warning("[gen.TCP] HandleTCPCast: unhandled message %#v",
message) - return ServerStatusOK -} - -// HandleTCPInfo -func (tcp *TCP) HandleTCPInfo(process *TCPProcess, message etf.Term) ServerStatus { - lib.Warning("[gen.TCP] HandleTCPInfo: unhandled message %#v", message) - return ServerStatusOK -} -func (tcp *TCP) HandleTCPTerminate(process *TCPProcess, reason string) { - return -} - -// internal - -func (tcpp *TCPProcess) serve(ctx context.Context, c net.Conn) error { - var handlerProcess Process - var handlerProcessID int - var packet interface{} - var disconnect bool - var deadline bool - var timeout bool - var disconnectError error - var expectingBytes int = 1 - - defer c.Close() - - deadlineTimeout := time.Second * time.Duration(tcpp.options.DeadlineTimeout) - - tcpConnection := &TCPConnection{ - Addr: c.RemoteAddr(), - Socket: c, - } - - l := uint64(tcpp.options.NumHandlers) - // make round robin using the counter value - cnt := atomic.AddUint64(&tcpp.counter, 1) - // choose process as a handler for the packet received on this connection - handlerProcessID = int(cnt % l) - handlerProcess = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tcpp.pool[handlerProcessID])))) - - b := lib.TakeBuffer() - -nextPacket: - for { - if ctx.Err() != nil { - return nil - } - - if packet == nil { - // just connected - packet = messageTCPHandlerConnect{ - connection: tcpConnection, - } - break - } - - if b.Len() < expectingBytes { - deadline = false - if err := c.SetReadDeadline(time.Now().Add(deadlineTimeout)); err == nil { - deadline = true - } - - n, e := b.ReadDataFrom(c, tcpp.options.MaxPacketSize) - if n == 0 { - if err, ok := e.(net.Error); deadline && ok && err.Timeout() { - packet = messageTCPHandlerTimeout{ - connection: tcpConnection, - } - timeout = true - break - } - packet = messageTCPHandlerDisconnect{ - connection: tcpConnection, - } - // closed connection - disconnect = true - break - } - - if e != nil && e != io.EOF { - // something went wrong - packet = messageTCPHandlerDisconnect{ - connection: tcpConnection, - } - disconnect = true - disconnectError = e - break - } - - // check one more time if we should read more data - continue - } - // FIXME take it from the pool - packet = &messageTCPHandlerPacket{ - connection: tcpConnection, - packet: b.B, - } - break - } - -retry: - for a := uint64(0); a < l; a++ { - if ctx.Err() != nil { - return nil - } - - nbytesInt, err := handlerProcess.DirectWithTimeout(packet, defaultDirectTimeout) - switch err { - case TCPHandlerStatusOK: - if disconnect { - return disconnectError - } - if timeout { - timeout = false - goto nextPacket - } - next, _ := nbytesInt.(messageTCPHandlerPacketResult) - if next.left > 0 { - if b.Len() > next.left { - b1 := lib.TakeBuffer() - head := b.Len() - next.left - b1.Set(b.B[head:]) - lib.ReleaseBuffer(b) - b = b1 - } - } else { - b.Reset() - } - expectingBytes = b.Len() + next.await - if expectingBytes == 0 { - expectingBytes++ - } - - goto nextPacket - - case TCPHandlerStatusClose: - return disconnectError - case lib.ErrProcessTerminated: - if handlerProcessID == -1 { - // it was an extra handler. do not restart it, try to use the existing one - cnt = atomic.AddUint64(&tcpp.counter, 1) - handlerProcessID = int(cnt % l) - handlerProcess = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tcpp.pool[handlerProcessID])))) - goto retry - } - - // respawn terminated process - handlerProcess = tcpp.startHandler(handlerProcessID, tcpp.options.IdleTimeout) - atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&tcpp.pool[handlerProcessID])), unsafe.Pointer(&handlerProcess)) - continue - case lib.ErrProcessBusy: - handlerProcessID = int((a + cnt) % l) - handlerProcess = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tcpp.pool[handlerProcessID])))) - continue - default: - lib.Warning("[gen.TCP] error on handling packet: %s. closing connection with %q", err, c.RemoteAddr()) - return err - } - } - - // create a new handler. we should either make a call to HandleDisconnect or - // run this connection with an extra handler with idle timeout = 5 seconds - handlerProcessID = -1 - handlerProcess = tcpp.startHandler(handlerProcessID, 5) - if tcpp.options.ExtraHandlers == false { - packet = messageTCPHandlerDisconnect{ - connection: tcpConnection, - } - - handlerProcess.DirectWithTimeout(packet, defaultDirectTimeout) - lib.Warning("[gen.TCP] all handlers are busy. closing connection with %q", c.RemoteAddr()) - handlerProcess.Kill() - return fmt.Errorf("all handlers are busy") - } - - goto retry -} - -func (tcpp *TCPProcess) initHandlers() error { - if tcpp.options.NumHandlers < 1 { - tcpp.options.NumHandlers = 1 - } - if tcpp.options.IdleTimeout < 0 { - tcpp.options.IdleTimeout = 0 - } - - if tcpp.options.QueueLength < 1 { - tcpp.options.QueueLength = defaultQueueLength - } - - c := atomic.AddUint64(&tcpp.counter, 1) - if c > 1 { - return fmt.Errorf("you can not use the same object more than once") - } - - for i := 0; i < tcpp.options.NumHandlers; i++ { - p := tcpp.startHandler(i, tcpp.options.IdleTimeout) - if p == nil { - return fmt.Errorf("can not initialize handlers") - } - tcpp.pool = append(tcpp.pool, &p) - } - return nil -} - -func (tcpp *TCPProcess) startHandler(id int, idleTimeout int) Process { - opts := ProcessOptions{ - Context: tcpp.Context(), - DirectboxSize: uint16(tcpp.options.QueueLength), - } - - optsHandler := optsTCPHandler{id: id, idleTimeout: idleTimeout} - p, err := tcpp.Spawn("", opts, tcpp.options.Handler, optsHandler) - if err != nil { - lib.Warning("[gen.TCP] can not start TCPHandler: %s", err) - return nil - } - return p -}
diff --git a/gen/tcp_handler.go b/gen/tcp_handler.go deleted file mode 100644 index 3151e7aa..00000000 --- a/gen/tcp_handler.go +++ /dev/null @@ -1,207 +0,0 @@ -package gen - -import ( - "fmt" - "io" - "net" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type TCPHandlerStatus error - -var ( - TCPHandlerStatusOK TCPHandlerStatus = nil - TCPHandlerStatusClose TCPHandlerStatus = fmt.Errorf("close") - - defaultQueueLength = 10 -) - -type TCPHandlerBehavior interface { - ServerBehavior - - // Mandatory callback - HandlePacket(process *TCPHandlerProcess, packet []byte, conn *TCPConnection) (int, int, TCPHandlerStatus) - - // Optional callbacks - HandleConnect(process *TCPHandlerProcess, conn *TCPConnection) TCPHandlerStatus - HandleDisconnect(process *TCPHandlerProcess, conn *TCPConnection) - HandleTimeout(process *TCPHandlerProcess, conn *TCPConnection) TCPHandlerStatus - - HandleTCPHandlerCall(process *TCPHandlerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) -
HandleTCPHandlerCast(process *TCPHandlerProcess, message etf.Term) ServerStatus - HandleTCPHandlerInfo(process *TCPHandlerProcess, message etf.Term) ServerStatus - HandleTCPHandlerTerminate(process *TCPHandlerProcess, reason string) -} - -type TCPHandler struct { - Server - - behavior TCPHandlerBehavior -} - -type TCPHandlerProcess struct { - ServerProcess - behavior TCPHandlerBehavior - - lastPacket int64 - idleTimeout int - id int -} - -type optsTCPHandler struct { - id int - idleTimeout int -} - -type TCPConnection struct { - Addr net.Addr - Socket io.Writer - State interface{} -} - -type messageTCPHandlerIdleCheck struct{} -type messageTCPHandlerPacket struct { - packet []byte - connection *TCPConnection -} -type messageTCPHandlerPacketResult struct { - left int - await int -} -type messageTCPHandlerConnect struct { - connection *TCPConnection -} -type messageTCPHandlerDisconnect struct { - connection *TCPConnection -} - -type messageTCPHandlerTimeout struct { - connection *TCPConnection -} - -func (tcph *TCPHandler) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(TCPHandlerBehavior) - if !ok { - return fmt.Errorf("TCP: not a TCPHandlerBehavior") - } - handlerProcess := &TCPHandlerProcess{ - ServerProcess: *process, - behavior: behavior, - } - if len(args) == 0 { - return fmt.Errorf("TCP: can not start with no args") - } - - if a, ok := args[0].(optsTCPHandler); ok { - handlerProcess.idleTimeout = a.idleTimeout - handlerProcess.id = a.id - } else { - return fmt.Errorf("TCP: wrong args for the TCPHandler") - } - - // do not inherit parent State - handlerProcess.State = nil - process.State = handlerProcess - - if handlerProcess.idleTimeout > 0 { - process.CastAfter(process.Self(), messageTCPHandlerIdleCheck{}, 5*time.Second) - } - - return nil -} - -func (tcph *TCPHandler) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - tcpp := process.State.(*TCPHandlerProcess) - return tcpp.behavior.HandleTCPHandlerCall(tcpp, from, message) -} - -func (tcph *TCPHandler) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - tcpp := process.State.(*TCPHandlerProcess) - switch message.(type) { - case messageTCPHandlerIdleCheck: - if time.Now().Unix()-tcpp.lastPacket > int64(tcpp.idleTimeout) { - return ServerStatusStop - } - process.CastAfter(process.Self(), messageTCPHandlerIdleCheck{}, 5*time.Second) - - default: - return tcpp.behavior.HandleTCPHandlerCast(tcpp, message) - } - return ServerStatusOK -} - -func (tcph *TCPHandler) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - tcpp := process.State.(*TCPHandlerProcess) - return tcpp.behavior.HandleTCPHandlerInfo(tcpp, message) -} - -func (tcph *TCPHandler) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - tcpp := process.State.(*TCPHandlerProcess) - switch m := message.(type) { - case *messageTCPHandlerPacket: - tcpp.lastPacket = time.Now().Unix() - left, await, err := tcpp.behavior.HandlePacket(tcpp, m.packet, m.connection) - res := messageTCPHandlerPacketResult{ - left: left, - await: await, - } - return res, err - case messageTCPHandlerConnect: - return nil, tcpp.behavior.HandleConnect(tcpp, m.connection) - case messageTCPHandlerDisconnect: - tcpp.behavior.HandleDisconnect(tcpp, m.connection) - return nil, TCPHandlerStatusClose - case messageTCPHandlerTimeout: - return nil, tcpp.behavior.HandleTimeout(tcpp, m.connection) - default: - return nil, DirectStatusOK - } -} - 
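The HandleDirect dispatch above is what delivers packets to user code: HandlePacket returns (left, await, status), where left is how many trailing bytes of the buffer were not consumed and await is how many more bytes should be read before the next delivery. A hedged sketch of a handler for 4-byte length-prefixed frames against this (now removed) pre-3.0 API — the frameHandler and frameServer names and the port are illustrative only:

package demo

import (
	"encoding/binary"

	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// frameHandler parses frames of the form: 4-byte big-endian length + payload.
type frameHandler struct {
	gen.TCPHandler // embeds the default callbacks
}

func (h *frameHandler) HandlePacket(process *gen.TCPHandlerProcess, packet []byte, conn *gen.TCPConnection) (int, int, gen.TCPHandlerStatus) {
	if len(packet) < 4 {
		// keep all bytes, await the rest of the length header
		return len(packet), 4 - len(packet), gen.TCPHandlerStatusOK
	}
	total := 4 + int(binary.BigEndian.Uint32(packet[:4]))
	if len(packet) < total {
		// keep all bytes, await the rest of the frame
		return len(packet), total - len(packet), gen.TCPHandlerStatusOK
	}
	conn.Socket.Write(packet[:total]) // echo the complete frame back
	// drop the consumed frame, keep any pipelined remainder for redelivery
	return len(packet) - total, 0, gen.TCPHandlerStatusOK
}

// frameServer starts the listener and wires the handler in.
type frameServer struct {
	gen.TCP
}

func (s *frameServer) InitTCP(process *gen.TCPProcess, args ...etf.Term) (gen.TCPOptions, error) {
	return gen.TCPOptions{Port: 7654, Handler: &frameHandler{}}, nil
}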
-func (tcph *TCPHandler) Terminate(process *ServerProcess, reason string) { - tcpp := process.State.(*TCPHandlerProcess) - tcpp.behavior.HandleTCPHandlerTerminate(tcpp, reason) -} - -// -// default callbacks -// - -func (tcph *TCPHandler) HandleConnect(process *TCPHandlerProcess, conn *TCPConnection) TCPHandlerStatus { - return TCPHandlerStatusOK -} -func (tcph *TCPHandler) HandleDisconnect(process *TCPHandlerProcess, conn *TCPConnection) { - return -} -func (tcph *TCPHandler) HandleTimeout(process *TCPHandlerProcess, conn *TCPConnection) TCPHandlerStatus { - return TCPHandlerStatusOK -} - -// HandleTCPHandlerCall -func (tcph *TCPHandler) HandleTCPHandlerCall(process *TCPHandlerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleTCPHandlerCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleTCPHandlerCast -func (tcph *TCPHandler) HandleTCPHandlerCast(process *TCPHandlerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleTCPHandlerCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleTCPHandlerInfo -func (tcph *TCPHandler) HandleTCPHandlerInfo(process *TCPHandlerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleTCPHandlerInfo: unhandled message %#v", message) - return ServerStatusOK -} -func (tcph *TCPHandler) HandleTCPHandlerTerminate(process *TCPHandlerProcess, reason string) { - return -} - -// we should disable SetTrapExit for the TCPHandlerProcess by overriding it. -func (tcpp *TCPHandlerProcess) SetTrapExit(trap bool) { - lib.Warning("[%s] method 'SetTrapExit' is disabled for TCPHandlerProcess", tcpp.Self()) -}
diff --git a/gen/types.go b/gen/types.go index b0b5adfe..8c7bcf22 100644 --- a/gen/types.go +++ b/gen/types.go @@ -1,461 +1,194 @@ package gen import ( - "context" "fmt" - "time" - - "github.com/ergo-services/ergo/etf" + "hash/crc32" + "strings" + "sync" ) -// EnvKey -type EnvKey string - -// Process -type Process interface { - Core - - // Spawn creates a new process with parent - Spawn(name string, opts ProcessOptions, object ProcessBehavior, args ...etf.Term) (Process, error) - - // RemoteSpawn creates a new process at a remote node. The object name is a registered - // behavior on a remote name using RegisterBehavior(...). The given options will be stored - // in the process environment using node.EnvKeyRemoteSpawn as a key - RemoteSpawn(node string, object string, opts RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error) - RemoteSpawnWithTimeout(timeout int, node string, object string, opts RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error) - - // Name returns process name used on starting. - Name() string - - // RegisterName associates the name with the pid (it does not override the name registered on starting) - RegisterName(name string) error - - // UnregisterName unregisters the named process. Unregistering a name is allowed only to its owner - UnregisterName(name string) error - - // NodeName returns node name - NodeName() string - - // NodeStop stops the node - NodeStop() - - // NodeUptime returns node lifespan - NodeUptime() int64 - - // Info returns process details - Info() ProcessInfo - - // Self returns the registered process identifier that belongs to the process - Self() etf.Pid - - // Direct makes a direct request to the actor (gen.Application, gen.Supervisor, gen.Server or - // inherited from gen.Server actor) with default timeout 5 seconds - Direct(request interface{}) (interface{}, error) - - // DirectWithTimeout makes a direct request to the actor with the given timeout (in seconds) - DirectWithTimeout(request interface{}, timeout int) (interface{}, error) - - // Send sends a message in fashion of 'erlang:send'. The value of 'to' can be a Pid, registered local name - // or gen.ProcessID{RegisteredName, NodeName} - Send(to interface{}, message etf.Term) error - - // SendAfter starts a timer. When the timer expires, the message is sent to the process - // identified by 'to'. 'to' can be a Pid, registered local name or - // gen.ProcessID{RegisteredName, NodeName}. Returns a cancel function in order to discard - // sending a message. CancelFunc returns a bool value. If it returns false, then the timer has - // already expired and the message has been sent. - SendAfter(to interface{}, message etf.Term, after time.Duration) CancelFunc - - // Exit initiates graceful stopping of the process - Exit(reason string) error - - // Kill immediately stops the process - Kill() - - // CreateAlias creates a new alias for the Process - CreateAlias() (etf.Alias, error) - - // DeleteAlias deletes the given alias - DeleteAlias(alias etf.Alias) error - - // ListEnv returns a map of configured environment variables. - // It also includes environment variables from the GroupLeader, Parent and Node, - // which are overlapped by priority: Process(Parent(GroupLeader(Node))) - ListEnv() map[EnvKey]interface{} - - // SetEnv sets an environment variable with the given name. Use nil value to remove the variable with the given name. - SetEnv(name EnvKey, value interface{}) - - // Env returns the value associated with the given environment name. - Env(name EnvKey) interface{} - - // Wait waits until the process is stopped - Wait() - - // WaitWithTimeout waits until the process is stopped. Returns ErrTimeout - // if the given timeout is exceeded - WaitWithTimeout(d time.Duration) error - - // Link creates a link between the calling process and another process. - // Links are bidirectional and there can only be one link between two processes. - // Repeated calls to Process.Link(Pid) have no effect. If one of the participants - // of a link terminates, it will send an exit signal to the other participant and cause - // termination of the latter. If the process has set a trap using Process.SetTrapExit(true), the exit signal transforms into the MessageExit and is delivered as a regular message. - Link(with etf.Pid) error - - // Unlink removes the link, if there is one, between the calling process and - // the process referred to by Pid. - Unlink(with etf.Pid) error - - // IsAlive returns whether the process is alive - IsAlive() bool - - // SetTrapExit enables/disables trapping of exit signals. When a process is trapping exits, - // it will not terminate when an exit signal is received. Instead, the signal is transformed - // into a 'gen.MessageExit' which is put into the mailbox of the process just like a regular message.
- SetTrapExit(trap bool) - - // TrapExit returns whether the trap was enabled on this process - TrapExit() bool - - // Compression returns true if compression is enabled for this process - Compression() bool - - // SetCompression enables/disables compression for the messages sent outside of this node - SetCompression(enabled bool) - - // CompressionLevel returns compression level for the process - CompressionLevel() int - - // SetCompressionLevel defines compression level. Value must be in range: - // 1 (best speed) ... 9 (best compression), or -1 for the default compression level - SetCompressionLevel(level int) bool - - // CompressionThreshold returns compression threshold for the process - CompressionThreshold() int - - // SetCompressionThreshold defines the minimal size for the message that must be compressed - // Value must be greater than DefaultCompressionThreshold (1024) - SetCompressionThreshold(threshold int) bool - - // MonitorNode creates a monitor between the current process and node. If Node fails or does not exist, - // the message MessageNodeDown is delivered to the process. - MonitorNode(name string) etf.Ref - - // DemonitorNode removes monitor. Returns false if the given reference wasn't found - DemonitorNode(ref etf.Ref) bool - - // MonitorProcess creates a monitor between the processes. - // Allowed types for the 'process' value: etf.Pid, gen.ProcessID - // When a process monitor is triggered, a MessageDown is sent to the caller. - // Note: The monitor request is an asynchronous signal. That is, it takes - // time before the signal reaches its destination. - MonitorProcess(process interface{}) etf.Ref - - // DemonitorProcess removes monitor. Returns false if the given reference wasn't found - DemonitorProcess(ref etf.Ref) bool - - // Behavior returns the object this process runs on. - Behavior() ProcessBehavior - // GroupLeader returns group leader process. Usually it points to the application process. - GroupLeader() Process - // Parent returns parent process. It returns nil if this process was spawned using Node.Spawn. - Parent() Process - // Context returns process context. - Context() context.Context - - // Children returns list of children pid (Application, Supervisor) - Children() ([]etf.Pid, error) - - // Links returns list of the process pids this process has linked to. - Links() []etf.Pid - // Monitors returns list of monitors created by this process (by pid). - Monitors() []etf.Pid - // MonitorsByName returns list of monitors created by this process (by name). - MonitorsByName() []ProcessID - // MonitoredBy returns list of process pids that monitor this process. - MonitoredBy() []etf.Pid - // Aliases returns list of aliases of this process. - Aliases() []etf.Alias +var ( + crc32q = crc32.MakeTable(0xD5828281) + crc32cache sync.Map +) - // RegisterEvent - RegisterEvent(event Event, messages ...EventMessage) error - UnregisterEvent(event Event) error - MonitorEvent(event Event) error - DemonitorEvent(event Event) error - SendEventMessage(event Event, message EventMessage) error
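Just below, the patch introduces the new gen.Atom type together with its Host and CRC32 helpers. A quick usage sketch (not part of the patch, assuming the new ergo.services/ergo module path introduced here; the output comments are approximate):

package main

import (
	"fmt"

	"ergo.services/ergo/gen"
)

func main() {
	node := gen.Atom("demo@localhost")
	fmt.Println(node)         // 'demo@localhost' — String() wraps the atom in single quotes
	fmt.Println(node.Host())  // localhost — the part after "@", or "" if there is none
	fmt.Println(node.CRC32()) // 8 hex digits, computed once and cached internally
}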
+// Atom is a special kind of string used for process names and node names. +// The max allowed length is 255 bytes. Exceeding this limit makes it impossible +// to send the atom over the network. Keep this in mind. +type Atom string - PutSyncRequest(ref etf.Ref) error - CancelSyncRequest(ref etf.Ref) - WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error) - PutSyncReply(ref etf.Ref, term etf.Term, err error) error - ProcessChannels() ProcessChannels +func (a Atom) String() string { + return "'" + string(a) + "'" } -// ProcessInfo struct with process details -type ProcessInfo struct { - PID etf.Pid - Name string - CurrentFunction string - Status string - MessageQueueLen int - Links []etf.Pid - Monitors []etf.Pid - MonitorsByName []ProcessID - MonitoredBy []etf.Pid - Aliases []etf.Alias - Dictionary etf.Map - TrapExit bool - GroupLeader etf.Pid - Compression bool +func (a Atom) Host() string { + s := strings.Split(string(a), "@") + if len(s) == 2 { + return s[1] + } + return "" } -// ProcessOptions -type ProcessOptions struct { - // Context allows mixing the system context with the custom one. E.g., to limit - // the lifespan using context.WithTimeout. This context MUST be based on the - // other Process' context. Otherwise, you get the error lib.ErrProcessContext - Context context.Context - // MailboxSize defines the length of message queue for the process - MailboxSize uint16 - // DirectboxSize defines the length of message queue for the direct requests - DirectboxSize uint16 - // GroupLeader - GroupLeader Process - // Env sets the process environment variables - Env map[EnvKey]interface{} - - // Fallback defines the process to where messages will be forwarded - // if the mailbox is overflowed. The tag value could be used to - // differentiate the source processes. Forwarded messages are wrapped - // into the MessageFallback struct. - Fallback ProcessFallback +func (a Atom) CRC32() string { + if v, exist := crc32cache.Load(a); exist { + return v.(string) + } + if a == "" { + return "" + } + hash := fmt.Sprintf("%08X", crc32.Checksum([]byte(a), crc32q)) + crc32cache.Store(a, hash) + return hash } -// ProcessFallback -type ProcessFallback struct { - Name string - Tag string +// PID +type PID struct { + Node Atom + ID uint64 + Creation int64 } -// RemoteSpawnRequest -type RemoteSpawnRequest struct { - From etf.Pid - Ref etf.Ref - Options RemoteSpawnOptions +// String +func (p PID) String() string { + return fmt.Sprintf("<%s.%d.%d>", p.Node.CRC32(), int32(p.ID>>32), int32(p.ID)) } - -// RemoteSpawnOptions defines options for RemoteSpawn method -type RemoteSpawnOptions struct { - // Name registers an associated name with the spawned process - Name string - // Monitor enables monitor on the spawned process using provided reference - Monitor etf.Ref - // Link enables link between the calling and spawned processes - Link bool - // Function in order to support {M,F,A} request to the Erlang node - Function string +func (p PID) MarshalJSON() ([]byte, error) { + return []byte("\"" + p.String() + "\""), nil } -// ProcessChannels -type ProcessChannels struct { - Mailbox <-chan ProcessMailboxMessage - Direct <-chan ProcessDirectMessage - GracefulExit <-chan ProcessGracefulExitRequest +// ProcessID long notation of registered process {process_name, node_name} +type ProcessID struct { + Name Atom + Node Atom } -// ProcessMailboxMessage -type ProcessMailboxMessage struct { - From etf.Pid - Message interface{} +// String returns the string representation of the ProcessID value +func (p ProcessID) String() string { + return fmt.Sprintf("<%s.%s>", p.Node.CRC32(), p.Name) } - -// ProcessDirectMessage -type ProcessDirectMessage struct { - Ref etf.Ref - Message interface{} - Err error +func (p ProcessID) MarshalJSON() ([]byte, error) { + return
[]byte("\"" + p.String() + "\""), nil } -// ProcessGracefulExitRequest -type ProcessGracefulExitRequest struct { - From etf.Pid - Reason string +// Ref +type Ref struct { + Node Atom + Creation int64 + ID [3]uint64 } -// ProcessState -type ProcessState struct { - Process - State interface{} +// String +func (r Ref) String() string { + return fmt.Sprintf("Ref#<%s.%d.%d.%d>", r.Node.CRC32(), r.ID[0], r.ID[1], r.ID[2]) } - -// ProcessBehavior interface contains methods you should implement to make your own process behavior -type ProcessBehavior interface { - ProcessInit(Process, ...etf.Term) (ProcessState, error) - ProcessLoop(ProcessState, chan<- bool) string // method which implements control flow of process +func (r Ref) MarshalJSON() ([]byte, error) { + return []byte("\"" + r.String() + "\""), nil } -// Core the common set of methods provided by Process and node.Node interfaces -type Core interface { - - // ProcessByName returns Process for the given name. - // Returns nil if it doesn't exist (not found) or terminated. - ProcessByName(name string) Process - - // ProcessByPid returns Process for the given Pid. - // Returns nil if it doesn't exist (not found) or terminated. - ProcessByPid(pid etf.Pid) Process - - // ProcessByAlias returns Process for the given alias. - // Returns nil if it doesn't exist (not found) or terminated - ProcessByAlias(alias etf.Alias) Process - - // ProcessInfo returns the details about given Pid - ProcessInfo(pid etf.Pid) (ProcessInfo, error) +// Alias +type Alias Ref - // ProcessList returns the list of running processes - ProcessList() []Process - - // MakeRef creates an unique reference within this node - MakeRef() etf.Ref - - // IsAlias checks whether the given alias is belongs to the alive process on this node. - // If the process died all aliases are cleaned up and this function returns - // false for the given alias. For alias from the remote node always returns false. - IsAlias(etf.Alias) bool - - // IsMonitor returns true if the given references is a monitor - IsMonitor(ref etf.Ref) bool - - // RegisterBehavior - RegisterBehavior(group, name string, behavior ProcessBehavior, data interface{}) error - // RegisteredBehavior - RegisteredBehavior(group, name string) (RegisteredBehavior, error) - // RegisteredBehaviorGroup - RegisteredBehaviorGroup(group string) []RegisteredBehavior - // UnregisterBehavior - UnregisterBehavior(group, name string) error +// String +func (a Alias) String() string { + return fmt.Sprintf("Alias#<%s.%d.%d.%d>", a.Node.CRC32(), a.ID[0], a.ID[1], a.ID[2]) } -// RegisteredBehavior -type RegisteredBehavior struct { - Behavior ProcessBehavior - Data interface{} +func (a Alias) MarshalJSON() ([]byte, error) { + return []byte("\"" + a.String() + "\""), nil } -// ProcessID long notation of registered process {process_name, node_name} -type ProcessID struct { - Name string - Node string +// Event +type Event struct { + Name Atom + Node Atom } -// String string representaion of ProcessID value -func (p ProcessID) String() string { - return fmt.Sprintf("<%s:%s>", p.Name, p.Node) +type EventOptions struct { + Notify bool + Buffer int } -// MessageDown delivers as a message to Server's HandleInfo callback of the process -// that created monitor using MonitorProcess. 
-// Reason values: -// - the exit reason of the process -// - 'noproc' (process did not exist at the time of monitor creation) -// - 'noconnection' (no connection to the node where the monitored process resides) -// - 'noproxy' (no connection to the proxy this node had has a connection through. monitored process could be still alive) -type MessageDown struct { - Ref etf.Ref // a monitor reference - ProcessID ProcessID // if monitor was created by name - Pid etf.Pid - Reason string +func (e Event) String() string { + return fmt.Sprintf("Event#<%s:%s>", e.Node.CRC32(), e.Name) } - -// MessageNodeDown delivers as a message to Server's HandleInfo callback of the process -// that created monitor using MonitorNode -type MessageNodeDown struct { - Ref etf.Ref - Name string +func (e Event) MarshalJSON() ([]byte, error) { + return []byte("\"" + e.String() + "\""), nil } -// MessageProxyDown delivers as a message to Server's HandleInfo callback of the process -// that created monitor using MonitorNode if the connection to the node was through the proxy -// nodes and one of them went down. -type MessageProxyDown struct { - Ref etf.Ref - Node string - Proxy string - Reason string -} +// Env +type Env string -// MessageExit delievers to Server's HandleInfo callback on enabled trap exit using SetTrapExit(true) -// Reason values: -// - the exit reason of the process -// - 'noproc' (process did not exist at the time of link creation) -// - 'noconnection' (no connection to the node where the linked process resides) -// - 'noproxy' (no connection to the proxy this node had has a connection through. linked process could be still alive) -type MessageExit struct { - Pid etf.Pid - Reason string +func (e Env) String() string { + return strings.ToUpper(string(e)) } - -// MessageFallback delivers to the process specified as a fallback process in ProcessOptions.Fallback.Name if the mailbox has been overflowed -type MessageFallback struct { - Process etf.Pid - Tag string - Message etf.Term +func (e Env) MarshalJSON() ([]byte, error) { + return []byte("\"" + e.String() + "\""), nil } -// MessageDirectChildren type intended to be used in Process.Children which returns []etf.Pid -// You can handle this type of message in your HandleDirect callback to enable Process.Children -// support for your gen.Server actor. 
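To make these message types concrete — a hedged sketch (against the pre-3.0 API being deleted here) of a gen.Server that traps exits and inspects system messages in HandleInfo, using the IsMessageDown/IsMessageExit helpers defined just below; the watcher name is illustrative:

package demo

import (
	"fmt"

	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

type watcher struct {
	gen.Server
}

func (w *watcher) Init(process *gen.ServerProcess, args ...etf.Term) error {
	// exit signals now arrive as gen.MessageExit instead of terminating us
	process.SetTrapExit(true)
	return nil
}

func (w *watcher) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
	if down, ok := gen.IsMessageDown(message); ok {
		fmt.Printf("monitored process %v terminated: %s\n", down.Pid, down.Reason)
		return gen.ServerStatusOK
	}
	if exit, ok := gen.IsMessageExit(message); ok {
		fmt.Printf("linked process %v exited: %s\n", exit.Pid, exit.Reason)
		return gen.ServerStatusOK
	}
	return gen.ServerStatusOK
}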
-type MessageDirectChildren struct{} - -// IsMessageDown -func IsMessageDown(message etf.Term) (MessageDown, bool) { - var md MessageDown - switch m := message.(type) { - case MessageDown: - return m, true - } - return md, false +// Version +type Version struct { + Name string + Release string + Commit string + License string } -// IsMessageExit -func IsMessageExit(message etf.Term) (MessageExit, bool) { - var me MessageExit - switch m := message.(type) { - case MessageExit: - return m, true +func (v Version) String() string { + if v.Name == "" { + return "" } - return me, false -} - -// IsMessageProxyDown -func IsMessageProxyDown(message etf.Term) (MessageProxyDown, bool) { - var mpd MessageProxyDown - switch m := message.(type) { - case MessageProxyDown: - return m, true + if v.Commit == "" { + return v.Str() } - return mpd, false + return fmt.Sprintf("%s:%s[%s]", v.Name, v.Release, v.Commit) } -// IsMessageFallback -func IsMessageFallback(message etf.Term) (MessageFallback, bool) { - var mf MessageFallback - switch m := message.(type) { - case MessageFallback: - return m, true +func (v Version) Str() string { + if v.Name == "" { + return "" } - return mf, false + return fmt.Sprintf("%s:%s", v.Name, v.Release) +} + +// LogLevel +type LogLevel int + +func (l LogLevel) String() string { + switch l { + case LogLevelTrace: + return "trace" + case LogLevelDebug: + return "debug" + case LogLevelInfo: + return "info" + case LogLevelWarning: + return "warning" + case LogLevelError: + return "error" + case LogLevelPanic: + return "panic" + case LogLevelDisabled: + return "disabled" + + case LogLevelSystem: + return "system" + } + return "unknown log level" } -type CancelFunc func() bool +func (l LogLevel) MarshalJSON() ([]byte, error) { + return []byte("\"" + l.String() + "\""), nil +} -type EventMessage interface{} -type Event string +const ( + LogLevelSystem LogLevel = -100 -// MessageEventDown delivers to the process which monitored EventType if the owner -// of this EventType has terminated -type MessageEventDown struct { - Event Event - Reason string -} + LogLevelTrace LogLevel = -2 + LogLevelDebug LogLevel = -1 + LogLevelDefault LogLevel = 0 // inherite from node/app/parent process + LogLevelInfo LogLevel = 1 + LogLevelWarning LogLevel = 2 + LogLevelError LogLevel = 3 + LogLevelPanic LogLevel = 4 + LogLevelDisabled LogLevel = 5 +) diff --git a/gen/udp.go b/gen/udp.go deleted file mode 100644 index a52c3862..00000000 --- a/gen/udp.go +++ /dev/null @@ -1,317 +0,0 @@ -package gen - -import ( - "fmt" - "io" - "net" - "strconv" - "sync/atomic" - "time" - "unsafe" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type UDPBehavior interface { - ServerBehavior - - InitUDP(process *UDPProcess, args ...etf.Term) (UDPOptions, error) - - HandleUDPCall(process *UDPProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - HandleUDPCast(process *UDPProcess, message etf.Term) ServerStatus - HandleUDPInfo(process *UDPProcess, message etf.Term) ServerStatus - - HandleUDPTerminate(process *UDPProcess, reason string) -} - -type UDPStatus error - -var ( - UDPStatusOK UDPStatus - UDPStatusStop UDPStatus = fmt.Errorf("stop") - - defaultUDPDeadlineTimeout int = 3 - defaultUDPQueueLength int = 10 - defaultUDPMaxPacketSize = int(65000) -) - -type UDP struct { - Server -} - -type UDPOptions struct { - Host string - Port uint16 - - Handler UDPHandlerBehavior - NumHandlers int - IdleTimeout int - DeadlineTimeout int - QueueLength int - MaxPacketSize int - ExtraHandlers 
bool -} - -type UDPProcess struct { - ServerProcess - options UDPOptions - behavior UDPBehavior - - pool []*Process - counter uint64 - packetConn net.PacketConn -} - -type UDPPacket struct { - Addr net.Addr - Socket io.Writer -} - -// Server callbacks -func (udp *UDP) Init(process *ServerProcess, args ...etf.Term) error { - - behavior := process.Behavior().(UDPBehavior) - behavior, ok := process.Behavior().(UDPBehavior) - if !ok { - return fmt.Errorf("not a UDPBehavior") - } - - udpProcess := &UDPProcess{ - ServerProcess: *process, - behavior: behavior, - } - // do not inherit parent State - udpProcess.State = nil - - options, err := behavior.InitUDP(udpProcess, args...) - if err != nil { - return err - } - if options.Handler == nil { - return fmt.Errorf("handler must be defined") - } - - if options.QueueLength == 0 { - options.QueueLength = defaultUDPQueueLength - } - - if options.DeadlineTimeout < 1 { - // we need to check the context if it was canceled to stop - // reading and close the connection socket - options.DeadlineTimeout = defaultUDPDeadlineTimeout - } - - if options.MaxPacketSize == 0 { - options.MaxPacketSize = defaultUDPMaxPacketSize - } - - udpProcess.options = options - if err := udpProcess.initHandlers(); err != nil { - return err - } - - if options.Port == 0 { - return fmt.Errorf("UDP port must be defined") - } - - lc := net.ListenConfig{} - hostPort := net.JoinHostPort("", strconv.Itoa(int(options.Port))) - pconn, err := lc.ListenPacket(process.Context(), "udp", hostPort) - if err != nil { - return err - } - - udpProcess.packetConn = pconn - process.State = udpProcess - - // start serving - go udpProcess.serve() - return nil -} - -func (udp *UDP) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - udpp := process.State.(*UDPProcess) - return udpp.behavior.HandleUDPCall(udpp, from, message) -} - -func (udp *UDP) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - udpp := process.State.(*UDPProcess) - return udpp.behavior.HandleUDPCast(udpp, message) -} - -func (udp *UDP) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - udpp := process.State.(*UDPProcess) - return udpp.behavior.HandleUDPInfo(udpp, message) -} - -func (udp *UDP) Terminate(process *ServerProcess, reason string) { - p := process.State.(*UDPProcess) - p.packetConn.Close() - p.behavior.HandleUDPTerminate(p, reason) -} - -// -// default UDP callbacks -// - -// HandleUDPCall -func (udp *UDP) HandleUDPCall(process *UDPProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("[gen.UDP] HandleUDPCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleUDPCast -func (udp *UDP) HandleUDPCast(process *UDPProcess, message etf.Term) ServerStatus { - lib.Warning("[gen.UDP] HandleUDPCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleUDPInfo -func (udp *UDP) HandleUDPInfo(process *UDPProcess, message etf.Term) ServerStatus { - lib.Warning("[gen.UDP] HandleUDPInfo: unhandled message %#v", message) - return ServerStatusOK -} -func (udp *UDP) HandleUDPTerminate(process *UDPProcess, reason string) { - return -} - -// internals - -func (udpp *UDPProcess) initHandlers() error { - if udpp.options.NumHandlers < 1 { - udpp.options.NumHandlers = 1 - } - if udpp.options.IdleTimeout < 0 { - udpp.options.IdleTimeout = 0 - } - - c := atomic.AddUint64(&udpp.counter, 1) - if c > 1 { - return fmt.Errorf("you can not use the same object more than 
once") - } - - for i := 0; i < udpp.options.NumHandlers; i++ { - p := udpp.startHandler(i, udpp.options.IdleTimeout) - if p == nil { - return fmt.Errorf("can not initialize handlers") - } - udpp.pool = append(udpp.pool, &p) - } - return nil -} - -func (udpp *UDPProcess) startHandler(id int, idleTimeout int) Process { - opts := ProcessOptions{ - Context: udpp.Context(), - MailboxSize: uint16(udpp.options.QueueLength), - } - - optsHandler := optsUDPHandler{id: id, idleTimeout: idleTimeout} - p, err := udpp.Spawn("", opts, udpp.options.Handler, optsHandler) - if err != nil { - lib.Warning("[gen.UDP] can not start UDPHandler: %s", err) - return nil - } - return p -} - -func (udpp *UDPProcess) serve() { - var handlerProcess Process - var handlerProcessID int - var packet interface{} - defer udpp.packetConn.Close() - - writer := &writer{ - pconn: udpp.packetConn, - } - - ctx := udpp.Context() - deadlineTimeout := time.Second * time.Duration(udpp.options.DeadlineTimeout) - - l := uint64(udpp.options.NumHandlers) - // make round robin using the counter value - cnt := atomic.AddUint64(&udpp.counter, 1) - // choose process as a handler for the packet received on this connection - handlerProcessID = int(cnt % l) - handlerProcess = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&udpp.pool[handlerProcessID])))) - -nextPacket: - for { - if ctx.Err() != nil { - return - } - deadline := false - if err := udpp.packetConn.SetReadDeadline(time.Now().Add(deadlineTimeout)); err == nil { - deadline = true - } - buf := lib.TakeBuffer() - buf.Allocate(udpp.options.MaxPacketSize) - n, a, err := udpp.packetConn.ReadFrom(buf.B) - if n == 0 { - if err, ok := err.(net.Error); deadline && ok && err.Timeout() { - packet = messageUDPHandlerTimeout{} - break - } - // stop serving and close this socket - return - } - if err != nil { - lib.Warning("[gen.UDP] got error on receiving packet from %q: %s", a, err) - } - - writer.addr = a - packet = messageUDPHandlerPacket{ - data: buf, - packet: UDPPacket{ - Addr: a, - Socket: writer, - }, - n: n, - } - break - } - -retry: - for a := uint64(0); a < l; a++ { - if ctx.Err() != nil { - return - } - - err := udpp.Cast(handlerProcess.Self(), packet) - switch err { - case nil: - break - case lib.ErrProcessUnknown: - if handlerProcessID == -1 { - // it was an extra handler do not restart. 
try to use the existing one - cnt = atomic.AddUint64(&udpp.counter, 1) - handlerProcessID = int(cnt % l) - handlerProcess = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&udpp.pool[handlerProcessID])))) - goto retry - } - - // respawn terminated process - handlerProcess = udpp.startHandler(handlerProcessID, udpp.options.IdleTimeout) - atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&udpp.pool[handlerProcessID])), unsafe.Pointer(&handlerProcess)) - continue - - case lib.ErrProcessBusy: - handlerProcessID = int((a + cnt) % l) - handlerProcess = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&udpp.pool[handlerProcessID])))) - continue - default: - lib.Warning("[gen.UDP] error on handling packet %#v: %s", packet, err) - } - goto nextPacket - } -} - -type writer struct { - pconn net.PacketConn - addr net.Addr -} - -func (w *writer) Write(data []byte) (int, error) { - return w.pconn.WriteTo(data, w.addr) -} diff --git a/gen/udp_handler.go b/gen/udp_handler.go deleted file mode 100644 index 4d206887..00000000 --- a/gen/udp_handler.go +++ /dev/null @@ -1,152 +0,0 @@ -package gen - -import ( - "fmt" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type UDPHandlerBehavior interface { - ServerBehavior - - // Mandatory callback - HandlePacket(process *UDPHandlerProcess, data []byte, packet UDPPacket) - - // Optional callbacks - HandleTimeout(process *UDPHandlerProcess) - - HandleUDPHandlerCall(process *UDPHandlerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - HandleUDPHandlerCast(process *UDPHandlerProcess, message etf.Term) ServerStatus - HandleUDPHandlerInfo(process *UDPHandlerProcess, message etf.Term) ServerStatus - HandleUDPHandlerTerminate(process *UDPHandlerProcess, reason string) -} - -type UDPHandler struct { - Server -} - -type UDPHandlerProcess struct { - ServerProcess - behavior UDPHandlerBehavior - - lastPacket int64 - idleTimeout int - id int -} - -type optsUDPHandler struct { - id int - idleTimeout int -} -type messageUDPHandlerIdleCheck struct{} -type messageUDPHandlerPacket struct { - data *lib.Buffer - packet UDPPacket - n int -} -type messageUDPHandlerTimeout struct{} - -func (udph *UDPHandler) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(UDPHandlerBehavior) - if !ok { - return fmt.Errorf("UDP: not a UDPHandlerBehavior") - } - handlerProcess := &UDPHandlerProcess{ - ServerProcess: *process, - behavior: behavior, - } - if len(args) == 0 { - return fmt.Errorf("UDP: can not start with no args") - } - - if a, ok := args[0].(optsUDPHandler); ok { - handlerProcess.idleTimeout = a.idleTimeout - handlerProcess.id = a.id - } else { - return fmt.Errorf("UDP: wrong args for the UDPHandler") - } - - // do not inherit parent State - handlerProcess.State = nil - process.State = handlerProcess - - if handlerProcess.idleTimeout > 0 { - process.CastAfter(process.Self(), messageUDPHandlerIdleCheck{}, 5*time.Second) - } - - return nil -} - -func (udph *UDPHandler) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - udpp := process.State.(*UDPHandlerProcess) - return udpp.behavior.HandleUDPHandlerCall(udpp, from, message) -} - -func (udph *UDPHandler) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - udpp := process.State.(*UDPHandlerProcess) - switch m := message.(type) { - case messageUDPHandlerIdleCheck: - if time.Now().Unix()-udpp.lastPacket > int64(udpp.idleTimeout) { - return 
ServerStatusStop - } - process.CastAfter(process.Self(), messageUDPHandlerIdleCheck{}, 5*time.Second) - - case messageUDPHandlerPacket: - udpp.lastPacket = time.Now().Unix() - udpp.behavior.HandlePacket(udpp, m.data.B[:m.n], m.packet) - lib.ReleaseBuffer(m.data) - - case messageUDPHandlerTimeout: - udpp.behavior.HandleTimeout(udpp) - - default: - return udpp.behavior.HandleUDPHandlerCast(udpp, message) - } - return ServerStatusOK -} - -func (udph *UDPHandler) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - udpp := process.State.(*UDPHandlerProcess) - return udpp.behavior.HandleUDPHandlerInfo(udpp, message) -} - -func (udph *UDPHandler) Terminate(process *ServerProcess, reason string) { - udpp := process.State.(*UDPHandlerProcess) - udpp.behavior.HandleUDPHandlerTerminate(udpp, reason) -} - -// -// default callbacks -// - -func (udph *UDPHandler) HandleTimeout(process *UDPHandlerProcess) { - return -} - -// HandleUDPHandlerCall -func (udph *UDPHandler) HandleUDPHandlerCall(process *UDPHandlerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleUDPHandlerCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleUDPHandlerCast -func (udph *UDPHandler) HandleUDPHandlerCast(process *UDPHandlerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleUDPHandlerCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleUDPHandlerInfo -func (udph *UDPHandler) HandleUDPHandlerInfo(process *UDPHandlerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleUDPHandlerInfo: unhandled message %#v", message) - return ServerStatusOK -} -func (udph *UDPHandler) HandleUDPHandlerTerminate(process *UDPHandlerProcess, reason string) { - return -} - -// we should disable SetTrapExit for the UDPHandlerProcess by overriding it. 
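For completeness — the same pattern for UDP, as a hedged sketch against this (removed) pre-3.0 API: HandlePacket receives each datagram, and UDPPacket.Socket is an io.Writer that replies to the sender's address. The echoHandler/echoServer names and the port are illustrative:

package demo

import (
	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
)

// echoHandler echoes every datagram back to its sender.
type echoHandler struct {
	gen.UDPHandler // embeds the default callbacks
}

func (e *echoHandler) HandlePacket(process *gen.UDPHandlerProcess, data []byte, packet gen.UDPPacket) {
	packet.Socket.Write(data) // writes back via WriteTo(sender address)
}

// echoServer binds the UDP socket and wires the handler in.
type echoServer struct {
	gen.UDP
}

func (s *echoServer) InitUDP(process *gen.UDPProcess, args ...etf.Term) (gen.UDPOptions, error) {
	return gen.UDPOptions{Port: 9090, Handler: &echoHandler{}}, nil
}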
-func (udpp *UDPHandlerProcess) SetTrapExit(trap bool) { - lib.Warning("[%s] method 'SetTrapExit' is disabled for UDPHandlerProcess", udpp.Self()) -} diff --git a/gen/web.go b/gen/web.go deleted file mode 100644 index 1a216799..00000000 --- a/gen/web.go +++ /dev/null @@ -1,223 +0,0 @@ -package gen - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "reflect" - "strconv" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -type WebBehavior interface { - ServerBehavior - // mandatory method - InitWeb(process *WebProcess, args ...etf.Term) (WebOptions, error) - - // optional methods - HandleWebCall(process *WebProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - HandleWebCast(process *WebProcess, message etf.Term) ServerStatus - HandleWebInfo(process *WebProcess, message etf.Term) ServerStatus -} - -type WebStatus error - -var ( - WebStatusOK WebStatus // nil - WebStatusStop WebStatus = fmt.Errorf("stop") - - // internals - defaultWebPort = uint16(8080) - defaultWebTLSPort = uint16(8443) -) - -type Web struct { - Server -} - -type WebOptions struct { - Host string - Port uint16 // default port 8080, for TLS - 8443 - TLS *tls.Config - Handler http.Handler -} - -type WebProcess struct { - ServerProcess - options WebOptions - behavior WebBehavior - listener net.Listener -} - -type defaultHandler struct{} - -func (dh *defaultHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - fmt.Fprintf(w, "Handler is not initialized\n") -} - -// -// WebProcess API -// - -func (wp *WebProcess) StartWebHandler(web WebHandlerBehavior, options WebHandlerOptions) http.Handler { - handler, err := web.initHandler(wp, web, options) - if err != nil { - name := reflect.ValueOf(web).Elem().Type().Name() - lib.Warning("[%s] can not initialaze WebHandler (%s): %s", wp.Self(), name, err) - - return &defaultHandler{} - } - return handler -} - -// -// Server callbacks -// - -func (web *Web) Init(process *ServerProcess, args ...etf.Term) error { - - behavior, ok := process.Behavior().(WebBehavior) - if !ok { - return fmt.Errorf("Web: not a WebBehavior") - } - - webProcess := &WebProcess{ - ServerProcess: *process, - behavior: behavior, - } - // do not inherit parent State - webProcess.State = nil - - options, err := behavior.InitWeb(webProcess, args...) - if err != nil { - return err - } - - tlsEnabled := false - if options.TLS != nil { - if options.TLS.Certificates == nil && options.TLS.GetCertificate == nil { - return fmt.Errorf("TLS config has no certificates") - } - tlsEnabled = true - } - - if options.Port == 0 { - if tlsEnabled { - options.Port = defaultWebTLSPort - } else { - options.Port = defaultWebPort - } - } - - lc := net.ListenConfig{} - ctx := process.Context() - hostPort := net.JoinHostPort(options.Host, strconv.Itoa(int(options.Port))) - listener, err := lc.Listen(ctx, "tcp", hostPort) - if err != nil { - return err - } - - if tlsEnabled { - listener = tls.NewListener(listener, options.TLS) - } - - httpServer := http.Server{ - Handler: options.Handler, - } - - // start acceptor - go func() { - err := httpServer.Serve(listener) - process.Exit(err.Error()) - }() - - // Golang's listener is weird. It takes the context in the Listen method - // but doesn't use it at all. HTTP server has the same issue. - // So making a little workaround to handle process context cancelation. - // Maybe one day they fix it. - go func() { - // this goroutine will be alive until the process context is canceled. 
- select { - case <-ctx.Done(): - httpServer.Close() - } - }() - - webProcess.options = options - webProcess.listener = listener - process.State = webProcess - - return nil -} - -// HandleCall -func (web *Web) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - webp := process.State.(*WebProcess) - return webp.behavior.HandleWebCall(webp, from, message) -} - -// HandleDirect -func (web *Web) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - return nil, DirectStatusOK -} - -// HandleCast -func (web *Web) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - webp := process.State.(*WebProcess) - status := webp.behavior.HandleWebCast(webp, message) - - switch status { - case WebStatusOK: - return ServerStatusOK - case WebStatusStop: - return ServerStatusStop - default: - return ServerStatus(status) - } -} - -// HandleInfo -func (web *Web) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - webp := process.State.(*WebProcess) - status := webp.behavior.HandleWebInfo(webp, message) - - switch status { - case WebStatusOK: - return ServerStatusOK - case WebStatusStop: - return ServerStatusStop - default: - return ServerStatus(status) - } -} - -func (web *Web) Terminate(process *ServerProcess, reason string) { - webp := process.State.(*WebProcess) - webp.listener.Close() -} - -// -// default Web callbacks -// - -// HandleWebCall -func (web *Web) HandleWebCall(process *WebProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleWebCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleWebCast -func (web *Web) HandleWebCast(process *WebProcess, message etf.Term) ServerStatus { - lib.Warning("HandleWebCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleWebInfo -func (web *Web) HandleWebInfo(process *WebProcess, message etf.Term) ServerStatus { - lib.Warning("HandleWebInfo: unhandled message %#v", message) - return ServerStatusOK -} diff --git a/gen/web_handler.go b/gen/web_handler.go deleted file mode 100644 index f656d36c..00000000 --- a/gen/web_handler.go +++ /dev/null @@ -1,351 +0,0 @@ -package gen - -import ( - "fmt" - "net/http" - "reflect" - "strconv" - "sync" - "sync/atomic" - "time" - "unsafe" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -var ( - WebHandlerStatusDone WebHandlerStatus = nil - WebHandlerStatusWait WebHandlerStatus = fmt.Errorf("wait") - - defaultRequestQueueLength = 10 - - webMessageRequestPool = &sync.Pool{ - New: func() interface{} { - return &webMessageRequest{} - }, - } -) - -type WebHandlerStatus error - -type WebHandlerBehavior interface { - ServerBehavior - - // Mandatory callback - HandleRequest(process *WebHandlerProcess, request WebMessageRequest) WebHandlerStatus - - // Optional callbacks - HandleWebHandlerCall(process *WebHandlerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) - HandleWebHandlerCast(process *WebHandlerProcess, message etf.Term) ServerStatus - HandleWebHandlerInfo(process *WebHandlerProcess, message etf.Term) ServerStatus - HandleWebHandlerTerminate(process *WebHandlerProcess, reason string, count int64) - - // internal methods - initHandler(process Process, handler WebHandlerBehavior, options WebHandlerOptions) (http.Handler, error) -} - -type WebHandler struct { - Server - - parent Process - behavior WebHandlerBehavior - options WebHandlerOptions 
- pool []*Process - counter uint64 -} - -type poolItem struct { - process Process -} - -type WebHandlerOptions struct { - // Timeout for web-requests. The default timeout is 5 seconds. It can also be - // overridden within HTTP requests using the header 'Request-Timeout' - RequestTimeout int - // RequestQueueLength defines how many parallel requests can be directed to this process. Default value is 10. - RequestQueueLength int - // NumHandlers defines how many handlers will be started. Default 1 - NumHandlers int - // IdleTimeout defines how long (in seconds) keep the started handler alive with no requests. Zero value makes handler not stop. - IdleTimeout int -} - -type WebHandlerProcess struct { - ServerProcess - behavior WebHandlerBehavior - lastRequest int64 - counter int64 - idleTimeout int - id int -} - -type WebMessageRequest struct { - Ref etf.Ref - Request *http.Request - Response http.ResponseWriter -} - -type webMessageRequest struct { - sync.Mutex - WebMessageRequest - requestState int // 0 - initial, 1 - canceled, 2 - handled -} - -type optsWebHandler struct { - id int - idleTimeout int -} - -type messageWebHandlerIdleCheck struct{} - -func (wh *WebHandler) initHandler(parent Process, handler WebHandlerBehavior, options WebHandlerOptions) (http.Handler, error) { - if options.NumHandlers < 1 { - options.NumHandlers = 1 - } - if options.RequestTimeout < 1 { - options.RequestTimeout = DefaultCallTimeout - } - - if options.IdleTimeout < 0 { - options.IdleTimeout = 0 - } - - if options.RequestQueueLength < 1 { - options.RequestQueueLength = defaultRequestQueueLength - } - - wh.parent = parent - wh.behavior = handler - wh.options = options - c := atomic.AddUint64(&wh.counter, 1) - if c > 1 { - return nil, fmt.Errorf("you can not use the same object more than once") - } - - for i := 0; i < options.NumHandlers; i++ { - p := wh.startHandler(i, options.IdleTimeout) - if p == nil { - return nil, fmt.Errorf("can not initialize handlers") - } - wh.pool = append(wh.pool, &p) - } - return wh, nil -} - -func (wh *WebHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var p Process - - //w.WriteHeader(http.StatusOK) - //return - - mr := webMessageRequestPool.Get().(*webMessageRequest) - mr.Request = r - mr.Response = w - mr.requestState = 0 - - timeout := wh.options.RequestTimeout - if t := r.Header.Get("Request-Timeout"); t != "" { - intT, err := strconv.Atoi(t) - if err == nil && intT > 0 { - timeout = intT - } - } - - l := uint64(wh.options.NumHandlers) - // make round robin using the counter value - c := atomic.AddUint64(&wh.counter, 1) - - // attempts - for a := uint64(0); a < l; a++ { - i := (c + a) % l - - p = *(*Process)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&wh.pool[i])))) - - respawned: - if r.Context().Err() != nil { - // canceled by the client - return - } - _, err := p.DirectWithTimeout(mr, timeout) - switch err { - case nil: - webMessageRequestPool.Put(mr) - return - - case lib.ErrProcessTerminated: - mr.Lock() - if mr.requestState > 0 { - mr.Unlock() - return - } - mr.Unlock() - p = wh.startHandler(int(i), wh.options.IdleTimeout) - atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&wh.pool[i])), unsafe.Pointer(&p)) - goto respawned - - case lib.ErrProcessBusy: - continue - - case lib.ErrTimeout: - mr.Lock() - if mr.requestState == 2 { - // timeout happened during the handling request - mr.Unlock() - webMessageRequestPool.Put(mr) - return - } - mr.requestState = 1 // canceled - mr.Unlock() - w.WriteHeader(http.StatusGatewayTimeout) - return - - default: 
- lib.Warning("WebHandler %s return error: %s", p.Self(), err) - mr.Lock() - if mr.requestState > 0 { - mr.Unlock() - return - } - mr.Unlock() - - w.WriteHeader(http.StatusInternalServerError) // 500 - return - } - } - - // all handlers are busy - name := reflect.ValueOf(wh.behavior).Elem().Type().Name() - lib.Warning("too many requests for %s", name) - w.WriteHeader(http.StatusServiceUnavailable) // 503 - webMessageRequestPool.Put(mr) -} - -func (wh *WebHandler) startHandler(id int, idleTimeout int) Process { - opts := ProcessOptions{ - Context: wh.parent.Context(), - DirectboxSize: uint16(wh.options.RequestQueueLength), - } - - optsHandler := optsWebHandler{id: id, idleTimeout: idleTimeout} - p, err := wh.parent.Spawn("", opts, wh.behavior, optsHandler) - if err != nil { - lib.Warning("can not start WebHandler: %s", err) - return nil - } - return p -} - -func (wh *WebHandler) Init(process *ServerProcess, args ...etf.Term) error { - behavior, ok := process.Behavior().(WebHandlerBehavior) - if !ok { - return fmt.Errorf("Web: not a WebHandlerBehavior") - } - handlerProcess := &WebHandlerProcess{ - ServerProcess: *process, - behavior: behavior, - } - if len(args) == 0 { - return fmt.Errorf("Web: can not start with no args") - } - - if a, ok := args[0].(optsWebHandler); ok { - handlerProcess.idleTimeout = a.idleTimeout - handlerProcess.id = a.id - } else { - return fmt.Errorf("Web: wrong args for the WebHandler") - } - - // do not inherit parent State - handlerProcess.State = nil - process.State = handlerProcess - - if handlerProcess.idleTimeout > 0 { - process.CastAfter(process.Self(), messageWebHandlerIdleCheck{}, 5*time.Second) - } - - return nil -} - -func (wh *WebHandler) HandleCall(process *ServerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - whp := process.State.(*WebHandlerProcess) - return whp.behavior.HandleWebHandlerCall(whp, from, message) -} - -func (wh *WebHandler) HandleCast(process *ServerProcess, message etf.Term) ServerStatus { - whp := process.State.(*WebHandlerProcess) - switch message.(type) { - case messageWebHandlerIdleCheck: - if time.Now().Unix()-whp.lastRequest > int64(whp.idleTimeout) { - return ServerStatusStop - } - process.CastAfter(process.Self(), messageWebHandlerIdleCheck{}, 5*time.Second) - - default: - return whp.behavior.HandleWebHandlerCast(whp, message) - } - return ServerStatusOK -} - -func (wh *WebHandler) HandleInfo(process *ServerProcess, message etf.Term) ServerStatus { - whp := process.State.(*WebHandlerProcess) - return whp.behavior.HandleWebHandlerInfo(whp, message) -} - -func (wh *WebHandler) HandleDirect(process *ServerProcess, ref etf.Ref, message interface{}) (interface{}, DirectStatus) { - whp := process.State.(*WebHandlerProcess) - switch m := message.(type) { - case *webMessageRequest: - whp.lastRequest = time.Now().Unix() - whp.counter++ - m.Lock() - defer m.Unlock() - if m.requestState != 0 || m.Request.Context().Err() != nil { // canceled - return nil, DirectStatusOK - } - m.requestState = 2 // handled - m.Ref = ref - status := whp.behavior.HandleRequest(whp, m.WebMessageRequest) - switch status { - case WebHandlerStatusDone: - return nil, DirectStatusOK - - case WebHandlerStatusWait: - return nil, DirectStatusIgnore - default: - return nil, status - } - } - return nil, DirectStatusOK -} - -func (wh *WebHandler) Terminate(process *ServerProcess, reason string) { - whp := process.State.(*WebHandlerProcess) - whp.behavior.HandleWebHandlerTerminate(whp, reason, whp.counter) -} - -// HandleWebHandlerCall -func (wh 
*WebHandler) HandleWebHandlerCall(process *WebHandlerProcess, from ServerFrom, message etf.Term) (etf.Term, ServerStatus) { - lib.Warning("HandleWebHandlerCall: unhandled message (from %#v) %#v", from, message) - return etf.Atom("ok"), ServerStatusOK -} - -// HandleWebHandlerCast -func (wh *WebHandler) HandleWebHandlerCast(process *WebHandlerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleWebHandlerCast: unhandled message %#v", message) - return ServerStatusOK -} - -// HandleWebHandlerInfo -func (wh *WebHandler) HandleWebHandlerInfo(process *WebHandlerProcess, message etf.Term) ServerStatus { - lib.Warning("HandleWebHandlerInfo: unhandled message %#v", message) - return ServerStatusOK -} -func (wh *WebHandler) HandleWebHandlerTerminate(process *WebHandlerProcess, reason string, count int64) { - return -} - -// we should disable SetTrapExit for the WebHandlerProcess by overriding it. -func (whp *WebHandlerProcess) SetTrapExit(trap bool) { - lib.Warning("[%s] method 'SetTrapExit' is disabled for WebHandlerProcess", whp.Self()) -} diff --git a/go.mod b/go.mod index ee76f0a4..1e3afc4e 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ -module github.com/ergo-services/ergo +module ergo.services/ergo -go 1.17 +go 1.20 diff --git a/lib/buffer.go b/lib/buffer.go new file mode 100644 index 00000000..3ef5f732 --- /dev/null +++ b/lib/buffer.go @@ -0,0 +1,185 @@ +package lib + +import ( + "fmt" + "io" + "math" + "sync" + "sync/atomic" +) + +// Buffer +type Buffer struct { + B []byte + original []byte +} + +var ( + DefaultBufferLength = 4096 + buffers = &sync.Pool{ + New: func() interface{} { + b := &Buffer{ + B: make([]byte, 0, DefaultBufferLength), + } + b.original = b.B + return b + }, + } + buffersTaken uint64 = 0 + buffersTakenSize uint64 = 0 + buffersReturned uint64 = 0 + buffersReturnedSize uint64 = 0 +) + +func StatBuffers() { + fmt.Printf("taken: %d (size: %d)\n", buffersTaken, buffersTakenSize) + fmt.Printf("returned: %d (size: %d)\n", buffersReturned, buffersReturnedSize) +} + +// TakeBuffer +func TakeBuffer() *Buffer { + b := buffers.Get().(*Buffer) + atomic.AddUint64(&buffersTaken, 1) + atomic.AddUint64(&buffersTakenSize, uint64(cap(b.B))) + return b +} + +// ReleaseBuffer +func ReleaseBuffer(b *Buffer) { + b.B = b.original[:0] + atomic.AddUint64(&buffersReturned, 1) + atomic.AddUint64(&buffersReturnedSize, uint64(cap(b.B))) + buffers.Put(b) +} + +// Reset +func (b *Buffer) Reset() { + b.B = b.B[:0] +} + +// Set +func (b *Buffer) Set(v []byte) { + if len(v) > cap(b.original) { + b.B = append(b.B[:0], v...) + return + } + b.B = append(b.original[:0], v...) +} + +// AppendByte +func (b *Buffer) AppendByte(v byte) { + b.B = append(b.B, v) +} + +// Append +func (b *Buffer) Append(v []byte) { + b.B = append(b.B, v...) +} + +// AppendString +func (b *Buffer) AppendString(s string) { + b.B = append(b.B, s...) 
+} + +// String +func (b *Buffer) String() string { + return string(b.B) +} + +// Len +func (b *Buffer) Len() int { + return len(b.B) +} + +func (b *Buffer) Cap() int { + return cap(b.B) +} + +// WriteDataTo +func (b *Buffer) WriteDataTo(w io.Writer) error { + l := len(b.B) + if l == 0 { + return nil + } + + for { + n, e := w.Write(b.B) + if e != nil { + return e + } + + l -= n + if l > 0 { + continue + } + + break + } + + return nil +} + +// ReadDataFrom +func (b *Buffer) ReadDataFrom(r io.Reader, limit int) (int, error) { + capB := cap(b.B) + lenB := len(b.B) + if limit == 0 { + limit = math.MaxInt + } + // if buffer becomes too large + if lenB > limit { + return 0, fmt.Errorf("too large") + } + if capB-lenB < capB>>1 { + // less than (almost) 50% space left. increase capacity + b.increase() + capB = cap(b.B) + } + n, e := r.Read(b.B[lenB:capB]) + l := lenB + n + b.B = b.B[:l] + return n, e +} + +func (b *Buffer) Write(v []byte) (n int, err error) { + b.B = append(b.B, v...) + return len(v), nil +} + +func (b *Buffer) Read(v []byte) (n int, err error) { + copy(v, b.B) + return len(b.B), io.EOF +} + +func (b *Buffer) increase() { + cap1 := cap(b.B) * 2 + b1 := make([]byte, cap(b.B), cap1) + copy(b1, b.B) + b.B = b1 +} + +// Allocate +func (b *Buffer) Allocate(n int) { + for { + if cap(b.B) < n { + b.increase() + continue + } + b.B = b.B[:n] + return + } +} + +// Extend +func (b *Buffer) Extend(n int) []byte { + l := len(b.B) + e := l + n + for { + if e > cap(b.B) { + b.increase() + continue + } + b.B = b.B[:e] + return b.B[l:e] + } +} diff --git a/lib/cert.go b/lib/cert.go index 4f210cc0..e1e24423 100644 --- a/lib/cert.go +++ b/lib/cert.go @@ -11,7 +11,6 @@ import ( "encoding/pem" "math/big" "net" - "sync" "time" ) @@ -71,29 +70,3 @@ func GenerateSelfSignedCert(org string, hosts ...string) (tls.Certificate, error return tls.X509KeyPair(certPEM.Bytes(), certPrivKeyPEM.Bytes()) } - -type CertUpdater struct { - sync.RWMutex - cert *tls.Certificate -} - -func CreateCertUpdater(cert tls.Certificate) *CertUpdater { - return &CertUpdater{ - cert: &cert, - } -} - -func (cu *CertUpdater) Update(cert tls.Certificate) { - cu.Lock() - defer cu.Unlock() - - cu.cert = &cert -} - -func (cu *CertUpdater) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) { - return func(ch *tls.ClientHelloInfo) (*tls.Certificate, error) { - cu.RLock() - defer cu.RUnlock() - return cu.cert, nil - } -} diff --git a/lib/compress.go b/lib/compress.go new file mode 100644 index 00000000..693e80d2 --- /dev/null +++ b/lib/compress.go @@ -0,0 +1,177 @@ +package lib + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "compress/lzw" + "compress/zlib" + "encoding/binary" + "fmt" + "io" + "math" + "sync" +) + +var ( + gzipWriters [3]*sync.Pool + gzipReaders = &sync.Pool{ + New: func() interface{} { + return nil + }, + } +) + +// CompressLZW +func CompressLZW(src *Buffer, preallocate uint) (dst *Buffer, err error) { + if src.Len() > math.MaxUint32 { + return nil, fmt.Errorf("message too large") + } + + zBuffer := TakeBuffer() + zBuffer.Allocate(int(preallocate) + 4) + binary.BigEndian.PutUint32(zBuffer.B[preallocate:], uint32(src.Len())) + + zWriter := lzw.NewWriter(zBuffer, lzw.LSB, 8) + if _, err := zWriter.Write(src.B); err != nil { + return nil, err + } + zWriter.Close() + return zBuffer, nil +}
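A minimal usage sketch for the Buffer pool above (illustrative only: copyToWriter is a hypothetical helper, and note that ReadDataFrom performs a single Read call into the pooled buffer):

package example

import (
	"io"
	"net"

	"ergo.services/ergo/lib"
)

// copyToWriter takes a pooled buffer, fills it with one Read from the
// connection (limit 0 means no size limit), writes the data out, and
// returns the buffer to the pool.
func copyToWriter(conn net.Conn, out io.Writer) error {
	buf := lib.TakeBuffer()
	defer lib.ReleaseBuffer(buf)

	if _, err := buf.ReadDataFrom(conn, 0); err != nil {
		return err
	}
	return buf.WriteDataTo(out)
}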
+ +// CompressZLIB +func CompressZLIB(src *Buffer, preallocate uint) (dst *Buffer, err error) { + if src.Len() > math.MaxUint32 { + return nil, fmt.Errorf("message too large") + } + + zBuffer := TakeBuffer() + zBuffer.Allocate(int(preallocate) + 4) + binary.BigEndian.PutUint32(zBuffer.B[preallocate:], uint32(src.Len())) + + zWriter := zlib.NewWriter(zBuffer) + if _, err := zWriter.Write(src.B); err != nil { + return nil, err + } + zWriter.Close() + return zBuffer, nil +} + +// CompressGZIP level: 0 - default, 1 - best speed, 2 - best size +func CompressGZIP(src *Buffer, preallocate uint, level int) (dst *Buffer, err error) { + var zWriter *gzip.Writer + + if src.Len() > math.MaxUint32 { + return nil, fmt.Errorf("message too large") + } + + zBuffer := TakeBuffer() + zBuffer.Allocate(int(preallocate) + 4) + binary.BigEndian.PutUint32(zBuffer.B[preallocate:], uint32(src.Len())) + + var lev int + switch level { + case 2: + lev = flate.BestCompression + case 1: + lev = flate.BestSpeed + default: + level = 0 + lev = flate.DefaultCompression + } + if w, ok := gzipWriters[level].Get().(*gzip.Writer); ok { + zWriter = w + zWriter.Reset(zBuffer) + } else { + zWriter, _ = gzip.NewWriterLevel(zBuffer, lev) + } + if _, err := zWriter.Write(src.B); err != nil { + return nil, err + } + zWriter.Close() + gzipWriters[level].Put(zWriter) + return zBuffer, nil +} + +func DecompressLZW(src *Buffer, skip uint) (dst *Buffer, err error) { + if src.Len() < int(skip)+4 { + return nil, fmt.Errorf("too short source buffer") + } + source := src.B[skip:] + lenUnpacked := int(binary.BigEndian.Uint32(source[:4])) + reader := lzw.NewReader(bytes.NewBuffer(source[4:]), lzw.LSB, 8) + dst = TakeBuffer() + dst.Allocate(lenUnpacked) + if err := decompress(dst.B, reader); err != nil { + return nil, err + } + return +} +func DecompressZLIB(src *Buffer, skip uint) (dst *Buffer, err error) { + if src.Len() < int(skip)+4 { + return nil, fmt.Errorf("too short source buffer") + } + source := src.B[skip:] + lenUnpacked := int(binary.BigEndian.Uint32(source[:4])) + reader, err := zlib.NewReader(bytes.NewBuffer(source[4:])) + if err != nil { + return nil, err + } + dst = TakeBuffer() + dst.Allocate(lenUnpacked) + if err := decompress(dst.B, reader); err != nil { + return nil, err + } + return +} +func DecompressGZIP(src *Buffer, skip uint) (dst *Buffer, err error) { + if src.Len() < int(skip)+4 { + return nil, fmt.Errorf("too short source buffer") + } + source := src.B[skip:] + lenUnpacked := int(binary.BigEndian.Uint32(source[:4])) + reader, err := gzip.NewReader(bytes.NewBuffer(source[4:])) + if err != nil { + return nil, err + } + dst = TakeBuffer() + dst.Allocate(lenUnpacked) + + if err := decompress(dst.B, reader); err != nil { + return nil, err + } + return +} + +func decompress(dst []byte, reader io.Reader) error { + total := 0 + for { + n, e := reader.Read(dst[total:]) + total += n + if e == io.EOF { + break + } + if n == 0 { + return fmt.Errorf("dst buffer too small") + } + if e != nil { + return e + } + } + if total != len(dst) { + return fmt.Errorf("unpacked size mismatch") + } + + return nil +} + +func init() { + for i := range gzipWriters { + gzipWriters[i] = &sync.Pool{ + New: func() interface{} { + return nil + }, + } + } +} diff --git a/lib/compress_test.go b/lib/compress_test.go new file mode 100644 index 00000000..fc86612d --- /dev/null +++ b/lib/compress_test.go @@ -0,0 +1,66 @@ +package lib + +import ( + "testing" +) + +var ( + srcCompress string = RandomString(1024) +) + +func TestCompressDecompressGZIP(t *testing.T) { + buf := TakeBuffer() + buf.AppendString(srcCompress) + header := uint(12) + dst, err := CompressGZIP(buf, header, 2) + if err != nil { + t.Fatal(err) + } + + d, err := DecompressGZIP(dst, header) + if
err != nil { + t.Fatal(err) + } + + if srcCompress != string(d.B) { + t.Fatal("incorrect result") + } +} + +func TestCompressDecompressZLIB(t *testing.T) { + buf := TakeBuffer() + buf.AppendString(srcCompress) + header := uint(12) + dst, err := CompressZLIB(buf, header) + if err != nil { + t.Fatal(err) + } + + d, err := DecompressZLIB(dst, header) + if err != nil { + t.Fatal(err) + } + + if srcCompress != string(d.B) { + t.Fatal("incorrect result") + } +} + +func TestCompressDecompressLZW(t *testing.T) { + buf := TakeBuffer() + buf.AppendString(srcCompress) + header := uint(12) + dst, err := CompressLZW(buf, header) + if err != nil { + t.Fatal(err) + } + + d, err := DecompressLZW(dst, header) + if err != nil { + t.Fatal(err) + } + + if srcCompress != string(d.B) { + t.Fatal("incorrect result") + } +} diff --git a/lib/errors.go b/lib/errors.go deleted file mode 100644 index 74259fe4..00000000 --- a/lib/errors.go +++ /dev/null @@ -1,53 +0,0 @@ -package lib - -import "fmt" - -var ( - ErrAppAlreadyLoaded = fmt.Errorf("application is already loaded") - ErrAppAlreadyStarted = fmt.Errorf("application is already started") - ErrAppUnknown = fmt.Errorf("unknown application name") - ErrAppIsNotRunning = fmt.Errorf("application is not running") - ErrNameUnknown = fmt.Errorf("unknown name") - ErrNameOwner = fmt.Errorf("not an owner") - ErrProcessBusy = fmt.Errorf("process is busy") - ErrProcessMailboxFull = fmt.Errorf("process mailbox is full") - ErrProcessUnknown = fmt.Errorf("unknown process") - ErrProcessContext = fmt.Errorf("not a Process context") - ErrProcessIncarnation = fmt.Errorf("process ID belongs to the previous incarnation") - ErrProcessTerminated = fmt.Errorf("process terminated") - ErrMonitorUnknown = fmt.Errorf("unknown monitor reference") - ErrSenderUnknown = fmt.Errorf("unknown sender") - ErrBehaviorUnknown = fmt.Errorf("unknown behavior") - ErrBehaviorGroupUnknown = fmt.Errorf("unknown behavior group") - ErrAliasUnknown = fmt.Errorf("unknown alias") - ErrAliasOwner = fmt.Errorf("not an owner") - ErrEventMismatch = fmt.Errorf("message type mismatch") - ErrEventUnknown = fmt.Errorf("unknown event type") - ErrEventOwner = fmt.Errorf("not an owner") - ErrEventSelf = fmt.Errorf("monitor events from itself") - ErrNoRoute = fmt.Errorf("no route to node") - ErrTaken = fmt.Errorf("resource is taken") - ErrFragmented = fmt.Errorf("fragmented data") - ErrReferenceUnknown = fmt.Errorf("unknown reference") - - ErrRouteName = fmt.Errorf("incorrect route name") - - ErrTimeout = fmt.Errorf("timed out") - ErrUnsupported = fmt.Errorf("not supported") - ErrUnknown = fmt.Errorf("unknown") - ErrPeerUnsupported = fmt.Errorf("peer does not support this feature") - - ErrUnsupportedRequest = fmt.Errorf("unsupported request") - ErrServerTerminated = fmt.Errorf("server terminated") - - ErrProxyUnknownRequest = fmt.Errorf("unknown proxy request") - ErrProxyTransitDisabled = fmt.Errorf("proxy feature disabled") - ErrProxyTransitRestricted = fmt.Errorf("proxy connect restricted") - ErrProxyNoRoute = fmt.Errorf("no proxy route to node") - ErrProxyConnect = fmt.Errorf("can't establish proxy connection") - ErrProxyHopExceeded = fmt.Errorf("proxy hop is exceeded") - ErrProxyLoopDetected = fmt.Errorf("proxy loop detected") - ErrProxyPathTooLong = fmt.Errorf("proxy path too long") - ErrProxySessionUnknown = fmt.Errorf("unknown session id") - ErrProxySessionDuplicate = fmt.Errorf("session is already exist") -) diff --git a/lib/flusher.go b/lib/flusher.go new file mode 100644 index 00000000..945fb93d --- /dev/null +++ 
b/lib/flusher.go @@ -0,0 +1,103 @@ +package lib + +import ( + "bufio" + "io" + "net" + "sync" + "time" +) + +const ( + latency time.Duration = 300 * time.Nanosecond +) + +func NewFlusherWithKeepAlive(conn net.Conn, keepalive []byte, keepalivePeriod time.Duration) io.Writer { + f := &flusher{ + writer: bufio.NewWriter(conn), + } + // the first interval should be longer + f.timer = time.AfterFunc(latency*10, func() { + f.Lock() + defer f.Unlock() + + if f.pending == false { + // nothing to write. send keepalive. + f.writer.Write(keepalive) + if err := f.writer.Flush(); err != nil { + return + } + + f.timer.Reset(keepalivePeriod) + return + } + + f.writer.Flush() + f.pending = false + f.timer.Reset(latency) + }) + + return f + +} + +func NewFlusher(conn net.Conn) io.Writer { + f := &flusher{ + writer: bufio.NewWriter(conn), + } + f.timer = time.AfterFunc(latency, func() { + f.Lock() + defer f.Unlock() + + if f.pending == false { + // nothing to write + return + } + + f.writer.Flush() + f.pending = false + f.timer.Reset(latency) + }) + return f +} + +type flusher struct { + sync.Mutex + timer *time.Timer + writer *bufio.Writer + pending bool +} + +func (f *flusher) Write(b []byte) (n int, err error) { + f.Lock() + defer f.Unlock() + + l := len(b) + + // write data to the buffer + for { + n, e := f.writer.Write(b) + if e != nil { + return n, e + } + // check if something left + l -= n + if l > 0 { + continue + } + break + } + + if f.pending { + return len(b), nil + } + + // if f.writer.Size() > 65000 { + // f.writer.Flush() + // return len(b), nil + // } + + f.pending = true + f.timer.Reset(latency) + return len(b), nil +} diff --git a/lib/map.go b/lib/map.go new file mode 100644 index 00000000..e2b736ce --- /dev/null +++ b/lib/map.go @@ -0,0 +1,87 @@ +package lib + +import ( + "sync" +) + +type Map[K comparable, V any] struct { + sync.RWMutex + m map[K]V +} + +func (m *Map[K, V]) Load(key K) (V, bool) { + m.RLock() + v, found := m.m[key] + m.RUnlock() + return v, found +} + +func (m *Map[K, V]) LoadAndDelete(key K) (V, bool) { + m.Lock() + v, found := m.m[key] + delete(m.m, key) + m.Unlock() + return v, found +} + +func (m *Map[K, V]) Store(key K, value V) { + m.Lock() + if m.m == nil { + m.m = make(map[K]V) + } + m.m[key] = value + m.Unlock() +} + +func (m *Map[K, V]) Delete(key K) { + m.Lock() + delete(m.m, key) + m.Unlock() +} + +// DeleteNoLock is to be used within the RangeLock method +func (m *Map[K, V]) DeleteNoLock(key K) { + delete(m.m, key) +} + +func (m *Map[K, V]) Range(f func(k K, v V) bool) { + m.RLock() + for mk, mv := range m.m { + if f(mk, mv) == false { + break + } + } + m.RUnlock() +}
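A short sketch of the generic Map above; its zero value is ready to use, since Store and LoadOrStore initialize the underlying map lazily:

package main

import (
	"fmt"

	"ergo.services/ergo/lib"
)

func main() {
	var m lib.Map[string, int] // zero value is usable

	m.Store("a", 1)
	m.LoadOrStore("b", 2) // keeps the existing value if the key is already present

	m.Range(func(k string, v int) bool {
		fmt.Println(k, v)
		return true // returning false stops the iteration
	})

	if v, found := m.LoadAndDelete("a"); found {
		fmt.Println("removed:", v)
	}
	fmt.Println("len:", m.Len())
}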
+ +// RangeLock locks the map during iteration. You can use DeleteNoLock +// within your f-function +func (m *Map[K, V]) RangeLock(f func(k K, v V) bool) { + m.Lock() + for mk, mv := range m.m { + if f(mk, mv) == false { + break + } + } + m.Unlock() +} + +func (m *Map[K, V]) Len() int { + m.RLock() + l := len(m.m) + m.RUnlock() + return l +} + +func (m *Map[K, V]) LoadOrStore(key K, value V) (V, bool) { + m.Lock() + if m.m == nil { + m.m = make(map[K]V) + } + if x, exist := m.m[key]; exist { + m.Unlock() + return x, true + } + m.m[key] = value + m.Unlock() + return value, false +} diff --git a/lib/mpsc.go b/lib/mpsc.go index 253d4e5f..edc176cb 100644 --- a/lib/mpsc.go +++ b/lib/mpsc.go @@ -9,23 +9,32 @@ import ( ) type queueMPSC struct { - head *itemMPSC - tail *itemMPSC + lock uint32 + head *itemMPSC + tail *itemMPSC + length int64 } type queueLimitMPSC struct { + lock uint32 head *itemMPSC tail *itemMPSC length int64 limit int64 + flush bool } type QueueMPSC interface { - Push(value interface{}) bool - Pop() (interface{}, bool) + Push(value any) bool + Pop() (any, bool) Item() ItemMPSC // Len returns the number of items in the queue Len() int64 + // Size returns the limit for the queue. -1 - for unlimited + Size() int64 + + Lock() bool + Unlock() bool } func NewQueueMPSC() QueueMPSC { @@ -36,13 +45,18 @@ func NewQueueMPSC() QueueMPSC { } } -func NewQueueLimitMPSC(limit int64) QueueMPSC { +// NewQueueLimitMPSC creates an MPSC queue with a limited length. Enabling the "flush" option +// makes this queue flush out the tail item if the limit has been reached. +// Warning: the enabled "flush" option also makes this queue unusable +// in a concurrent environment +func NewQueueLimitMPSC(limit int64, flush bool) QueueMPSC { if limit < 1 { limit = math.MaxInt64 } emptyItem := &itemMPSC{} return &queueLimitMPSC{ limit: limit, + flush: flush, head: emptyItem, tail: emptyItem, } @@ -50,30 +64,36 @@ func NewQueueLimitMPSC(limit int64) QueueMPSC { type ItemMPSC interface { Next() ItemMPSC - Value() interface{} + Value() any Clear() } type itemMPSC struct { - value interface{} + value any next *itemMPSC } // Push places the given value in the queue head (FIFO). Always returns true -func (q *queueMPSC) Push(value interface{}) bool { +func (q *queueMPSC) Push(value any) bool { i := &itemMPSC{ value: value, } + atomic.AddInt64(&q.length, 1) old_head := (*itemMPSC)(atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.head)), unsafe.Pointer(i))) atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&old_head.next)), unsafe.Pointer(i)) return true } // Push places the given value in the queue head (FIFO). Returns false if the limit is exceeded -func (q *queueLimitMPSC) Push(value interface{}) bool { +func (q *queueLimitMPSC) Push(value any) bool { if q.Len()+1 > q.limit { - return false + if q.flush == false { + return false + } + // flush one item to keep the length within the limit + q.Pop() } + atomic.AddInt64(&q.length, 1) i := &itemMPSC{ value: value, @@ -84,7 +104,7 @@ }
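A quick sketch of the limited queue with the flush option enabled (single producer and single consumer, per the warning above); with flush enabled, Push never fails and instead drops the oldest item:

package main

import (
	"fmt"

	"ergo.services/ergo/lib"
)

func main() {
	q := lib.NewQueueLimitMPSC(3, true) // limit of 3, flush the oldest item on overflow

	for i := 1; i <= 5; i++ {
		q.Push(i) // items 1 and 2 get flushed out to keep the length within the limit
	}

	for {
		v, ok := q.Pop()
		if ok == false {
			break
		}
		fmt.Println(v) // prints 3, 4, 5
	}
}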
// Pop takes the item from the queue tail. Returns false if the queue is empty. Can be used in a single consumer (goroutine) only. -func (q *queueMPSC) Pop() (interface{}, bool) { +func (q *queueMPSC) Pop() (any, bool) { tail_next := (*itemMPSC)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail.next)))) if tail_next == nil { return nil, false @@ -92,12 +112,17 @@ value := tail_next.value tail_next.value = nil // let the GC free this item + + // TODO a small race condition can happen with node.process.go:1500 when running a new goroutine + // to handle the process mailbox (invoking the Item() method). + // nothing serious, but we should use an atomic operation here to set q.tail q.tail = tail_next + atomic.AddInt64(&q.length, -1) return value, true } // Pop takes the item from the queue tail. Returns false if the queue is empty. Can be used in a single consumer (goroutine) only. -func (q *queueLimitMPSC) Pop() (interface{}, bool) { +func (q *queueLimitMPSC) Pop() (any, bool) { tail_next := (*itemMPSC)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail.next)))) if tail_next == nil { return nil, false @@ -110,9 +135,21 @@ return value, true } -// Len returns -1 for the queue with no limit func (q *queueMPSC) Len() int64 { - return -1 + return atomic.LoadInt64(&q.length) +} + +func (q *queueMPSC) Size() int64 { + return -1 // unlimited +} + +func (q *queueMPSC) Lock() bool { + return atomic.SwapUint32(&q.lock, 1) == 0 +} + +func (q *queueMPSC) Unlock() bool { + return atomic.SwapUint32(&q.lock, 0) == 1 + } // Len returns queue length @@ -120,6 +157,18 @@ return atomic.LoadInt64(&q.length) } +func (q *queueLimitMPSC) Size() int64 { + return q.limit +} +func (q *queueLimitMPSC) Lock() bool { + return atomic.SwapUint32(&q.lock, 1) == 0 +} + +func (q *queueLimitMPSC) Unlock() bool { + return atomic.SwapUint32(&q.lock, 0) == 1 + +} + // Item returns the tail item of the queue. Returns nil if queue is empty.
func (q *queueMPSC) Item() ItemMPSC { item := (*itemMPSC)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail.next)))) @@ -152,7 +201,7 @@ func (i *itemMPSC) Next() ItemMPSC { } // Value returns stored value of the queue item -func (i *itemMPSC) Value() interface{} { +func (i *itemMPSC) Value() any { return i.value } diff --git a/lib/mpsc_test.go b/lib/mpsc_test.go index 2a8242e0..14f1902a 100644 --- a/lib/mpsc_test.go +++ b/lib/mpsc_test.go @@ -15,7 +15,7 @@ func TestMPSCsequential(t *testing.T) { v int64 } l := int64(10) - queue := NewQueueLimitMPSC(l) + queue := NewQueueLimitMPSC(l, false) // append to the queue for i := int64(0); i < l; i++ { v := vv{v: i + 100} @@ -88,7 +88,7 @@ func TestMPSCparallel(t *testing.T) { v int64 } l := int64(100000) - queue := NewQueueLimitMPSC(l) + queue := NewQueueLimitMPSC(l, false) sum := int64(0) // append to the queue var wg sync.WaitGroup @@ -98,7 +98,7 @@ func TestMPSCparallel(t *testing.T) { wg.Add(1) go func(v vv) { if queue.Push(v) == false { - t.Fatal("can't push value into the queue") + panic("can't push value into the queue") } wg.Done() }(v) @@ -189,7 +189,7 @@ func BenchmarkMPSC(b *testing.B) { queues := map[string]testQueue{ "Chan queue ": newChanQueue(), "MPSC queue ": NewQueueMPSC(), - "MPSC with limit queue": NewQueueLimitMPSC(0), + "MPSC with limit queue": NewQueueLimitMPSC(0, false), } length := 1 << 12 diff --git a/lib/netreadwriter.go b/lib/netreadwriter.go deleted file mode 100644 index 9f157ee1..00000000 --- a/lib/netreadwriter.go +++ /dev/null @@ -1,21 +0,0 @@ -package lib - -import ( - "io" - "time" -) - -type NetReadWriter interface { - NetReader - NetWriter -} - -type NetReader interface { - io.Reader - SetReadDeadline(t time.Time) error -} - -type NetWriter interface { - io.Writer - SetWriteDeadline(t time.Time) error -} diff --git a/lib/norecover.go b/lib/norecover.go new file mode 100644 index 00000000..b2c77ce5 --- /dev/null +++ b/lib/norecover.go @@ -0,0 +1,7 @@ +//go:build !norecover + +package lib + +func Recover() bool { + return true +} diff --git a/lib/notrace.go b/lib/notrace.go new file mode 100644 index 00000000..abe84fdc --- /dev/null +++ b/lib/notrace.go @@ -0,0 +1,7 @@ +//go:build !trace + +package lib + +func Trace() bool { + return false +} diff --git a/lib/recover.go b/lib/recover.go new file mode 100644 index 00000000..0246ef8d --- /dev/null +++ b/lib/recover.go @@ -0,0 +1,7 @@ +//go:build norecover + +package lib + +func Recover() bool { + return false +} diff --git a/lib/timer.go b/lib/timer.go new file mode 100644 index 00000000..070a2d67 --- /dev/null +++ b/lib/timer.go @@ -0,0 +1,30 @@ +package lib + +import ( + "sync" + "time" +) + +var ( + timers = &sync.Pool{ + New: func() interface{} { + return time.NewTimer(time.Second * 5) + }, + } +) + +// TakeTimer +func TakeTimer() *time.Timer { + return timers.Get().(*time.Timer) +} + +// ReleaseTimer +func ReleaseTimer(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + timers.Put(t) +} diff --git a/lib/tools.go b/lib/tools.go index a903394d..86f0f23d 100644 --- a/lib/tools.go +++ b/lib/tools.go @@ -3,261 +3,8 @@ package lib import ( "crypto/rand" "encoding/hex" - "flag" - "fmt" - "hash/crc32" - "io" - "log" - "math" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" ) -// Buffer -type Buffer struct { - B []byte - original []byte -} - -var ( - ergoTrace = false - ergoWarning = false - ergoNoRecover = false - ergoDebug = false - - DefaultBufferLength = 16384 - buffers = &sync.Pool{ - New: func() 
interface{} { - b := &Buffer{ - B: make([]byte, 0, DefaultBufferLength), - } - b.original = b.B - return b - }, - } - - timers = &sync.Pool{ - New: func() interface{} { - return time.NewTimer(time.Second * 5) - }, - } - - ErrTooLarge = fmt.Errorf("Too large") - - CRC32Q = crc32.MakeTable(0xD5828281) -) - -func init() { - flag.BoolVar(&ergoTrace, "ergo.trace", false, "enable/disable extended node logging info") - flag.BoolVar(&ergoWarning, "ergo.warning", true, "enable/disable warning messages") - flag.BoolVar(&ergoNoRecover, "ergo.norecover", false, "disable panic catching") - flag.BoolVar(&ergoDebug, "ergo.debug", false, "enable/disable debug messages") -} - -// Log -func Log(f string, a ...interface{}) { - if ergoTrace { - printf(f, a...) - } -} - -// Warning -func Warning(f string, a ...interface{}) { - if ergoWarning { - printf("WARNING! "+f, a...) - } -} - -// Print log information -func printf(formating string, args ...interface{}) { - if ergoDebug { - goID, fileName, line, funcName := 0, "unknown", 0, "unknown" - var buf [64]byte - n := runtime.Stack(buf[:], false) - idField := strings.Fields(strings.TrimPrefix(string(buf[:n]), "goroutine "))[0] - goID, _ = strconv.Atoi(idField) - pc, fileName, line, ok := runtime.Caller(2) - if ok { - funcName = runtime.FuncForPC(pc).Name() - funcName = filepath.Base(funcName) - fileName = filepath.Base(fileName) - } - log.Printf("@%s:%d %s %s\n", goID, fileName, line, funcName, fmt.Sprintf(formating, args...)) - } else { - log.Printf(formating, args...) - } -} - -// CatchPanic -func CatchPanic() bool { - return ergoNoRecover == false -} - -// TakeTimer -func TakeTimer() *time.Timer { - return timers.Get().(*time.Timer) -} - -// ReleaseTimer -func ReleaseTimer(t *time.Timer) { - if !t.Stop() { - select { - case <-t.C: - default: - } - } - timers.Put(t) -} - -// TakeBuffer -func TakeBuffer() *Buffer { - return buffers.Get().(*Buffer) -} - -// ReleaseBuffer -func ReleaseBuffer(b *Buffer) { - c := cap(b.B) - // cO := cap(b.original) - // overlaps := c > 0 && cO > 0 && &(x[:c][c-1]) == &(y[:cO][cO-1]) - if c > DefaultBufferLength && c < 65536 { - // reallocation happened. keep reallocated buffer as an original - // if it doesn't exceed the size of 65K (we don't want to keep - // too big slices) - b.original = b.B[:0] - } - b.B = b.original[:0] - buffers.Put(b) -} - -// Reset -func (b *Buffer) Reset() { - c := cap(b.B) - if c > DefaultBufferLength && c < 65536 { - b.original = b.B[:0] - } - // use the original start point of the slice - b.B = b.original[:0] -} - -// Set -func (b *Buffer) Set(v []byte) { - b.B = append(b.B[:0], v...) -} - -// AppendByte -func (b *Buffer) AppendByte(v byte) { - b.B = append(b.B, v) -} - -// Append -func (b *Buffer) Append(v []byte) { - b.B = append(b.B, v...) -} - -// String -func (b *Buffer) String() string { - return string(b.B) -} - -// Len -func (b *Buffer) Len() int { - return len(b.B) -} - -// WriteDataTo -func (b *Buffer) WriteDataTo(w io.Writer) error { - l := len(b.B) - if l == 0 { - return nil - } - - for { - n, e := w.Write(b.B) - if e != nil { - return e - } - - l -= n - if l > 0 { - continue - } - - break - } - - b.Reset() - return nil -} - -// ReadDataFrom -func (b *Buffer) ReadDataFrom(r io.Reader, limit int) (int, error) { - capB := cap(b.B) - lenB := len(b.B) - if limit == 0 { - limit = math.MaxInt - } - // if buffer becomes too large - if lenB > limit { - return 0, ErrTooLarge - } - if capB-lenB < capB>>1 { - // less than (almost) 50% space left. 
increase capacity - b.increase() - capB = cap(b.B) - } - n, e := r.Read(b.B[lenB:capB]) - l := lenB + n - b.B = b.B[:l] - return n, e -} - -func (b *Buffer) Write(v []byte) (n int, err error) { - b.B = append(b.B, v...) - return len(v), nil -} - -func (b *Buffer) Read(v []byte) (n int, err error) { - copy(v, b.B) - return len(b.B), io.EOF -} - -func (b *Buffer) increase() { - cap1 := cap(b.B) * 8 - b1 := make([]byte, cap(b.B), cap1) - copy(b1, b.B) - b.B = b1 -} - -// Allocate -func (b *Buffer) Allocate(n int) { - for { - if cap(b.B) < n { - b.increase() - continue - } - b.B = b.B[:n] - return - } -} - -// Extend -func (b *Buffer) Extend(n int) []byte { - l := len(b.B) - e := l + n - for { - if e > cap(b.B) { - b.increase() - continue - } - b.B = b.B[:e] - return b.B[l:e] - } -} - // RandomString func RandomString(length int) string { buff := make([]byte, length/2) diff --git a/lib/tools_test.go b/lib/tools_test.go deleted file mode 100644 index 4fb90802..00000000 --- a/lib/tools_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package lib - -import ( - "testing" -) - -func TestBuffer(t *testing.T) { - b := TakeBuffer() - - if cap(b.B) != DefaultBufferLength { - t.Fatal("incorrect capacity") - } - - if len(b.B) != 0 { - t.Fatal("should be zero length") - } - -} diff --git a/lib/trace.go b/lib/trace.go new file mode 100644 index 00000000..80b86c34 --- /dev/null +++ b/lib/trace.go @@ -0,0 +1,7 @@ +//go:build trace + +package lib + +func Trace() bool { + return true +} diff --git a/meta/tcp_connection.go b/meta/tcp_connection.go new file mode 100644 index 00000000..b392eb5b --- /dev/null +++ b/meta/tcp_connection.go @@ -0,0 +1,198 @@ +package meta + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + "sync" + "sync/atomic" + + "ergo.services/ergo/gen" +) + +func CreateTCPConnection(options TCPConnectionOptions) (gen.MetaBehavior, error) { + var conn net.Conn + + hp := net.JoinHostPort(options.Host, strconv.Itoa(int(options.Port))) + if options.CertManager != nil { + config := &tls.Config{ + GetCertificate: options.CertManager.GetCertificateFunc(), + InsecureSkipVerify: options.InsecureSkipVerify, + } + c, err := tls.Dial("tcp", hp, config) + if err != nil { + return nil, err + } + conn = c + } else { + dialer := net.Dialer{} + if options.KeepAlivePeriod > 0 { + dialer.KeepAlive = options.KeepAlivePeriod + } + c, err := dialer.Dial("tcp", hp) + if err != nil { + return nil, err + } + conn = c + } + + if options.BufferSize < 1 { + options.BufferSize = gen.DefaultTCPBufferSize + } + + c := &tcpconnection{ + conn: conn, + bufpool: options.BufferPool, + bufferSize: options.BufferSize, + } + + if options.Process == "" { + return c, nil + } + + c.process = options.Process + return c, nil +} + +// +// Connection gen.MetaBehavior implementation +// + +type tcpconnection struct { + gen.MetaProcess + process gen.Atom + conn net.Conn + bufpool *sync.Pool + bufferSize int + bytesIn uint64 + bytesOut uint64 +} + +func (t *tcpconnection) Init(process gen.MetaProcess) error { + t.MetaProcess = process + return nil +} + +func (t *tcpconnection) Start() error { + var to any + var buf []byte + + id := t.ID() + + if t.process == "" { + to = t.Parent() + } else { + to = t.process + } + + defer func() { + t.conn.Close() + message := MessageTCPDisconnect{ + ID: id, + } + if err := t.Send(to, message); err != nil { + t.Log().Error("unable to send MessageTCPDisconnect to %s: %s", to, err) + return + } + }() + + message := MessageTCPConnect{ + ID: id, + RemoteAddr: t.conn.RemoteAddr(), + LocalAddr: t.conn.LocalAddr(), + } + if
err := t.Send(to, message); err != nil { + t.Log().Error("unable to send MessageTCPConnect to %v: %s", to, err) + return err + } + + for { + if t.bufpool == nil { + buf = make([]byte, t.bufferSize) + } else { + buf = t.bufpool.Get().([]byte) + } + + retry: + n, err := t.conn.Read(buf) + if err != nil { + if n == 0 { + // closed connection + return nil + } + + t.Log().Error("unable to read from tcp socket: %s", err) + return err + } + if n == 0 { + // keepalive + goto retry // use goto to get rid of buffer reallocation + } + message := MessageTCP{ + ID: id, + Data: buf[:n], + } + atomic.AddUint64(&t.bytesIn, uint64(n)) + if err := t.Send(to, message); err != nil { + t.Log().Error("unable to send MessageTCP: %s", err) + return err + } + } +} + +func (t *tcpconnection) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case MessageTCP: + l := len(m.Data) + lenD := l + for { + n, e := t.conn.Write(m.Data[lenD-l:]) + if e != nil { + return e + } + // check if something left + l -= n + if l == 0 { + break + } + } + atomic.AddUint64(&t.bytesOut, uint64(lenD)) + if t.bufpool != nil { + t.bufpool.Put(m.Data) + } + default: + t.Log().Error("unsupported message from %s. ignored", from) + } + return nil +} + +func (t *tcpconnection) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + return nil, nil +} + +func (t *tcpconnection) Terminate(reason error) { + defer t.conn.Close() + if reason == nil || reason == gen.TerminateReasonNormal { + return + } + t.Log().Error("terminated abnormally: %s", reason) +} + +func (t *tcpconnection) HandleInspect(from gen.PID, item ...string) map[string]string { + var to any + bytesIn := atomic.LoadUint64(&t.bytesIn) + bytesOut := atomic.LoadUint64(&t.bytesOut) + if t.process == "" { + to = t.Parent() + } else { + to = t.process + } + return map[string]string{ + "local": t.conn.LocalAddr().String(), + "remote": t.conn.RemoteAddr().String(), + "process": fmt.Sprintf("%s", to), + "bytes in": fmt.Sprintf("%d", bytesIn), + "bytes out": fmt.Sprintf("%d", bytesOut), + } +} diff --git a/meta/tcp_server.go b/meta/tcp_server.go new file mode 100644 index 00000000..bdff14f4 --- /dev/null +++ b/meta/tcp_server.go @@ -0,0 +1,131 @@ +package meta + +import ( + "context" + "crypto/tls" + "errors" + "net" + "strconv" + "sync" + + "ergo.services/ergo/gen" +) + +// +// TCP Server meta process +// + +func CreateTCPServer(options TCPServerOptions) (gen.MetaBehavior, error) { + lc := net.ListenConfig{ + KeepAlive: -1, // disabled + } + if options.KeepAlivePeriod > 0 { + lc.KeepAlive = options.KeepAlivePeriod + } + if options.BufferSize < 1 { + options.BufferSize = gen.DefaultTCPBufferSize + } + + // check sync.Pool + if options.BufferPool != nil { + b := options.BufferPool.Get() + if _, ok := b.([]byte); ok == false { + return nil, errors.New("options.BufferPool must be a pool of []byte values") + } + // get it back to the pool + options.BufferPool.Put(b) + } + + hp := net.JoinHostPort(options.Host, strconv.Itoa(int(options.Port))) + listener, err := lc.Listen(context.Background(), "tcp", hp) + if err != nil { + return nil, err + } + + if options.CertManager != nil { + config := &tls.Config{ + GetCertificate: options.CertManager.GetCertificateFunc(), + InsecureSkipVerify: options.InsecureSkipVerify, + } + listener = tls.NewListener(listener, config) + } + + s := &tcpserver{ + bufpool: options.BufferPool, + listener: listener, + procpool: options.ProcessPool, + bufferSize: options.BufferSize, + } + + return s, nil +}
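A hedged wiring sketch for the TCP meta processes above. It assumes the meta behavior is attached to an actor with SpawnMeta and that Send accepts a gen.Alias as the destination; the actor type, address, and port are illustrative, not part of this patch:

package example

import (
	"ergo.services/ergo/act"
	"ergo.services/ergo/gen"
	"ergo.services/ergo/meta"
)

type tcpOwner struct {
	act.Actor
}

func (a *tcpOwner) Init(args ...any) error {
	// Accepted connections are spawned as connection meta processes that
	// report to this actor (ProcessPool is empty, so Parent is used).
	server, err := meta.CreateTCPServer(meta.TCPServerOptions{
		Host: "127.0.0.1",
		Port: 12345,
	})
	if err != nil {
		return err
	}
	// SpawnMeta attaches the listening meta process to this actor.
	_, err = a.SpawnMeta(server, gen.MetaOptions{})
	return err
}

func (a *tcpOwner) HandleMessage(from gen.PID, message any) error {
	switch m := message.(type) {
	case meta.MessageTCPConnect:
		a.Log().Info("new connection %s from %s", m.ID, m.RemoteAddr)
	case meta.MessageTCP:
		// echo the payload back through the meta process that owns the socket
		return a.Send(m.ID, m)
	case meta.MessageTCPDisconnect:
		a.Log().Info("connection %s closed", m.ID)
	}
	return nil
}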
+type tcpserver struct { + gen.MetaProcess + procpool []gen.Atom + bufpool *sync.Pool + listener net.Listener + bufferSize int +} + +func (t *tcpserver) Init(process gen.MetaProcess) error { + t.MetaProcess = process + return nil +} + +func (t *tcpserver) Start() error { + i := 0 + + for { + conn, err := t.listener.Accept() + if err != nil { + return err + } + + c := &tcpconnection{ + conn: conn, + bufpool: t.bufpool, + bufferSize: t.bufferSize, + } + if len(t.procpool) > 0 { + l := len(t.procpool) + c.process = t.procpool[i%l] + } + + if _, err := t.Spawn(c, gen.MetaOptions{}); err != nil { + conn.Close() + t.Log().Error("unable to spawn meta process: %s", err) + } + i++ + + } +} +func (t *tcpserver) HandleMessage(from gen.PID, message any) error { + if t.MetaProcess != nil { + t.Log().Error("ignored message from %s", from) + return nil + } + return nil +} + +func (t *tcpserver) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + if t.MetaProcess != nil { + t.Log().Error("ignored request from %s", from) + } + return gen.ErrUnsupported, nil +} + +func (t *tcpserver) Terminate(reason error) { + defer t.listener.Close() + + if reason == nil || reason == gen.TerminateReasonNormal { + return + } + t.Log().Error("terminated abnormally: %s", reason) +} + +func (t *tcpserver) HandleInspect(from gen.PID, item ...string) map[string]string { + return map[string]string{ + "listener": t.listener.Addr().String(), + } +} diff --git a/meta/tcp_types.go b/meta/tcp_types.go new file mode 100644 index 00000000..426e732f --- /dev/null +++ b/meta/tcp_types.go @@ -0,0 +1,44 @@ +package meta + +import ( + "ergo.services/ergo/gen" + "net" + "sync" + "time" +) + +type MessageTCPConnect struct { + ID gen.Alias + RemoteAddr net.Addr + LocalAddr net.Addr +} + +type MessageTCPDisconnect struct { + ID gen.Alias +} + +type MessageTCP struct { + ID gen.Alias + Data []byte +} + +type TCPConnectionOptions struct { + Host string + Port uint16 + Process gen.Atom + CertManager gen.CertManager + BufferSize int + BufferPool *sync.Pool + KeepAlivePeriod time.Duration + InsecureSkipVerify bool +} +type TCPServerOptions struct { + Host string + Port uint16 + ProcessPool []gen.Atom + CertManager gen.CertManager + BufferSize int + BufferPool *sync.Pool + KeepAlivePeriod time.Duration + InsecureSkipVerify bool +} diff --git a/meta/udp_server.go b/meta/udp_server.go new file mode 100644 index 00000000..34641277 --- /dev/null +++ b/meta/udp_server.go @@ -0,0 +1,147 @@ +package meta + +import ( + "errors" + "fmt" + "net" + "strconv" + "sync" + "sync/atomic" + + "ergo.services/ergo/gen" +) + +// +// UDP Server meta process +// + +const ( + defaultUDPBufferSize int = 65000 +) + +func CreateUDPServer(options UDPServerOptions) (gen.MetaBehavior, error) { + hp := net.JoinHostPort(options.Host, strconv.Itoa(int(options.Port))) + pc, err := net.ListenPacket("udp", hp) + if err != nil { + return nil, err + } + + mb := &udpserver{ + pc: pc, + } + if options.BufferSize < 1 { + options.BufferSize = defaultUDPBufferSize + } + + mb.bufferSize = options.BufferSize + + // check sync.Pool + if options.BufferPool != nil { + b := options.BufferPool.Get() + if _, ok := b.([]byte); ok == false { + return nil, errors.New("options.BufferPool must be a pool of []byte values") + } + // get it back to the pool + options.BufferPool.Put(b) + } + + mb.bufpool = options.BufferPool + mb.process = options.Process + + return mb, nil +} + +type udpserver struct { + gen.MetaProcess + pc net.PacketConn + bufferSize int + process gen.Atom + bufpool *sync.Pool + + bytesIn uint64 +
bytesOut uint64 +} + +func (u *udpserver) Init(process gen.MetaProcess) error { + u.MetaProcess = process + return nil +} + +func (u *udpserver) Start() error { + var buf []byte + var to any + + if u.process == "" { + to = u.Parent() + } else { + to = u.process + } + + id := u.ID() + + for { + if u.bufpool == nil { + buf = make([]byte, u.bufferSize) + } else { + b := u.bufpool.Get() + buf = b.([]byte) + } + n, addr, err := u.pc.ReadFrom(buf) + if n > 0 { + packet := MessageUDP{ + ID: id, + Data: buf[:n], + Addr: addr, + } + atomic.AddUint64(&u.bytesIn, uint64(n)) + + if err := u.Send(to, packet); err != nil { + u.Log().Error("unable to send MessageUDP to %s: %s", to, err) + } + } + if err != nil { + return err + } + } +} + +func (u *udpserver) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case MessageUDP: + n, err := u.pc.WriteTo(m.Data, m.Addr) + if u.bufpool != nil { + u.bufpool.Put(m.Data) + } + atomic.AddUint64(&u.bytesOut, uint64(n)) + return err + default: + u.Log().Error("unsupported message from %s. ignored", from) + } + return nil +} + +func (u *udpserver) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + return nil, nil +} + +func (u *udpserver) Terminate(reason error) { + u.pc.Close() +} + +func (u *udpserver) HandleInspect(from gen.PID, item ...string) map[string]string { + var to any + bytesIn := atomic.LoadUint64(&u.bytesIn) + bytesOut := atomic.LoadUint64(&u.bytesOut) + + if u.process == "" { + to = u.Parent() + } else { + to = u.process + } + return map[string]string{ + "listener": u.pc.LocalAddr().String(), + "process": fmt.Sprintf("%s", to), + "bytes in": fmt.Sprintf("%d", bytesIn), + "bytes out": fmt.Sprintf("%d", bytesOut), + } +} diff --git a/meta/udp_types.go b/meta/udp_types.go new file mode 100644 index 00000000..22395237 --- /dev/null +++ b/meta/udp_types.go @@ -0,0 +1,22 @@ +package meta + +import ( + "net" + "sync" + + "ergo.services/ergo/gen" +) + +type UDPServerOptions struct { + Host string + Port uint16 + Process gen.Atom + BufferSize int + BufferPool *sync.Pool +} + +type MessageUDP struct { + ID gen.Alias + Addr net.Addr + Data []byte +} diff --git a/meta/web_handler.go b/meta/web_handler.go new file mode 100644 index 00000000..2ed012f1 --- /dev/null +++ b/meta/web_handler.go @@ -0,0 +1,123 @@ +package meta + +import ( + "context" + "fmt" + "net/http" + "time" + + "ergo.services/ergo/gen" +) + +// +// Web Handler meta process +// + +func CreateWebHandler(options WebHandlerOptions) WebHandler { + if options.RequestTimeout == 0 { + options.RequestTimeout = 5 * time.Second + } + + return &webhandler{ + options: options, + ch: make(chan error), + } +} + +type WebHandler interface { + http.Handler + gen.MetaBehavior +} + +type webhandler struct { + gen.MetaProcess + options WebHandlerOptions + to any + terminated bool + ch chan error +} + +// +// gen.MetaBehavior implementation +// + +func (w *webhandler) Init(process gen.MetaProcess) error { + w.MetaProcess = process + return nil +} + +func (w *webhandler) Start() error { + if w.options.Worker == "" { + w.to = w.Parent() + } else { + w.to = w.options.Worker + } + return <-w.ch +} + +func (w *webhandler) HandleMessage(from gen.PID, message any) error { + return nil +} + +func (w *webhandler) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + return gen.ErrUnsupported, nil +} + +func (w *webhandler) Terminate(reason error) { + w.terminated = true + w.ch <- reason + close(w.ch) +} + +func (w *webhandler) HandleInspect(from gen.PID, item 
...string) map[string]string { + if w.MetaProcess == nil { + return nil + } + return map[string]string{ + "worker process": fmt.Sprintf("%s", w.to), + } +} + +// +// http.Handler implementation +// + +func (w *webhandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + if w.MetaProcess == nil { + http.Error(writer, "Handler is not initialized", http.StatusServiceUnavailable) + return + } + + if w.terminated { + http.Error(writer, "Handler terminated", http.StatusServiceUnavailable) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), w.options.RequestTimeout) + + message := MessageWebRequest{ + Response: writer, + Request: request, + Done: cancel, + } + if err := w.Send(w.to, message); err != nil { + w.Log().Error("cannot handle HTTP request: %s", err) + http.Error(writer, "Bad gateway", http.StatusBadGateway) + cancel() + return + } + + <-ctx.Done() + + err := ctx.Err() + switch err { + case context.Canceled: + return + case context.DeadlineExceeded: + w.Log().Error("handling HTTP request timed out") + http.Error(writer, "Gateway timeout", http.StatusGatewayTimeout) + default: + cancel() + w.Log().Error("got context error: %s", err) + } +} diff --git a/meta/web_server.go b/meta/web_server.go new file mode 100644 index 00000000..926fab29 --- /dev/null +++ b/meta/web_server.go @@ -0,0 +1,79 @@ +package meta + +import ( + "crypto/tls" + "log" + "net" + "net/http" + "strconv" + "strings" + + "ergo.services/ergo/gen" +) + +// +// Web Server meta process +// + +func CreateWebServer(options WebServerOptions) (gen.MetaBehavior, error) { + hostPort := net.JoinHostPort(options.Host, strconv.Itoa(int(options.Port))) + listener, err := net.Listen("tcp", hostPort) + if err != nil { + return nil, err + } + if options.CertManager != nil { + config := &tls.Config{GetCertificate: options.CertManager.GetCertificateFunc()} + listener = tls.NewListener(listener, config) + } + + w := &webserver{ + listener: listener, + } + + w.server = http.Server{ + Handler: options.Handler, + ErrorLog: log.New(w, "", 0), + } + return w, nil +} + +type webserver struct { + gen.MetaProcess + server http.Server + listener net.Listener +} + +func (w *webserver) Init(process gen.MetaProcess) error { + w.MetaProcess = process + w.Log().Debug("web server started on %s", w.listener.Addr()) + return nil +} + +func (w *webserver) Start() error { + w.server.Serve(w.listener) + return nil +} + +func (w *webserver) HandleMessage(from gen.PID, message any) error { + return nil +} + +func (w *webserver) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + return nil, nil +} + +func (w *webserver) Terminate(reason error) { + w.listener.Close() +} + +func (w *webserver) HandleInspect(from gen.PID, item ...string) map[string]string { + return map[string]string{ + "listener": w.listener.Addr().String(), + } +}
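A hedged end-to-end sketch combining CreateWebHandler and CreateWebServer above (the actor type, path, and port are illustrative; it assumes the same SpawnMeta attachment flow as the other meta behaviors, with the actor answering MessageWebRequest and calling Done once the response is written):

package example

import (
	"net/http"

	"ergo.services/ergo/act"
	"ergo.services/ergo/gen"
	"ergo.services/ergo/meta"
)

type web struct {
	act.Actor
}

func (w *web) Init(args ...any) error {
	mux := http.NewServeMux()

	// Requests hit the handler meta process, which forwards MessageWebRequest
	// to this actor (Worker is empty, so Parent receives it) and waits for Done.
	handler := meta.CreateWebHandler(meta.WebHandlerOptions{})
	if _, err := w.SpawnMeta(handler, gen.MetaOptions{}); err != nil {
		return err
	}
	mux.Handle("/", handler)

	server, err := meta.CreateWebServer(meta.WebServerOptions{
		Host:    "localhost",
		Port:    9090,
		Handler: mux,
	})
	if err != nil {
		return err
	}
	_, err = w.SpawnMeta(server, gen.MetaOptions{})
	return err
}

func (w *web) HandleMessage(from gen.PID, message any) error {
	switch m := message.(type) {
	case meta.MessageWebRequest:
		defer m.Done() // releases the handler meta process waiting on the request context
		m.Response.Write([]byte("hello"))
	}
	return nil
}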
+ +func (w *webserver) Write(log []byte) (int, error) { + // http server adds '[\r]\n' at the end of the message. remove it before logging + w.Log().Error(strings.TrimSpace(string(log))) + return len(log), nil +} diff --git a/meta/web_types.go b/meta/web_types.go new file mode 100644 index 00000000..c283f87d --- /dev/null +++ b/meta/web_types.go @@ -0,0 +1,25 @@ +package meta + +import ( + "net/http" + "time" + + "ergo.services/ergo/gen" +) + +type WebServerOptions struct { + Host string + Port uint16 + CertManager gen.CertManager + Handler http.Handler +} +type WebHandlerOptions struct { + Worker gen.Atom + RequestTimeout time.Duration +} + +type MessageWebRequest struct { + Response http.ResponseWriter + Request *http.Request + Done func() +} diff --git a/net/README.md b/net/README.md new file mode 100644 index 00000000..7efdfb51 --- /dev/null +++ b/net/README.md @@ -0,0 +1,3 @@ +# Ergo Network Stack + +Doc: https://docs.ergo.services/networking/network-stack diff --git a/net/edf/benchmarks_test.go b/net/edf/benchmarks_test.go new file mode 100644 index 00000000..db4a4191 --- /dev/null +++ b/net/edf/benchmarks_test.go @@ -0,0 +1,694 @@ +package edf + +import ( + "bytes" + "encoding/gob" + "reflect" + "sync" + "testing" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +func BenchmarkEncodeString(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode("Ergo Framework", buf, Options{}); err != nil { + b.Fatal(err) + } + } +} +func BenchmarkEncodeStringGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + gob.Register(benchEncodeStruct{}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode("Ergo Framework"); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeAtom(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(gen.Atom("Ergo Framework"), buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeAtomGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + gob.Register(benchEncodeStruct{}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(gen.Atom("Ergo Framework")); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeBinary(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + value := []byte{1, 2, 3, 4, 5} + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeBinaryGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := []byte{1, 2, 3, 4, 5} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeSlice(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := []int{12345, 67890, 12345, 67890} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeSliceGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := []int{12345, 67890, 12345, 67890} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeArray(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := [4]int{12345, 67890, 12345, 67890} + b.ResetTimer() + + for i
:= 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeArrayGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := [4]int{12345, 67890, 12345, 67890} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMap(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := map[string]int{ + "key1": 1234, + "key2": 5678, + } + b.ResetTimer() + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMapGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := map[string]int{ + "key1": 1234, + "key2": 5678, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodePID(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := gen.PID{Node: "demo@127.0.0.1", ID: 312, Creation: 2} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodePIDGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := gen.PID{Node: "demo@127.0.0.1", ID: 312, Creation: 2} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeProcessID(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := gen.ProcessID{Node: "demo@127.0.0.1", Name: "example"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeProcessIDGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := gen.ProcessID{Node: "demo@127.0.0.1", Name: "example"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeRef(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := gen.Ref{ + Node: "demo@127.0.0.1", + Creation: 2, + ID: [3]uint32{73444, 3082813441, 2373634851}, + } + + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeRefGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + + value := gen.Ref{ + Node: "demo@127.0.0.1", + Creation: 2, + ID: [3]uint32{73444, 3082813441, 2373634851}, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +type benchEncodeStruct struct { + A float32 + B float64 + C any +} + +func BenchmarkEncodeStructEDF(b *testing.B) { + var regCache *sync.Map + + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := benchEncodeStruct{ + A: 3.14, + B: 3.15, + C: float64(3.18), + } + + RegisterTypeOf(benchEncodeStruct{}) + regCache = new(sync.Map) + regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{RegCache: regCache}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeStructGob(b 
*testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + gob.Register(benchEncodeStruct{}) + + value := benchEncodeStruct{ + A: 3.14, + B: 3.15, + C: float64(3.18), + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeSliceStructEDF(b *testing.B) { + var regCache *sync.Map + + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := []benchEncodeStruct{ + { + A: 3.14, + B: 3.15, + C: float64(3.18), + }, + { + A: 3.15, + B: 3.14, + C: float64(3.19), + }, + } + + RegisterTypeOf(benchEncodeStruct{}) + regCache = new(sync.Map) + regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := Encode(value, buf, Options{RegCache: regCache}); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeSliceStructGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + gob.Register(benchEncodeStruct{}) + + value := []benchEncodeStruct{ + { + A: 3.14, + B: 3.15, + C: float64(3.18), + }, + { + A: 3.15, + B: 3.14, + C: float64(3.19), + }, + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + } +} + +// decoding + +func BenchmarkDecodeString(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + if err := Encode("Ergo Framework", buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeAtom(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + if err := Encode(gen.Atom("Ergo Framework"), buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeBinary(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := []byte{1, 2, 3, 4, 5} + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeSlice(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := []int{12345, 67890, 12345, 67890} + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeArray(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := [4]int{12345, 67890, 12345, 67890} + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeMap(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := map[string]int{ + "key1": 1234, + "key2": 5678, + } + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodePID(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := gen.PID{Node: "demo@127.0.0.1", ID: 
312, Creation: 2} + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeProcessID(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := gen.ProcessID{Node: "demo@127.0.0.1", Name: "example"} + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeRef(b *testing.B) { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := gen.Ref{ + Node: "demo@127.0.0.1", + Creation: 2, + ID: [3]uint32{73444, 3082813441, 2373634851}, + } + if err := Encode(value, buf, Options{}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeStructEDF(b *testing.B) { + var regCache *sync.Map + + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := benchEncodeStruct{ + A: 3.14, + B: 3.15, + C: float64(3.18), + } + + RegisterTypeOf(benchEncodeStruct{}) + //regCache = new(sync.Map) + //regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + if err := Encode(value, buf, Options{RegCache: regCache}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeStructGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + gob.Register(benchEncodeStruct{}) + + value := benchEncodeStruct{ + A: 3.14, + B: 3.15, + C: float64(3.18), + } + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var x benchEncodeStruct + decbuf := bytes.NewBuffer(buf.Bytes()) + dec := gob.NewDecoder(decbuf) + if err := dec.Decode(&x); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeSliceStructEDF(b *testing.B) { + var regCache *sync.Map + + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + value := []benchEncodeStruct{ + { + A: 3.14, + B: 3.15, + C: float64(3.18), + }, + { + A: 3.15, + B: 3.14, + C: float64(3.19), + }, + } + + RegisterTypeOf(benchEncodeStruct{}) + //regCache = new(sync.Map) + //regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + if err := Encode(value, buf, Options{RegCache: regCache}); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := Decode(buf.B, Options{}) + if err != nil { + b.Fatal(err) + } + } +} +func BenchmarkDecodeSliceStructGob(b *testing.B) { + var buf bytes.Buffer + + enc := gob.NewEncoder(&buf) + gob.Register(benchEncodeStruct{}) + + value := []benchEncodeStruct{ + { + A: 3.14, + B: 3.15, + C: float64(3.18), + }, + { + A: 3.15, + B: 3.14, + C: float64(3.19), + }, + } + if err := enc.Encode(value); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var x []benchEncodeStruct + decbuf := bytes.NewBuffer(buf.Bytes()) + dec := gob.NewDecoder(decbuf) + if err := dec.Decode(&x); err != nil { + b.Fatal(err) + } + } +} diff --git a/net/edf/decode.go b/net/edf/decode.go new file mode 100644 index 00000000..10f7ed82 --- /dev/null +++ b/net/edf/decode.go @@ -0,0 +1,1257 @@ +package edf + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "time" + + 
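+ // framework-local imports: gen defines the wire-level identity types
+ // decoded below (Atom, PID, ProcessID, Ref, Alias, Event); lib provides
+ // the pooled buffers and the Recover() switch used in Decode.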
"ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + errInternal = fmt.Errorf("internal error") + errDecodeEOD = fmt.Errorf("end of data") + + anyType = reflect.TypeOf((*any)(nil)).Elem() + errType = reflect.TypeOf((*error)(nil)).Elem() +) + +type stateDecode struct { + child *stateDecode + options Options + + decodeType bool + decoder *decoder +} + +// Decode +func Decode(packet []byte, options Options) (_ any, _ []byte, ret error) { + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + ret = fmt.Errorf("%v", r) + } + }() + } + + state := &stateDecode{ + options: options, + decodeType: true, + } + + dec, packet, err := getDecoder(packet, state) + if err != nil { + return nil, nil, err + } + + if dec == nil { + return nil, packet, nil + } + state.decoder = dec + v := reflect.Indirect(reflect.New(dec.Type)) + + value, packet, err := dec.Decode(&v, packet, state) + if err != nil { + return nil, nil, fmt.Errorf("malformed EDF: %w", err) + } + return value.Interface(), packet, nil +} + +func getDecoder(packet []byte, state *stateDecode) (*decoder, []byte, error) { + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + id := packet[0] + packet = packet[1:] + + switch id { + case edtReg: + return getRegDecoder(packet, state) + + case edtType: + if len(packet) < 2 { + return nil, nil, errDecodeEOD + } + n := binary.BigEndian.Uint16(packet[:2]) + packet = packet[2:] + + if len(packet) < int(n) { + return nil, nil, errDecodeEOD + } + + dec, _, err := decodeType(packet[:n], state) + if err != nil { + return nil, nil, err + } + packet = packet[n:] + return dec, packet, nil + + case edtNil: + return nil, packet, nil + } + + state.decodeType = false + + if v, found := decoders.Load(id); found { + return v.(*decoder), packet, nil + } + + return nil, nil, fmt.Errorf("unknown type %v for decoding", id) +} + +func getRegDecoder(packet []byte, state *stateDecode) (*decoder, []byte, error) { + var id string + + if len(packet) < 2 { + return nil, nil, errDecodeEOD + } + + n := binary.BigEndian.Uint16(packet[:2]) + packet = packet[2:] + + if n > 4095 { + // cached. 
n is a cache id + if state.options.RegCache == nil { + return nil, nil, fmt.Errorf("no RegCache to decode cached id %d", n) + } + v, found := state.options.RegCache.Load(n) + if found == false { + return nil, nil, fmt.Errorf("unknown RegCache id %d", n) + + } + id = v.(string) + } else { + if len(packet) < int(n) { + return nil, nil, errDecodeEOD + } + id = string(packet[:n]) + packet = packet[n:] + } + + if v, found := decoders.Load(id); found { + state.decodeType = false + return v.(*decoder), packet, nil + } + return nil, nil, fmt.Errorf("unknown reg type %v for decoding", id) +} + +func decodeType(fold []byte, state *stateDecode) (*decoder, []byte, error) { + // check in the decoder cache first + if state.options.Cache != nil { + if v, found := state.options.Cache.Load(string(fold)); found { + return v.(*decoder), nil, nil + } + } + + if len(fold) == 0 { + return nil, nil, errDecodeEOD + } + + switch fold[0] { + case edtMap: + // unfold key type + decKey, f, err := decodeType(fold[1:], state) + if err != nil { + return nil, nil, fmt.Errorf("unable to unfold type (map key): %s", err) + } + decValue, f, err := decodeType(f, state) + if err != nil { + return nil, nil, fmt.Errorf("unable to unfold type (map value): %s", err) + } + if len(f) > 0 { + return nil, nil, fmt.Errorf("extra data in folded type (map): %#v", f) + } + + vtype := reflect.MapOf(decKey.Type, decValue.Type) + + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + if packet[0] == edtNil { + packet = packet[1:] + return nil, packet, nil + } + + if packet[0] != edtMap { + return nil, nil, fmt.Errorf("incorrect map type %d", packet[0]) + } + packet = packet[1:] + + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + + n := int(binary.BigEndian.Uint32(packet[:4])) + packet = packet[4:] + + if n == 0 { + x := reflect.MakeMap(vtype) + if value == nil { + value = &x + } else { + value.Set(x) + } + return value, packet, nil + } + + if n > len(packet) { + return nil, nil, fmt.Errorf("incorrect data length") + } + + x := reflect.MakeMapWithSize(vtype, n) + if value == nil { + value = &x + } else { + value.Set(x) + } + + if state.child == nil { + state.child = &stateDecode{ + options: state.options, + } + } + state = state.child + + for i := 0; i < n; i++ { + k := reflect.Indirect(reflect.New(decKey.Type)) + state.decoder = decKey + _, p, err := decKey.Decode(&k, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + + v := reflect.Indirect(reflect.New(decValue.Type)) + state.decoder = decValue + _, p, err = decValue.Decode(&v, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + + value.SetMapIndex(k, v) + } + + return value, packet, nil + } + + dec := decoder{ + Type: vtype, + Decode: fdec, + } + if state.options.Cache != nil { + state.options.Cache.LoadOrStore(string(fold), &dec) + } + + return &dec, nil, nil + + case edtSlice: + // unfold key type + decItem, f, err := decodeType(fold[1:], state) + if err != nil { + return nil, nil, fmt.Errorf("unable to unfold type (slice): %s", err) + } + if len(f) > 0 { + return nil, nil, fmt.Errorf("extra data in folded type (slice): %#v", f) + } + + vtype := reflect.SliceOf(decItem.Type) + + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + if packet[0] == edtNil { + packet = packet[1:] + return nil, packet, nil + 
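+ // (a nil slice travels as a single edtNil byte; returning a nil
+ // reflect.Value leaves the caller's destination untouched, so a nil
+ // element stays nil rather than becoming an empty, non-nil slice)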
} + + if packet[0] != edtSlice { + return nil, nil, fmt.Errorf("incorrect slice type %d", packet[0]) + } + packet = packet[1:] + + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + + n := int(binary.BigEndian.Uint32(packet[:4])) + packet = packet[4:] + + if n == 0 { + x := reflect.MakeSlice(vtype, 0, 0) + if value == nil { + value = &x + } else { + value.Set(x) + } + return value, packet, nil + } + + if n > len(packet) { + return nil, nil, fmt.Errorf("incorrect data length") + } + + x := reflect.MakeSlice(vtype, n, n) + if value == nil { + value = &x + } else { + value.Set(x) + } + + if state.child == nil { + state.child = &stateDecode{ + options: state.options, + decoder: decItem, + } + } + state = state.child + + for i := 0; i < n; i++ { + item := value.Index(i) + _, p, err := decItem.Decode(&item, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + } + + return value, packet, nil + } + + dec := decoder{ + Type: vtype, + Decode: fdec, + } + if state.options.Cache != nil { + state.options.Cache.LoadOrStore(string(fold), &dec) + } + + return &dec, nil, nil + + case edtArray: + // length of the array + if len(fold) < 6 { + return nil, nil, errDecodeEOD + } + + n := int(binary.BigEndian.Uint32(fold[1:5])) + + // unfold key type + decItem, f, err := decodeType(fold[5:], state) + if err != nil { + return nil, nil, fmt.Errorf("unable to unfold type (array): %s", err) + } + if len(f) > 0 { + return nil, nil, fmt.Errorf("extra data in folded type (array): %#v", f) + } + + vtype := reflect.ArrayOf(n, decItem.Type) + + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) == 0 { + if n == 0 { + return value, packet, nil + } + return nil, nil, errDecodeEOD + } + + if value == nil { + x := reflect.Indirect(reflect.New(vtype)) + value = &x + } + + if state.child == nil { + state.child = &stateDecode{ + options: state.options, + decoder: decItem, + } + } + state = state.child + + for i := 0; i < n; i++ { + item := value.Index(i) + _, p, err := decItem.Decode(&item, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + } + + return value, packet, nil + } + + dec := decoder{ + Type: vtype, + Decode: fdec, + } + + if state.options.Cache != nil { + state.options.Cache.LoadOrStore(string(fold), &dec) + } + + return &dec, nil, nil + + case edtReg: + return getRegDecoder(fold[1:], state) + } + + if v, found := decoders.Load(fold[0]); found { + return v.(*decoder), fold[1:], nil + } + return nil, nil, fmt.Errorf("no decoder for type %d", fold[0]) +} + +func decodePID(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtPID { + return nil, nil, fmt.Errorf("incorrect gen.PID type id %d", t) + } + packet = packet[1:] + } + + var pid gen.PID + var err error + + pid.Node, packet, err = readAtom(packet, state) + if err != nil { + return nil, nil, err + } + + if len(packet) < 16 { + return nil, nil, errDecodeEOD + } + pid.ID = binary.BigEndian.Uint64(packet[:8]) + pid.Creation = int64(binary.BigEndian.Uint64(packet[8:16])) + packet = packet[16:] + + v := reflect.ValueOf(pid) + if value == nil { + return &v, packet, nil + } + // should we check the value type? 
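+ // (value.Set panics on a type mismatch, but callers in this package
+ // allocate the destination from this decoder's own Type, see Decode
+ // and decodeAny, so the extra check is skipped on the hot path)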
+ value.Set(v) + return value, packet, nil +} + +func decodeProcessID(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtProcessID { + return nil, nil, fmt.Errorf("incorrect gen.ProcessID type id %d", t) + } + packet = packet[1:] + } + + var p gen.ProcessID + var err error + + if p.Node, packet, err = readAtom(packet, state); err != nil { + return nil, nil, err + } + if p.Name, packet, err = readAtom(packet, state); err != nil { + return nil, nil, err + } + + v := reflect.ValueOf(p) + if value == nil { + return &v, packet, nil + } + // should we check the value type? + value.Set(v) + return value, packet, nil +} + +func decodeRef(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtRef { + return nil, nil, fmt.Errorf("incorrect gen.Ref type id %d", t) + } + packet = packet[1:] + } + + var ref gen.Ref + var err error + + if ref.Node, packet, err = readAtom(packet, state); err != nil { + return nil, nil, err + } + + // 8 (creation) + 24 ([3]uint64) + if len(packet) < 32 { + return nil, nil, errDecodeEOD + } + ref.Creation = int64(binary.BigEndian.Uint64(packet[:8])) + ref.ID[0] = binary.BigEndian.Uint64(packet[8:16]) + ref.ID[1] = binary.BigEndian.Uint64(packet[16:24]) + ref.ID[2] = binary.BigEndian.Uint64(packet[24:32]) + packet = packet[32:] + + v := reflect.ValueOf(ref) + if value == nil { + return &v, packet, nil + } + // should we check the value type? + value.Set(v) + return value, packet, nil +} + +func decodeAlias(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtAlias { + return nil, nil, fmt.Errorf("incorrect gen.Alias type id %d", t) + } + packet = packet[1:] + } + + var alias gen.Alias + var err error + + if alias.Node, packet, err = readAtom(packet, state); err != nil { + return nil, nil, err + } + + // 8 (creation) + 24 ([3]uint64) + if len(packet) < 32 { + return nil, nil, errDecodeEOD + } + alias.Creation = int64(binary.BigEndian.Uint64(packet[:8])) + alias.ID[0] = binary.BigEndian.Uint64(packet[8:16]) + alias.ID[1] = binary.BigEndian.Uint64(packet[16:24]) + alias.ID[2] = binary.BigEndian.Uint64(packet[24:32]) + packet = packet[32:] + + v := reflect.ValueOf(alias) + if value == nil { + return &v, packet, nil + } + // should we check the value type? + value.Set(v) + return value, packet, nil +} + +func decodeEvent(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtEvent { + return nil, nil, fmt.Errorf("incorrect gen.Event type id %d", t) + } + packet = packet[1:] + } + + var e gen.Event + var err error + + if e.Node, packet, err = readAtom(packet, state); err != nil { + return nil, nil, err + } + if e.Name, packet, err = readAtom(packet, state); err != nil { + return nil, nil, err + } + v := reflect.ValueOf(e) + if value == nil { + return &v, packet, nil + } + // should we check the value type? 
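+ // wire sketch: a gen.Event is just two length-prefixed atoms, e.g.
+ //   edtEvent, 0, 3, 'a', 'b', 'c', 0, 4, 'n', 'a', 'm', 'e'
+ // for Event{Node: "abc", Name: "name"} (readAtom also accepts the
+ // two-byte cached form when an AtomCache is configured)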
+ value.Set(v) + return value, packet, nil +} + +func decodeTime(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtTime { + return nil, nil, fmt.Errorf("incorrect time.Time type id %d", t) + } + packet = packet[1:] + } + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + l := int(packet[0]) + if len(packet) < 1+l { + return nil, nil, errDecodeEOD + } + + var t time.Time + if err := t.UnmarshalBinary(packet[1 : 1+l]); err != nil { + return nil, nil, err + } + packet = packet[1+l:] + + v := reflect.ValueOf(t) + if value == nil { + return &v, packet, nil + } + // should we check the value type? + value.Set(v) + return value, packet, nil +} + +func decodeString(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtString { + return nil, nil, fmt.Errorf("incorrect string type id %d", t) + } + packet = packet[1:] + } + if len(packet) < 2 { + return nil, nil, errDecodeEOD + } + l := binary.BigEndian.Uint16(packet) + if len(packet) < int(2+l) { + return nil, nil, errDecodeEOD + } + + s := string(packet[2 : 2+l]) + packet = packet[2+l:] + + if value == nil { + v := reflect.ValueOf(s) + return &v, packet, nil + } + + if value.Kind() == reflect.String { + value.SetString(s) + return value, packet, nil + } + + value.Set(reflect.ValueOf(s)) + return value, packet, nil +} + +func decodeBinary(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtBinary { + return nil, nil, fmt.Errorf("incorrect []byte type id %d", t) + } + packet = packet[1:] + } + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + l := binary.BigEndian.Uint32(packet) + if len(packet) < int(4+l) { + return nil, nil, errDecodeEOD + } + + // we can't reuse the underlying slice since it is a part of the buffer + // which is bringing back to the buffer pool after all. + bin := append([]byte{}, packet[4:4+l]...) 
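+ // (equivalent to bytes.Clone(packet[4:4+l]) on Go 1.20+; the copy is
+ // what makes the returned []byte safe to keep once the packet's buffer
+ // goes back to the pool)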
+ packet = packet[4+l:] + + if value == nil { + v := reflect.ValueOf(bin) + return &v, packet, nil + } + if value.Kind() == reflect.Interface { // if type is 'any' + value.Set(reflect.ValueOf(bin)) + return value, packet, nil + } + + value.SetBytes(bin) + return value, packet, nil +} + +func decodeAtom(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtAtom { + return nil, nil, fmt.Errorf("incorrect gen.Atom type id %d", t) + } + packet = packet[1:] + } + + atom, p, err := readAtom(packet, state) + if err != nil { + return nil, nil, err + } + packet = p + v := reflect.ValueOf(atom) + if value == nil { + return &v, packet, nil + } + value.Set(v) + return value, packet, nil +} + +func decodeInt(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtInt { + return nil, nil, fmt.Errorf("incorrect int type id %d", t) + } + packet = packet[1:] + } + if len(packet) < 8 { + return nil, nil, errDecodeEOD + } + v := binary.BigEndian.Uint64(packet[:8]) + packet = packet[8:] + if value == nil { + v := reflect.ValueOf(int(v)) + return &v, packet, nil + } + + if value.Kind() == reflect.Int { + value.SetInt(int64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(int(v))) + return value, packet, nil +} + +func decodeInt8(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtInt8 { + return nil, nil, fmt.Errorf("incorrect int8 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + v := packet[0] + packet = packet[1:] + + if value == nil { + v := reflect.ValueOf(int8(v)) + return &v, packet, nil + } + + if value.Kind() == reflect.Int8 { + value.SetInt(int64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(int8(v))) + return value, packet, nil +} + +func decodeInt16(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtInt16 { + return nil, nil, fmt.Errorf("incorrect int16 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 2 { + return nil, nil, errDecodeEOD + } + + v := binary.BigEndian.Uint16(packet[:2]) + packet = packet[2:] + + if value == nil { + v := reflect.ValueOf(int16(v)) + return &v, packet, nil + } + + if value.Kind() == reflect.Int16 { + value.SetInt(int64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(int16(v))) + return value, packet, nil +} + +func decodeInt32(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtInt32 { + return nil, nil, fmt.Errorf("incorrect int32 type id %d", t) + } + packet = packet[1:] + } + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + v := binary.BigEndian.Uint32(packet[:4]) + packet = packet[4:] + if value == nil { + v := reflect.ValueOf(int32(v)) + return &v, packet, nil + } + if value.Kind() == reflect.Int32 { + value.SetInt(int64(v)) + return value, packet, nil + } + + 
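+ // fallback for destinations whose Kind is not Int32 (e.g. decoding
+ // into an `any` value): box the concrete int32 in a fresh
+ // reflect.Value and assign it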
value.Set(reflect.ValueOf(int32(v))) + return value, packet, nil +} + +func decodeInt64(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtInt64 { + return nil, nil, fmt.Errorf("incorrect int64 type id %d", t) + } + packet = packet[1:] + } + if len(packet) < 8 { + return nil, nil, errDecodeEOD + } + v := int64(binary.BigEndian.Uint64(packet[:8])) + packet = packet[8:] + + if value == nil { + v := reflect.ValueOf(v) + return &v, packet, nil + } + if value.Kind() == reflect.Int64 { + value.SetInt(v) + return value, packet, nil + } + + value.Set(reflect.ValueOf(v)) + return value, packet, nil +} + +func decodeUint(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtUint { + return nil, nil, fmt.Errorf("incorrect uint type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 8 { + return nil, nil, errDecodeEOD + } + + v := binary.BigEndian.Uint64(packet[:8]) + packet = packet[8:] + + if value == nil { + v := reflect.ValueOf(uint(v)) + return &v, packet, nil + } + + if value.Kind() == reflect.Uint { + value.SetUint(uint64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(uint(v))) + return value, packet, nil +} + +func decodeUint8(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtUint8 { + return nil, nil, fmt.Errorf("incorrect uint8 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + v := packet[0] + packet = packet[1:] + + if value == nil { + v := reflect.ValueOf(uint8(v)) + return &v, packet, nil + } + + if value.Kind() == reflect.Uint8 { + value.SetUint(uint64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(uint8(v))) + return value, packet, nil +} + +func decodeUint16(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtUint16 { + return nil, nil, fmt.Errorf("incorrect uint16 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 2 { + return nil, nil, errDecodeEOD + } + + v := binary.BigEndian.Uint16(packet[:2]) + packet = packet[2:] + + if value == nil { + v := reflect.ValueOf(v) + return &v, packet, nil + } + + if value.Kind() == reflect.Uint16 { + value.SetUint(uint64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(v)) + return value, packet, nil +} + +func decodeUint32(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtUint32 { + return nil, nil, fmt.Errorf("incorrect uint32 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + + v := binary.BigEndian.Uint32(packet[:4]) + packet = packet[4:] + + if value == nil { + v := reflect.ValueOf(v) + return &v, packet, nil + } + + if value.Kind() == reflect.Uint32 { + value.SetUint(uint64(v)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(v)) + return value, packet, nil +} + +func 
decodeUint64(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtUint64 { + return nil, nil, fmt.Errorf("incorrect uint64 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 8 { + return nil, nil, errDecodeEOD + } + + v := binary.BigEndian.Uint64(packet[:8]) + packet = packet[8:] + + if value == nil { + v := reflect.ValueOf(v) + return &v, packet, nil + } + + if value.Kind() == reflect.Uint64 { + value.SetUint(v) + return value, packet, nil + } + + value.Set(reflect.ValueOf(v)) + return value, packet, nil +} + +func decodeFloat32(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtFloat32 { + return nil, nil, fmt.Errorf("incorrect float32 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + bits := binary.BigEndian.Uint32(packet[:4]) + packet = packet[4:] + + if value == nil { + v := reflect.ValueOf(math.Float32frombits(bits)) + return &v, packet, nil + } + + if value.Kind() == reflect.Float32 { + value.SetFloat(float64(math.Float32frombits(bits))) + return value, packet, nil + } + + value.Set(reflect.ValueOf(math.Float32frombits(bits))) + return value, packet, nil +} + +func decodeFloat64(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtFloat64 { + return nil, nil, fmt.Errorf("incorrect float64 type id %d", t) + } + packet = packet[1:] + } + + if len(packet) < 8 { + return nil, nil, fmt.Errorf("float64. 
not enough data") + } + bits := binary.BigEndian.Uint64(packet[:8]) + packet = packet[8:] + + if value == nil { + v := reflect.ValueOf(math.Float64frombits(bits)) + return &v, packet, nil + } + if value.Kind() == reflect.Float64 { + value.SetFloat(math.Float64frombits(bits)) + return value, packet, nil + } + + value.Set(reflect.ValueOf(math.Float64frombits(bits))) + return value, packet, nil +} + +func decodeBool(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if state.decodeType { + if len(packet) < 1 { + return nil, nil, errDecodeEOD + } + t := packet[0] + if t != edtBool { + return nil, nil, fmt.Errorf("incorrect bool type id %d", t) + } + packet = packet[1:] + } + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + b := packet[0] == 1 + packet = packet[1:] + if value == nil { + v := reflect.ValueOf(b) + return &v, packet, nil + } + if value.Kind() == reflect.Bool { + value.SetBool(b) + return value, packet, nil + } + + value.Set(reflect.ValueOf(b)) + return value, packet, nil +} + +func decodeAny(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + dec, p, err := getDecoder(packet, state) + if err != nil { + return nil, nil, err + } + + if dec == nil { + return value, p, nil + } + + if value == nil { + v := reflect.Indirect(reflect.New(dec.Type)) + value, packet, err = dec.Decode(&v, p, state) + if err != nil { + return nil, nil, err + } + return value, packet, nil + } + + if value.Type() == dec.Type { + value, packet, err = dec.Decode(value, p, state) + if err != nil { + return nil, nil, err + } + return value, packet, nil + } + + v := reflect.Indirect(reflect.New(dec.Type)) + _, packet, err = dec.Decode(&v, p, state) + if err != nil { + return nil, nil, err + } + value.Set(v) + + return value, packet, nil +} +func decodeError(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + var err error + + if state.decodeType { + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + if packet[0] != edtError { + return nil, nil, fmt.Errorf("incorrect error type id %d", packet[0]) + } + packet = packet[1:] + } + + if len(packet) < 2 { + return nil, nil, errDecodeEOD + } + + id := binary.BigEndian.Uint16(packet) + packet = packet[2:] + + if id == math.MaxUint16 { + // nil value + return value, packet, nil + } + + if id > math.MaxInt16 { + // registered error + if state.options.ErrCache == nil { + return nil, nil, fmt.Errorf("no ErrCache to decode id %d", id) + } + + v, found := state.options.ErrCache.Load(id) + if found == false { + return nil, nil, fmt.Errorf("unknown ErrCache id %d", id) + } + err = v.(error) + } else { + // regular error. 
id has a length value + l := int(id) + if len(packet) < l { + return nil, nil, errDecodeEOD + } + err = fmt.Errorf(string(packet[:l])) + packet = packet[l:] + } + + v := reflect.ValueOf(err) + if value == nil { + return &v, packet, nil + } + value.Set(v) + return value, packet, nil +} + +func readAtom(packet []byte, state *stateDecode) (gen.Atom, []byte, error) { + var atom gen.Atom + + if len(packet) < 2 { + return atom, nil, errDecodeEOD + } + + id := binary.BigEndian.Uint16(packet) + packet = packet[2:] + + if id > 255 { + // cached atom + if state.options.AtomCache == nil { + return atom, nil, fmt.Errorf("no AtomCache to decode id %d", id) + } + v, found := state.options.AtomCache.Load(id) + if found == false { + return atom, nil, fmt.Errorf("unknown AtomCache id %d", id) + } + atom = v.(gen.Atom) + } else { + // this is atom and 'id' is the len of it + l := int(id) // len + if len(packet) < l { + return atom, nil, errDecodeEOD + } + atom = gen.Atom(packet[:l]) + packet = packet[l:] + } + + if state.options.AtomMapping != nil { + if v, found := state.options.AtomMapping.Load(atom); found { + return v.(gen.Atom), packet, nil + } + } + + return atom, packet, nil +} diff --git a/net/edf/decode_test.go b/net/edf/decode_test.go new file mode 100644 index 00000000..430afa32 --- /dev/null +++ b/net/edf/decode_test.go @@ -0,0 +1,3549 @@ +package edf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "reflect" + "sync" + "testing" + "time" + + "ergo.services/ergo/gen" +) + +func TestDecodeBool(t *testing.T) { + expect := true + packet := []byte{edtBool, 1} + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } + expect = false + packet = []byte{edtBool, 0} + value, _, err = Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceBool(t *testing.T) { + expect := []bool{false, true, false} + packet := []byte{edtType, 0, 2, + edtSlice, edtBool, + edtSlice, + 0, 0, 0, 3, + 0, 1, 0, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} +func TestDecodeSliceAnyBool(t *testing.T) { + expect := []any{false, true, false} + packet := []byte{edtType, 0, 2, + edtSlice, edtAny, + edtSlice, + 0, 0, 0, 3, + edtBool, 0, + edtBool, 1, + edtBool, 0, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeAtom(t *testing.T) { + expect := gen.Atom("hello world") + packet := []byte{edtAtom, + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeAtomCache(t *testing.T) { + expect := gen.Atom("hello world") + packet := []byte{edtAtom, + 0x01, 0x2c, // cached "hello world" => 300 + } + + atomCache := new(sync.Map) + atomCache.Store(uint16(300), expect) + + value, 
_, err := Decode(packet, Options{AtomCache: atomCache}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeAtomMapping(t *testing.T) { + expect := gen.Atom("hello world") + mapped := gen.Atom("hi") + packet := []byte{edtAtom, + 0, 0x02, // len + 0x68, 0x69, // "hi" + } + + atomMapping := new(sync.Map) + atomMapping.Store(mapped, expect) + + value, _, err := Decode(packet, Options{AtomMapping: atomMapping}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeAtomMappingCache(t *testing.T) { + expect := gen.Atom("hello world") + mapped := gen.Atom("hi") + + packet := []byte{edtAtom, + 0x01, 0x2c, // mapped "hello world" => "hi", cached "hi" => 300 + } + + atomCache := new(sync.Map) + atomCache.Store(uint16(300), mapped) + atomMapping := new(sync.Map) + atomMapping.Store(mapped, expect) + + value, _, err := Decode(packet, Options{AtomCache: atomCache, AtomMapping: atomMapping}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAtom(t *testing.T) { + + v := gen.Atom("hello world") + expect := []gen.Atom{ + v, v, v, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAtom, + edtSlice, + 0, 0, 0, 3, + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} +func TestDecodeSliceAtomCache(t *testing.T) { + + v := gen.Atom("hello world") + expect := []gen.Atom{ + v, v, v, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAtom, + edtSlice, + 0, 0, 0, 3, + 0x01, 0x2c, // cached "hello world" => 300 + 0x01, 0x2c, // cached "hello world" => 300 + 0x01, 0x2c, // cached "hello world" => 300 + } + atomCache := new(sync.Map) + atomCache.Store(uint16(300), v) + value, _, err := Decode(packet, Options{AtomCache: atomCache}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyAtom(t *testing.T) { + + v := gen.Atom("hello world") + expect := []any{ + v, nil, v, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtAtom, 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + edtNil, + edtAtom, 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyAtomCache(t *testing.T) { + + v := gen.Atom("hello world") + expect := []any{ + v, nil, v, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 
3, + edtAtom, 0x01, 0x2c, // cached "hello world" => 300 + edtNil, + edtAtom, 0x01, 0x2c, // cached "hello world" => 300 + } + atomCache := new(sync.Map) + atomCache.Store(uint16(300), v) + value, _, err := Decode(packet, Options{AtomCache: atomCache}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeString(t *testing.T) { + expect := "abc" + packet := []byte{edtString, 0, 3, 97, 98, 99} + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceString(t *testing.T) { + expect := []string{"abc", "def", "ghi"} + packet := []byte{edtType, 0, 2, + edtSlice, + edtString, + edtSlice, + 0, 0, 0, 3, + 0, 3, 97, 98, 99, // "abc" + 0, 3, 100, 101, 102, // "def" + 0, 3, 103, 104, 105, // "ghi" + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} +func TestDecodeSliceAnyString(t *testing.T) { + expect := []any{"abc", "def", "ghi"} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtString, 0, 3, 97, 98, 99, // "abc" + edtString, 0, 3, 100, 101, 102, // "def" + edtString, 0, 3, 103, 104, 105, // "ghi" + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeBinary(t *testing.T) { + expect := []byte{1, 2, 3, 4, 5} + packet := []byte{edtBinary, + 0x0, 0x0, 0x0, 0x05, // len + 0x1, 0x2, 0x3, 0x4, 0x5, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceBinary(t *testing.T) { + expect := [][]byte{{1, 2, 3, 4, 5}, {6, 7, 8}, {9}} + packet := []byte{edtType, 0, 2, + edtSlice, + edtBinary, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x0, 0x0, 0x05, // len + 0x1, 0x2, 0x3, 0x4, 0x5, + 0x0, 0x0, 0x0, 0x03, // len + 0x6, 0x7, 0x8, + 0x0, 0x0, 0x0, 0x01, // len + 0x9, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyBinary(t *testing.T) { + expect := []any{[]byte{1, 2, 3, 4, 5}, []byte{6, 7, 8}, []byte{9}} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtBinary, 0x0, 0x0, 0x0, 0x05, // len + 0x1, 0x2, 0x3, 0x4, 0x5, + edtBinary, 0x0, 0x0, 0x0, 0x03, // len + 0x6, 0x7, 0x8, + edtBinary, 0x0, 0x0, 0x0, 0x01, // len + 0x9, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeFloat32(t *testing.T) { + expect32 := float32(3.14) + packet := []byte{edtFloat32, 64, 72, 245, 195} + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect32) { + fmt.Println("exp", expect32) + 
fmt.Println("got", value) + t.Fatal("incorrect value") + } +} +func TestDecodeSliceFloat32(t *testing.T) { + + expect := []float32{3.14, 3.15, 3.16} + packet := []byte{edtType, 0, 2, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 3, + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyFloat32(t *testing.T) { + + expect := []any{float32(3.14), float32(3.15), float32(3.16)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeFloat64(t *testing.T) { + expect64 := float64(3.14) + packet := []byte{edtFloat64, 64, 9, 30, 184, 81, 235, 133, 31} + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect64) { + fmt.Println("exp", expect64) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceFloat64(t *testing.T) { + + expect := []float64{3.14, 3.15, 3.16} + packet := []byte{edtType, 0, 2, + edtSlice, + edtFloat64, + edtSlice, + 0, 0, 0, 3, + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + 0x40, 0x9, 0x47, 0xae, 0x14, 0x7a, 0xe1, 0x48, // 3.16 + + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyFloat64(t *testing.T) { + + expect := []any{float64(3.14), float64(3.15), float64(3.16)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtFloat64, 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat64, 0x40, 0x9, 0x47, 0xae, 0x14, 0x7a, 0xe1, 0x48, // 3.16 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeInteger(t *testing.T) { + for _, c := range integerCases() { + t.Run(c.name, func(t *testing.T) { + value, _, err := Decode(c.bin, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, c.integer) { + fmt.Println("exp ", c.integer) + fmt.Println("got ", value) + t.Fatal("incorrect value") + } + }) + } +} + +func TestDecodeSliceInt(t *testing.T) { + expect := []int{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtInt, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyInt(t *testing.T) { 
+ expect := []any{int(1), int(2), int(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt, 0, 0, 0, 0, 0, 0, 0, 1, + edtInt, 0, 0, 0, 0, 0, 0, 0, 2, + edtInt, 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceInt8(t *testing.T) { + expect := []int8{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtInt8, + edtSlice, + 0, 0, 0, 3, + 1, + 2, + 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyInt8(t *testing.T) { + expect := []any{int8(1), int8(2), int8(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt8, 1, + edtInt8, 2, + edtInt8, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceInt16(t *testing.T) { + expect := []int16{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtInt16, + edtSlice, + 0, 0, 0, 3, + 0, 1, + 0, 2, + 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyInt16(t *testing.T) { + expect := []any{int16(1), int16(2), int16(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt16, 0, 1, + edtInt16, 0, 2, + edtInt16, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceInt32(t *testing.T) { + expect := []int32{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtInt32, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 1, + 0, 0, 0, 2, + 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyInt32(t *testing.T) { + expect := []any{int32(1), int32(2), int32(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt32, 0, 0, 0, 1, + edtInt32, 0, 0, 0, 2, + edtInt32, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceInt64(t *testing.T) { + expect := []int64{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtInt64, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyInt64(t *testing.T) { + expect := 
[]any{int64(1), int64(2), int64(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 1, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 2, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceUint(t *testing.T) { + expect := []uint{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtUint, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyUint(t *testing.T) { + expect := []any{uint(1), uint(2), uint(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint, 0, 0, 0, 0, 0, 0, 0, 1, + edtUint, 0, 0, 0, 0, 0, 0, 0, 2, + edtUint, 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceUint8(t *testing.T) { + // basically, []uint8 == []byte, which means it should be encoded as a binary, + // but check this way of encoding anyway. + expect := []uint8{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtUint8, + edtSlice, + 0, 0, 0, 3, + 1, + 2, + 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyUint8(t *testing.T) { + expect := []any{uint8(1), byte(2), uint8(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint8, 1, + edtUint8, 2, + edtUint8, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceUint16(t *testing.T) { + expect := []uint16{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtUint16, + edtSlice, + 0, 0, 0, 3, + 0, 1, + 0, 2, + 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyUint16(t *testing.T) { + expect := []any{uint16(1), uint16(2), uint16(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint16, 0, 1, + edtUint16, 0, 2, + edtUint16, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceUint32(t *testing.T) { + expect := []uint32{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtUint32, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 1, + 0, 0, 0, 2, + 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if 
!reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyUint32(t *testing.T) { + expect := []any{uint32(1), uint32(2), uint32(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint32, 0, 0, 0, 1, + edtUint32, 0, 0, 0, 2, + edtUint32, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceUint64(t *testing.T) { + expect := []uint64{1, 2, 3} + packet := []byte{edtType, 0, 2, + edtSlice, + edtUint64, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyUint64(t *testing.T) { + expect := []any{uint64(1), uint64(2), uint64(3)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 1, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 2, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 3, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} +func TestDecodeSliceAnyInteger(t *testing.T) { + expect := []any{ + int(1), nil, int8(2), nil, int16(3), nil, int32(4), nil, int64(5), nil, + uint(6), nil, uint8(7), nil, uint16(8), nil, uint32(9), nil, uint64(10), + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 19, + edtInt, 0, 0, 0, 0, 0, 0, 0, 1, + edtNil, + edtInt8, 2, + edtNil, + edtInt16, 0, 3, + edtNil, + edtInt32, 0, 0, 0, 4, + edtNil, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 5, + edtNil, + edtUint, 0, 0, 0, 0, 0, 0, 0, 6, + edtNil, + edtUint8, 7, + edtNil, + edtUint16, 0, 8, + edtNil, + edtUint32, 0, 0, 0, 9, + edtNil, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 10, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnySlice(t *testing.T) { + expect := []any{ + []int{4}, + nil, + []float32{3.14, 3.15, 3.16}, + nil, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + + edtType, 0, 2, + edtSlice, edtInt, + edtSlice, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 4, + + edtNil, + + edtType, 0, 2, + edtSlice, edtFloat32, + edtSlice, 0, 0, 0, 3, + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + + edtNil, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeTime(t *testing.T) { + expect := time.Date(1399, time.January, 26, 0, 0, 0, 0, time.UTC) + packet := []byte{edtTime, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", 
expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceTime(t *testing.T) { + v := time.Date(1399, time.January, 26, 0, 0, 0, 0, time.UTC) + expect := []time.Time{ + v, v, v, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtTime, + edtSlice, + 0, 0, 0, 3, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyTime(t *testing.T) { + v := time.Date(1399, time.January, 26, 0, 0, 0, 0, time.UTC) + expect := []any{ + v, v, v, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtTime, 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + edtTime, 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + edtTime, 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceReg(t *testing.T) { + type MyFloat float32 + var x MyFloat + + expect := []MyFloat{3.14, 3.15, 3.16} + packet := []byte{edtType, 0, 39, + edtSlice, + edtReg, 0, 35, + // name: #ergo.services/ergo/net/edf/MyFloat + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x4d, 0x79, 0x46, 0x6c, + 0x6f, 0x61, 0x74, + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + RegisterTypeOf(x) + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceRegCache(t *testing.T) { + type MyFloat123 float32 + var x MyFloat123 + + expect := []MyFloat123{3.14, 3.15, 3.16} + packet := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, // cache id uint16(5000) => name: #ergo.services/ergo/net/edf/MyFloat123 + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + RegisterTypeOf(x) + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MyFloat123") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRegSlice(t *testing.T) { + type MySlice29 []float32 + + expect := MySlice29{3.14, 3.15, 3.16} + packet := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + if err := RegisterTypeOf(expect); err != nil { + 
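+ // (0x13, 0x88 in the packet above is big-endian uint16(5000); ids
+ // above 4095 are RegCache references, so the type name is resolved
+ // from the regCache entry stored below rather than read inline)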
t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MySlice29") + + opts := Options{ + RegCache: regCache, + } + + value, _, err := Decode(packet, opts) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRegSliceRegSlice(t *testing.T) { + type MyDecSliceFloat []float32 + type MyDecSliceOfSlice []MyDecSliceFloat + + expect := MyDecSliceOfSlice{ + {3.14, 3.15, 3.16}, + nil, + {3.14}, + } + packet := []byte{edtReg, 0x13, 0x88, + edtReg, + 0x0, 0x0, 0x0, 0x3, + edtSlice, + 0x0, 0x0, 0x0, 0x3, + 0x40, 0x48, 0xf5, 0xc3, + 0x40, 0x49, 0x99, 0x9a, + 0x40, 0x4a, 0x3d, 0x71, + edtNil, + edtSlice, + 0x0, 0x0, 0x0, 0x1, + 0x40, 0x48, 0xf5, 0xc3, + } + + if err := RegisterTypeOf(MyDecSliceOfSlice{}); err != nil { + t.Fatal(err) + } + if err := RegisterTypeOf(MyDecSliceFloat{}); err != nil { + t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MyDecSliceOfSlice") + + opts := Options{ + RegCache: regCache, + } + + value, _, err := Decode(packet, opts) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyReg(t *testing.T) { + type MyFloat20 float32 + x := MyFloat20(3.16) + + expect := []any{float32(3.14), float64(3.15), float32(3.16), nil, x} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 5, + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtNil, + edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // MyFloat20(3.16) + } + + RegisterTypeOf(x) + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MyFloat20") + + opts := Options{ + RegCache: regCache, + } + + value, _, err := Decode(packet, opts) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRegSliceReg(t *testing.T) { + type MyFloat19 float32 + type MySlice19 []MyFloat19 + var x MyFloat19 + + expect := MySlice19{3.14, 3.15, 3.16} + packet := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + if err := RegisterTypeOf(x); err != nil { + t.Fatal(err) + } + if err := RegisterTypeOf(expect); err != nil { + t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MySlice19") + + opts := Options{ + RegCache: regCache, + } + + value, _, err := Decode(packet, opts) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodePID(t *testing.T) { + expect := gen.PID{Node: "abc@def", ID: 32767, Creation: 2} + packet := []byte{edtPID, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect)
{ + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSlicePID(t *testing.T) { + v := gen.PID{Node: "abc@def", ID: 32767, Creation: 2} + expect := []gen.PID{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtPID, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyPID(t *testing.T) { + v := gen.PID{Node: "abc@def", ID: 32767, Creation: 2} + expect := []any{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtPID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + edtPID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + edtPID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeProcessID(t *testing.T) { + expect := gen.ProcessID{Node: "abc@def", Name: "ghi"} + packet := []byte{edtProcessID, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceProcessID(t *testing.T) { + v := gen.ProcessID{Node: "abc@def", Name: "ghi"} + expect := []gen.ProcessID{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtProcessID, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyProcessID(t *testing.T) {
+ v := gen.ProcessID{Node: "abc@def", Name: "ghi"} + expect := []any{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtProcessID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + edtProcessID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + edtProcessID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeEvent(t *testing.T) { + expect := gen.Event{Node: "abc@def", Name: "ghi"} + packet := []byte{edtEvent, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceEvent(t *testing.T) { + v := gen.Event{Node: "abc@def", Name: "ghi"} + expect := []gen.Event{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtEvent, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyEvent(t *testing.T) { + v := gen.Event{Node: "abc@def", Name: "ghi"} + expect := []any{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtEvent, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + edtEvent, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + edtEvent, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (event name) + 0x67, 0x68, 0x69, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRef(t *testing.T) { + expect := gen.Ref{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + packet := []byte{edtRef, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { +
fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceRef(t *testing.T) { + v := gen.Ref{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + expect := []gen.Ref{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtRef, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyRef(t *testing.T) { + v := gen.Ref{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + expect := []any{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtRef, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtRef, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtRef, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeAlias(t *testing.T) { + expect := gen.Alias{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + packet := []byte{edtAlias, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAlias(t *testing.T) { + v := gen.Alias{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + expect := []gen.Alias{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAlias, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyAlias(t *testing.T) { + v := gen.Alias{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + expect := []any{v, v, v} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtAlias, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtAlias, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtAlias, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeError(t *testing.T) { + packet := []byte{edtError, + 0, 3, // len + 97, 98, 99, // "abc" + } + expect := errors.New("abc") + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceError(t *testing.T) { + packet := []byte{edtType, 0, 2, + edtSlice, + edtError, + edtSlice, + 0, 0, 0, 3, + 0, 4, // len + 97, 98, 99, 100, // "abcd" + 0, 4, // len + 97, 98, 99, 100, // "abcd" + 0, 4, // len + 97, 98, 99, 100, // "abcd" + } + v := errors.New("abcd") + expect := []error{v, v, v} + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceErrorNil(t *testing.T) { + packet := []byte{edtType, 0, 2, + edtSlice, + edtError, + edtSlice, + 0, 0, 0, 3, + 0, 4, // len + 97, 98, 99, 100, // "abcd" + 0xff, 0xff, // nil error + 0, 4, // len + 97, 98, 99, 100, // "abcd" + } + v := errors.New("abcd") + expect := []error{v, nil, v} + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRegError(t *testing.T) { + packet := []byte{edtError, + 0x88, 0xb8, // 35000 => error "abc" + } + + expect := errors.New("abc") + + errCache := new(sync.Map) + errCache.Store(uint16(35000), expect) + + value, _, err := Decode(packet, Options{ErrCache: errCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } + +} + +func TestDecodeSliceAnyError(t *testing.T) { + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtError, 0, 4, // len + 97, 98, 99, 100, // "abcd" + edtNil, + 
edtError, 0, 4, // len + 97, 98, 99, 100, // "abcd" + } + v := errors.New("abcd") + expect := []any{v, nil, v} + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceTypeReg(t *testing.T) { + type MyFloa float32 + var x MyFloa + expect := []MyFloa{3.14, 3.15, 3.16} + packet := []byte{edtType, 0, 38, + edtSlice, + edtReg, 0, 34, + // name: #ergo.services/ergo/net/edf/MyFloa + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x4d, 0x79, 0x46, 0x6c, + 0x6f, 0x61, + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + RegisterTypeOf(x) + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } + +} + +func TestDecodeSliceRegTypeReg(t *testing.T) { + type MyFloatD19 float32 + type MySliceD19 []MyFloatD19 + var x MyFloatD19 + + expect := MySliceD19{3.14, 3.15, 3.16} + packet := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + RegisterTypeOf(x) + if err := RegisterTypeOf(expect); err != nil { + t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MySliceD19") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } + +} + +func TestDecodeSliceAny(t *testing.T) { + expect := []any{float32(3.14), float64(3.15), float32(3.16)} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } + +} + +func TestDecodeSliceNil(t *testing.T) { + + expect := []any{nil, nil, nil} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtNil, + edtNil, + edtNil, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceNil2(t *testing.T) { + + expect := []any{} + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 0, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceNest(t *testing.T) { + expect := []any{ + []any{float32(3.15)}, + float32(3.14), + float32(3.16), + []any{float64(3.15)}, + } + packet := []byte{edtType, 0, 2, + edtSlice, +
edtAny, + edtSlice, + 0, 0, 0, 4, + edtType, 0, 2, + edtSlice, edtAny, + edtSlice, + 0, 0, 0, 1, edtFloat32, 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtType, 0, 2, + edtSlice, edtAny, + edtSlice, + 0, 0, 0, 1, edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceSlice(t *testing.T) { + expect := [][]float32{ + {3.14, 3.15, 3.16}, + {3.16}, + nil, + {3.14, 3.15}, + {}, + } + packet := []byte{edtType, 0, 3, + edtSlice, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 5, + edtSlice, + 0, 0, 0, 3, // first slice with 3 items + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtSlice, + 0, 0, 0, 1, // second slice with 1 item + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtNil, // third one + edtSlice, + 0, 0, 0, 2, // 4th + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtSlice, + 0, 0, 0, 0, // 5th + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceSliceAny(t *testing.T) { + expect := [][]any{ + {float32(3.14), float32(3.16), float64(3.15)}, + {float64(3.15)}, + nil, + {float32(3.14), float32(3.16)}, + {}, + } + packet := []byte{edtType, 0, 3, + edtSlice, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 5, + edtSlice, + 0, 0, 0, 3, // first slice with 3 items + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtSlice, + 0, 0, 0, 1, // second slice with 1 item + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, // third one + edtSlice, + 0, 0, 0, 2, // 4th + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtSlice, + 0, 0, 0, 0, // 5th + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceSliceNil(t *testing.T) { + expect := [][]any{nil, []any{}, nil, nil} + packet := []byte{edtType, 0, 3, + edtSlice, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + edtNil, + edtSlice, + 0, 0, 0, 0, + edtNil, + edtNil, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceSliceReg(t *testing.T) { + type MySlice555 []float32 + + if err := RegisterTypeOf(MySlice555{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + expect := []MySlice555{ + MySlice555{3.14, 3.16, 3.15}, + MySlice555{3.15}, + nil, + MySlice555{3.14, 3.16}, + MySlice555{}, + } + packet := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 5, + edtReg, + 0, 0, 0, 3, // first slice with 3 items + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtReg, + 0, 0, 0, 
1, // second slice with 1 item + 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtNil, // third one + edtReg, + 0, 0, 0, 2, // 4th + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtReg, + 0, 0, 0, 0, // 5th + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MySlice555") + opts := Options{ + RegCache: regCache, + } + + value, _, err := Decode(packet, opts) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSlice3DZero(t *testing.T) { + expect := [][][]float32{} + packet := []byte{edtType, 0, 4, + edtSlice, + edtSlice, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 0, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} +func TestDecodeSlice3D(t *testing.T) { + expect := [][][]float32{ /* len 3 */ + { /* len 5 */ + { /* len 7 */ 2.21018848, 2.94523878, 1.67807658, 1.30014748, 1.1873558, 8.1819557, 3.2368748}, + { /* len 10 */ 2.17948558, 2.95483828, 3.29734688, 2.72996818, 2.50011478, 2.98767788, 1.31364818, 8.06395757, 2.53354848, 2.38570578}, + { /* len 4 */ 2.9838078, 1.61728128, 1.8756628, 1.5756598}, + { /* len 10 */ 8.5187367, 2.79348, 4.3456557, 1.29794587, 3.38391948, 1.4460748, 5.0206397, 2.02001097, 1.77825548, 2.33810328}, + { /* len 8 */ 3.15617888, 2.21068618, 3.01507718, 7.0342597, 2.12085158, 7.9914467, 2.92003388, 3.19992137}, + }, { /* len 6 */ + { /* len 3 */ 3.3188187, 2.82300078, 7.3257346}, + { /* len 10 */ 1.47951058, 1.47638718, 3.1678068, 1.24334058, 1.48100658, 1.8274938, 2.07265258, 1.83188888, 5.8776197, 1.64099568}, + { /* len 6 */ 2.26154558, 9.5987497, 3.24544727, 1.34864688, 2.47839448, 2.0456888}, + { /* len 5 */ 9.0369537, 3.69528477, 3.04563028, 1.4488858, 3.80179227}, + { /* len 5 */ 1.53326348, 2.77105168, 1.05977548, 2.75297638, 8.9171847}, + { /* len 10 */ 1.65367358, 9.4070457, 3.06440548, 2.4763148, 2.22120158, 2.3734938, 3.37481478, 2.22900497, 6.2138987, 2.80613798}, + }, { /* len 1 */ + { /* len 10 */ 8.03434337, 2.55059418, 2.20168828, 2.86517478, 4.38993137, 8.6655217, 2.22159657, 3.0119788, 1.19758818, 2.58799087}, + }, + } + + packet := []byte{edtType, 0, 4, + edtSlice, + edtSlice, + edtSlice, + edtFloat32, + + edtSlice, + 0x0, 0x0, 0x0, 0x3, // len 3 { x, x, x} + edtSlice, + 0x0, 0x0, 0x0, 0x5, // len 5 { {y, y, y, y, y}, x, x} + edtSlice, + 0x0, 0x0, 0x0, 0x7, // len 7 { { {z, z, z, z, z, z, z}, y, y, y, y}, x, x} + 0x40, 0xd, 0x73, 0xba, // z + 0x40, 0x3c, 0x7e, 0xcb, // z + 0x3f, 0xd6, 0xcb, 0x37, // z + 0x3f, 0xa6, 0x6b, 0x3c, // z + 0x3f, 0x97, 0xfb, 0x46, // z + 0x41, 0x2, 0xe9, 0x4a, // z + 0x40, 0x4f, 0x28, 0xf5, // z + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x40, 0xb, 0x7c, 0xb1, + 0x40, 0x3d, 0x1c, 0x12, + 0x40, 0x53, 0x7, 0xbb, + 0x40, 0x2e, 0xb7, 0xcc, + 0x40, 0x20, 0x1, 0xe1, + 0x40, 0x3f, 0x36, 0x1d, + 0x3f, 0xa8, 0x25, 0xa0, + 0x41, 0x1, 0x5, 0xf8, + 0x40, 0x22, 0x25, 0xa9, + 0x40, 0x18, 0xaf, 0x67, + edtSlice, + 0x0, 0x0, 0x0, 0x4, // len 4 + 0x40, 0x3e, 0xf6, 0xb5, + 0x3f, 0xcf, 0x3, 0x13, + 0x3f, 0xf0, 0x15, 0xb8, + 0x3f, 0xc9, 0xaf, 0x38, + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x41, 0x8, 0x4c, 0xbf, + 0x40, 0x32, 0xc8, 0x60, + 0x40, 0x8b, 0xf, 0x9d, + 0x3f, 0xa6, 0x23, 0x17, + 0x40, 0x58, 0x92, 0x23, + 0x3f, 0xb9, 0x18, 0xfb, + 0x40, 0xa0, 0xa9,
0x15, + 0x40, 0x1, 0x47, 0xdc, + 0x3f, 0xe3, 0x9d, 0xe0, + 0x40, 0x15, 0xa3, 0x7c, + edtSlice, + 0x0, 0x0, 0x0, 0x8, // len 8 + 0x40, 0x49, 0xfe, 0xd6, + 0x40, 0xd, 0x7b, 0xe2, + 0x40, 0x40, 0xf7, 0x6, + 0x40, 0xe1, 0x18, 0xa8, + 0x40, 0x7, 0xbc, 0x8, + 0x40, 0xff, 0xb9, 0xee, + 0x40, 0x3a, 0xe1, 0xd6, + 0x40, 0x4c, 0xcb, 0x83, + edtSlice, + 0x0, 0x0, 0x0, 0x6, // len 6 + edtSlice, + 0x0, 0x0, 0x0, 0x3, // len 3 + 0x40, 0x54, 0x67, 0x87, + 0x40, 0x34, 0xac, 0xb, + 0x40, 0xea, 0x6c, 0x6b, + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x3f, 0xbd, 0x60, 0x9a, + 0x3f, 0xbc, 0xfa, 0x41, + 0x40, 0x4a, 0xbd, 0x59, + 0x3f, 0x9f, 0x25, 0xc9, + 0x3f, 0xbd, 0x91, 0xa0, + 0x3f, 0xe9, 0xeb, 0x51, + 0x40, 0x4, 0xa6, 0x57, + 0x3f, 0xea, 0x7b, 0x56, + 0x40, 0xbc, 0x15, 0x76, + 0x3f, 0xd2, 0xc, 0x25, + edtSlice, + 0x0, 0x0, 0x0, 0x6, // len 6 + 0x40, 0x10, 0xbd, 0x2a, + 0x41, 0x19, 0x94, 0x7b, + 0x40, 0x4f, 0xb5, 0x68, + 0x3f, 0xac, 0xa0, 0x76, + 0x40, 0x1e, 0x9e, 0x4, + 0x40, 0x2, 0xec, 0x91, + edtSlice, + 0x0, 0x0, 0x0, 0x5, // len 5 + 0x41, 0x10, 0x97, 0x5d, + 0x40, 0x6c, 0x7f, 0x8c, + 0x40, 0x42, 0xeb, 0x9b, + 0x3f, 0xb9, 0x75, 0x17, + 0x40, 0x73, 0x50, 0x91, + edtSlice, + 0x0, 0x0, 0x0, 0x5, // len 5 + 0x3f, 0xc4, 0x41, 0xfa, + 0x40, 0x31, 0x58, 0xe9, + 0x3f, 0x87, 0xa6, 0xb9, + 0x40, 0x30, 0x30, 0xc4, + 0x41, 0xe, 0xac, 0xca, + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x3f, 0xd3, 0xab, 0x93, + 0x41, 0x16, 0x83, 0x42, + 0x40, 0x44, 0x1f, 0x38, + 0x40, 0x1e, 0x7b, 0xf1, + 0x40, 0xe, 0x28, 0x2b, + 0x40, 0x17, 0xe7, 0x53, + 0x40, 0x57, 0xfc, 0xf7, + 0x40, 0xe, 0xa8, 0x4, + 0x40, 0xc6, 0xd8, 0x42, + 0x40, 0x33, 0x97, 0xc4, + edtSlice, + 0x0, 0x0, 0x0, 0x1, // len 1 + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x41, 0x0, 0x8c, 0xac, + 0x40, 0x23, 0x3c, 0xef, + 0x40, 0xc, 0xe8, 0x76, + 0x40, 0x37, 0x5f, 0x6, + 0x40, 0x8c, 0x7a, 0x51, + 0x41, 0xa, 0xa5, 0xfa, + 0x40, 0xe, 0x2e, 0xa3, + 0x40, 0x40, 0xc4, 0x43, + 0x3f, 0x99, 0x4a, 0x92, + 0x40, 0x25, 0xa1, 0xa4, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +type testUnmarshal struct { + X float32 +} + +func (testUnmarshal) MarshalEDF(io.Writer) error { + return nil +} + +func (u *testUnmarshal) UnmarshalEDF(data []byte) error { + u.X = math.Float32frombits(binary.BigEndian.Uint32(data)) + return nil +} + +func TestDecodeUnmarshal(t *testing.T) { + packet := []byte{edtReg, 0x13, 0x88, + 0, 0, 0, 4, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + } + expect := testUnmarshal{X: 3.14} + + if err := RegisterTypeOf(testUnmarshal{}); err != nil { + t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testUnmarshal") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceUnmarshal(t *testing.T) { + packet := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 4, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0, 0, 0, 4, // len + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0, 0, 0, 4, // len + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + expect := []testUnmarshal{{3.14}, {3.15}, {3.16}} + + RegisterTypeOf(testUnmarshal{}) + + regCache := new(sync.Map) + regCache.Store(uint16(5000), 
"#ergo.services/ergo/net/edf/testUnmarshal") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Println("exp", expect) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} + +type testDecStruct struct { + A float32 + B float64 +} + +func TestDecodeStruct(t *testing.T) { + packet := []byte{edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + } + expect := testDecStruct{3.16, 3.15} + + RegisterTypeOf(testDecStruct{}) + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testDecStruct") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceStruct(t *testing.T) { + packet := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 2, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + expect := []testDecStruct{{3.16, 3.15}, {3.15, 3.14}} + + RegisterTypeOf(testDecStruct{}) + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testDecStruct") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +type testDecStructWithAny struct { + A float32 + B float64 + C any +} + +func TestDecodeStructWithAny(t *testing.T) { + packet := []byte{edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, + } + expect := testDecStructWithAny{3.16, 3.15, nil} + + RegisterTypeOf(testDecStructWithAny{}) + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testDecStructWithAny") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } + + packet = []byte{edtReg, 0x13, 0x88, + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + edtFloat64, 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + expect = testDecStructWithAny{3.15, 3.14, float64(3.14)} + + value, _, err = Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +type testDecSliceString []string +type testDecStructWithSlice struct { + A float32 + B float64 + C []bool + D testDecSliceString + E []int +} + +func TestDecodeStructWithSlice(t *testing.T) { + packet := []byte{edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 (float32) + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 (float64) + edtSlice, + 0x0, 0x0, 0x0, 0x2, // len of []bool + 0x1, 0x0, // true, false + edtReg, // testDecSliceString + 0x0, 0x0, 0x0, 0x2, // len of testDecSliceString + 0x0, 0x4, // len of "true" + 0x74, 0x72, 0x75, 0x65, // "true" + 0x0, 0x5, // len of "false" + 0x66, 0x61, 
0x6c, 0x73, 0x65, // "false" + edtNil, // nil value of []int + } + expect := testDecStructWithSlice{ + 3.16, + 3.15, + []bool{true, false}, + testDecSliceString{"true", "false"}, + nil, + } + + RegisterTypeOf(testDecSliceString{}) + RegisterTypeOf(testDecStructWithSlice{}) + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testDecStructWithSlice") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceStructWithAny(t *testing.T) { + packet := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 3, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + edtFloat64, 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + expect := []testDecStructWithAny{ + {3.16, 3.15, nil}, + {3.16, 3.15, float32(3.16)}, + {3.15, 3.14, float64(3.14)}, + } + RegisterTypeOf(testDecStructWithAny{}) + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testDecStructWithAny") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyWithStruct(t *testing.T) { + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + edtNil, + edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, + edtReg, 0x13, 0x88, + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + expect := []any{ + nil, + testDecStruct{3.16, 3.15}, + nil, + testDecStruct{3.15, 3.14}, + } + RegisterTypeOf(testDecStruct{}) + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testDecStruct") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMap(t *testing.T) { + expect := map[int16]string{ + 8: "hello", + 9: "world", + } + + packet := []byte{edtType, 0, 3, + edtMap, + edtInt16, + edtString, + edtMap, + 0, 0, 0, 2, + 0, 8, // key 8 + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 9, // key 9 + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMapAnyString(t *testing.T) { + expect := map[any]string{ + nil: "hello", + int16(9): "world", + } + + packet := []byte{edtType, 0, 3, + edtMap, + edtAny, + edtString, + edtMap, + 0, 0, 0, 2, + edtNil, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtInt16, 0, 9, // key 9 + 0, 5, // len of value "world" + 0x77, 0x6f, 
0x72, 0x6c, 0x64, // "world" + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMapStringAny(t *testing.T) { + expect := map[string]any{ + "hello": nil, + "world": int16(9), + "helloo": map[float32]any{ + 3.16: uint16(3), + }, + "worldd": map[float64]any{}, + } + + packet := []byte{edtType, 0, 3, + edtMap, + edtString, + edtAny, + edtMap, 0, 0, 0, 4, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtInt16, 0, 9, // value 9 + 0, 6, // len of value "helloo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, // "helloo" + edtType, 0, 3, + edtMap, edtFloat32, edtAny, + edtMap, 0, 0, 0, 1, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtUint16, 0, 3, // value 3 + 0, 6, // len of value "worldd" + 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x64, // "worldd" + edtType, 0, 3, + edtMap, edtFloat64, edtAny, + edtMap, 0, 0, 0, 0, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMapStringMapAny(t *testing.T) { + expect := map[string]map[any]int16{ + "hello": nil, + "world": { + float32(3.16): int16(9), + }, + } + + packet := []byte{edtType, 0, 5, + edtMap, + edtString, + edtMap, + edtAny, + edtInt16, + edtMap, 0, 0, 0, 2, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtMap, 0, 0, 0, 1, + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0, 9, // 9 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} +func TestDecodeMapStringMapNilZero(t *testing.T) { + expect := map[string]map[any]int16{ + "hello": nil, + "world": {}, + } + + packet := []byte{edtType, 0, 5, + edtMap, + edtString, + edtMap, + edtAny, + edtInt16, + edtMap, 0, 0, 0, 2, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtMap, 0, 0, 0, 0, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMap3DZero(t *testing.T) { + expect := map[int16]map[string]map[float32]int{} + packet := []byte{edtType, 0, 7, + edtMap, + edtInt16, + edtMap, + edtString, + edtMap, + edtFloat32, + edtInt, + edtMap, 0, 0, 0, 0, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMapZero(t *testing.T) { + expect := map[int16]string{} + + packet := []byte{edtType, 0, 3, + edtMap, + edtInt16, edtString, + edtMap, 0, 0, 0, 0, + } + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) +
t.Fatal("incorrect value") + } +} + +func TestDecodeSliceMap(t *testing.T) { + expect := []map[int16]string{ + { + 8: "hello", + }, { + + 10: "helloo", + }, + { + 12: "hellooo", + }, + } + + packet := []byte{edtType, 0, 4, + edtSlice, + edtMap, + edtInt16, + edtString, + edtSlice, + 0, 0, 0, 3, + edtMap, + 0, 0, 0, 1, + 0, 8, // key 8 + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtMap, + 0, 0, 0, 1, // len of second map + 0, 0xa, // key 10 + 0, 6, // len of value "helloo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, // "helloo" + edtMap, + 0, 0, 0, 1, // len of 3rd map + 0, 0xc, // key 12 + 0, 7, // len of value "helloo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, 0x6f, // "hellooo" + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} +func TestDecodeMapValueSliceNil(t *testing.T) { + expect := map[int16][]any{ + int16(8): nil, + int16(9): []any{"world"}, + } + + packet := []byte{edtType, 0, 4, + edtMap, + edtInt16, + edtSlice, + edtAny, + edtMap, + 0, 0, 0, 2, + + 0, 8, // key 8 + edtNil, + + 0, 9, // key 9 + edtSlice, + 0, 0, 0, 1, + edtString, 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMapValueMap(t *testing.T) { + expect := map[int16]map[string]int{ + int16(8): nil, + int16(9): { + "world": 10, + }, + } + + packet := []byte{edtType, 0, 5, + edtMap, + edtInt16, + edtMap, + edtString, + edtInt, + edtMap, + 0, 0, 0, 2, + + 0, 8, // key + edtNil, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // key 10 + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +type testMapKey string + +func TestDecodeMapValueMapRegKey(t *testing.T) { + var x testMapKey + + if err := RegisterTypeOf(x); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testMapKey") + + expect := map[int16]map[testMapKey]int{ + int16(8): nil, + int16(9): { + "world": 10, + }, + } + + packet := []byte{edtType, 0, 7, + edtMap, + edtInt16, + edtMap, + edtReg, 0x13, 0x88, + edtInt, + + edtMap, + 0, 0, 0, 2, + + 0, 8, // 8 => map + edtNil, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + } + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeMapValueMapAnyWithRegKey(t *testing.T) { + var x testMapKey = "world" + + if err := RegisterTypeOf(x); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/testMapKey") + + expect := map[int16]map[any]int{ + int16(8): nil, + 
int16(9): { + x: 10, + }, + } + + packet := []byte{edtType, 0, 5, + edtMap, + edtInt16, + edtMap, + edtAny, + edtInt, + + edtMap, + 0, 0, 0, 2, + + 0, 8, // 8 => map + edtNil, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + edtReg, 0x13, 0x88, 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + } + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +type MyMap map[string]bool + +func TestDecodeRegMap(t *testing.T) { + var x MyMap + + if err := RegisterTypeOf(x); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/MyMap") + + expect := MyMap{ + "hello": true, + "world": false, + } + packet := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 2, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 1, // true + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, // false + } + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRegMapRegSlice(t *testing.T) { + type myDecSlice90 []bool + type myDecMap90 map[string]myDecSlice90 + + RegisterTypeOf(myDecSlice90{}) + RegisterTypeOf(myDecMap90{}) + + regCache := new(sync.Map) + regCache.Store(uint16(5000), "#ergo.services/ergo/net/edf/myDecMap90") + + expect := myDecMap90{ + "world": nil, + "hello": {true, false, true}, + } + + packet := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 2, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtReg, + 0, 0, 0, 3, + 1, 0, 1, // true, false, true + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtNil, + } + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeArray(t *testing.T) { + expect := [2]string{ + "hello", "world", + } + packet := []byte{edtType, 0, 6, + edtArray, 0, 0, 0, 2, + edtString, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeArrayZero(t *testing.T) { + expect := [0]string{} + packet := []byte{edtType, 0, 6, + edtArray, 0, 0, 0, 0, + edtString, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceArray(t *testing.T) { + expect := [][2]string{ + {"hello", "world"}, + } + packet := []byte{edtType, 0, 7, + edtSlice, + edtArray, 0, 0, 0, 2, + edtString, + edtSlice, + 0, 0, 0, 1, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of 
value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeSliceAnyArray(t *testing.T) { + expect := []any{ + nil, + [2]string{"hello", "world"}, + nil, + } + packet := []byte{edtType, 0, 2, + edtSlice, + edtAny, + + edtSlice, + 0, 0, 0, 3, + + edtNil, + + edtType, 0, 6, + edtArray, 0, 0, 0, 2, + edtString, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + + edtNil, + } + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +type myArrayDec [2]string + +func TestDecodeRegArray(t *testing.T) { + expect := myArrayDec{"hello", "world"} + + packet := []byte{edtReg, + 0, 38, // len of the type name #ergo.services/ergo/net/edf/myArrayDec + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x6d, 0x79, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x44, 0x65, 0x63, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + RegisterTypeOf(expect) + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeArrayReg(t *testing.T) { + // type myArrayStr is declared in encode_test.go (before the TestEncodeArrayReg test) + expect := [2]myArrayStr{"hello", "world"} + + packet := []byte{edtType, 0, 46, + edtArray, 0, 0, 0, 2, + edtReg, + 0, 38, // len of the type name #ergo.services/ergo/net/edf/myArrayStr + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x6d, 0x79, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x53, 0x74, 0x72, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + RegisterTypeOf(expect[0]) + + value, _, err := Decode(packet, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} + +func TestDecodeRegArrayRegArray(t *testing.T) { + type myDecArrayMyStr1 [2]string + type myDecArrayArray1 [3]myDecArrayMyStr1 + + expect := myDecArrayArray1{ + {"hello", "world"}, + {"", ""}, + {"world", "hello"}, + } + + packet := []byte{edtReg, 0x13, 0x88, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, + 0, 0, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + } + + RegisterTypeOf(myDecArrayMyStr1{}) + RegisterTypeOf(myDecArrayArray1{}) + + regCache := new(sync.Map) + regCache.Store(uint16(5000), 
"#ergo.services/ergo/net/edf/myDecArrayArray1") + + value, _, err := Decode(packet, Options{RegCache: regCache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", value) + t.Fatal("incorrect value") + } +} diff --git a/net/edf/edf.go b/net/edf/edf.go new file mode 100644 index 00000000..17441e6a --- /dev/null +++ b/net/edf/edf.go @@ -0,0 +1,59 @@ +package edf + +import ( + "io" + "sync" +) + +// Options for encoding/decoding +type Options struct { + AtomCache *sync.Map // atom => id (encoding), id => atom (decoding) + AtomMapping *sync.Map // atomX => atomY (encoding/decoding) + RegCache *sync.Map // type/name => id (encoding), id => type (for decoding) + ErrCache *sync.Map // error => id (for encoder), id => error (for decoder) + Cache *sync.Map // common cache (caching reflect.Type => encoder, string([]byte) => decoder) +} + +const ( + edtType = byte(130) // 0x82 + edtReg = byte(131) // 0x83 + edtAny = byte(132) // 0x84 + + edtAtom = byte(140) // 0x8c + edtString = byte(141) // 0x8d + edtBinary = byte(142) // 0x8e + edtFloat32 = byte(143) // 0x8f + edtFloat64 = byte(144) // 0x90 + edtBool = byte(145) // 0x91 + edtInt8 = byte(146) // 0x92 + edtInt16 = byte(147) // 0x93 + edtInt32 = byte(148) // 0x94 + edtInt64 = byte(149) // 0x95 + edtInt = byte(150) // 0x96 + edtUint8 = byte(151) // 0x97 + edtUint16 = byte(152) // 0x98 + edtUint32 = byte(153) // 0x99 + edtUint64 = byte(154) // 0x9a + edtUint = byte(155) // 0x9b + edtError = byte(156) // 0x9c + edtSlice = byte(157) // 0x9d + edtArray = byte(158) // 0x9e + edtMap = byte(159) // 0x9f + + edtPID = byte(170) // 0xaa + edtProcessID = byte(171) // 0xab + edtAlias = byte(172) // 0xac + edtEvent = byte(173) // 0xad + edtRef = byte(174) // 0xae + edtTime = byte(175) // 0xaf + + edtNil = byte(255) // 0xff +) + +type Marshaler interface { + MarshalEDF(io.Writer) error +} + +type Unmarshaler interface { + UnmarshalEDF([]byte) error +} diff --git a/net/edf/edf_test.go b/net/edf/edf_test.go new file mode 100644 index 00000000..9f682af0 --- /dev/null +++ b/net/edf/edf_test.go @@ -0,0 +1,40 @@ +package edf + +type integerCase struct { + name string + integer any + bin []byte +} + +func integerCases() []integerCase { + + return []integerCase{ + // + // unsigned integers + // + {"uint8::255", uint8(255), []byte{edtUint8, 255}}, + {"uint16::65535", uint16(65535), []byte{edtUint16, 255, 255}}, + {"uint32::4294967295", uint32(4294967295), []byte{edtUint32, 255, 255, 255, 255}}, + {"uint64::18446744073709551615", uint64(18446744073709551615), []byte{edtUint64, 255, 255, 255, 255, 255, 255, 255, 255}}, + + // fails on 32bit arch + {"uint::18446744073709551615", uint(18446744073709551615), []byte{edtUint, 255, 255, 255, 255, 255, 255, 255, 255}}, + + // + // signed integers + // + + {"int8::-127", int8(-127), []byte{edtInt8, 129}}, + {"int8::127", int8(127), []byte{edtInt8, 127}}, + {"int16::-32767", int16(-32767), []byte{edtInt16, 128, 1}}, + {"int16::32767", int16(32767), []byte{edtInt16, 127, 255}}, + {"int32::-2147483647", int32(-2147483647), []byte{edtInt32, 128, 0, 0, 1}}, + {"int32::2147483647", int32(2147483647), []byte{edtInt32, 127, 255, 255, 255}}, + {"int64::-9223372036854775807", int64(-9223372036854775807), []byte{edtInt64, 128, 0, 0, 0, 0, 0, 0, 1}}, + {"int64::9223372036854775807", int64(9223372036854775807), []byte{edtInt64, 127, 255, 255, 255, 255, 255, 255, 255}}, + // fails on 32bit arch + {"int::-9223372036854775807", int(-9223372036854775807), 
[]byte{edtInt, 128, 0, 0, 0, 0, 0, 0, 1}}, + // fails on 32bit arch + {"int::9223372036854775807", int(9223372036854775807), []byte{edtInt, 127, 255, 255, 255, 255, 255, 255, 255}}, + } +} diff --git a/net/edf/encode.go b/net/edf/encode.go new file mode 100644 index 00000000..d2894dd6 --- /dev/null +++ b/net/edf/encode.go @@ -0,0 +1,644 @@ +package edf + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + ErrBinaryTooLong = fmt.Errorf("binary too long - max allowed length is 2^32-1 bytes (4GB)") + ErrStringTooLong = fmt.Errorf("string too long - max allowed length is 2^16-1 (65535) bytes") + ErrAtomTooLong = fmt.Errorf("atom too long - max allowed length is 255 bytes") + ErrErrorTooLong = fmt.Errorf("error too long - max allowed length is 32767 bytes") +) + +type stateEncode struct { + child *stateEncode + + // TODO loop detection (in slices) + //loop map[unsafe.Pointer]struct{} + //ptr unsafe.Pointer + + encodeType bool + + options Options + encoder encoder +} + +// Encode encodes the given value x into the buffer b using the provided options. +func Encode(x any, b *lib.Buffer, options Options) (ret error) { + if x == nil { + return fmt.Errorf("nothing to encode") + } + state := &stateEncode{ + options: options, + // TODO + //loop: make(map[unsafe.Pointer]struct{}), + } + + xv := reflect.ValueOf(x) + enc, err := getEncoder(xv.Type(), state) + if err != nil { + return err + } + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + ret = fmt.Errorf("%v", r) + } + }() + } + + l := len(enc.Prefix) + if l > 1 && enc.Prefix[0] != edtReg { + buf := b.Extend(3) + buf[0] = edtType + binary.BigEndian.PutUint16(buf[1:3], uint16(l)) + } + + b.Append(enc.Prefix) + return enc.Encode(xv, b, state) +} + +func getEncoder(t reflect.Type, state *stateEncode) (*encoder, error) { + // look in the common cache + if state.options.Cache != nil { + if v, found := state.options.Cache.Load(t); found { + return v.(*encoder), nil + } + } + + // try to find by the type + if v, found := encoders.Load(t); found { + enc := v.(*encoder) + + if state.options.RegCache == nil { + if state.options.Cache == nil { + return enc, nil + } + // store it in the cache + state.options.Cache.Store(t, v) + return enc, nil + } + + // check if this type is a registered one. + if v, found := state.options.RegCache.Load(t); found { + cachedenc := &encoder{ + Prefix: v.([]byte), // use cache ID (3 bytes only) instead of the full name + Encode: enc.Encode, + } + if state.options.Cache == nil { + return cachedenc, nil + } + // store encoder with cached prefix in the cache + state.options.Cache.Store(t, cachedenc) + return cachedenc, nil + } + + if state.options.Cache == nil { + return enc, nil + } + + // store it in the cache + state.options.Cache.Store(t, v) + return enc, nil + } + + kind := t.Kind() + switch kind { + case reflect.Map: + encKey, err := getEncoder(t.Key(), state) + if err != nil { + return nil, err + } + encItem, err := getEncoder(t.Elem(), state) + if err != nil { + return nil, err + } + + keyPrefix := encKey.Prefix + if state.options.RegCache != nil { + if v, found := state.options.RegCache.Load(t.Key()); found { + keyPrefix = v.([]byte) + } + } + itemPrefix := encItem.Prefix + if state.options.RegCache != nil { + if v, found := state.options.RegCache.Load(t.Elem()); found { + itemPrefix = v.([]byte) + } + } + + prefix := append([]byte{edtMap}, keyPrefix...) + prefix = append(prefix, itemPrefix...)
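+ // A sketch of the composed prefix, grounded in decode_test.go: for a + // map[int16]string the bytes built above are {edtMap, edtInt16, edtString}; + // Encode announces them once as {edtType, 0, 3, edtMap, edtInt16, edtString}, + // which is exactly the header TestDecodeMap expects on the wire.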
+ + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + buf := b.Extend(3) + buf[0] = edtType + binary.BigEndian.PutUint16(buf[1:3], uint16(len(prefix))) + b.Append(prefix) + } + if value.IsNil() { + b.AppendByte(edtNil) + return nil + } else { + b.AppendByte(edtMap) + } + + if state.child == nil { + state.child = &stateEncode{ + options: state.options, + } + } + state = state.child + + n := value.Len() + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(n)) + + iter := value.MapRange() + for iter.Next() { + state.encodeType = false + if err := encKey.Encode(iter.Key(), b, state); err != nil { + return err + } + state.encodeType = false + if err := encItem.Encode(iter.Value(), b, state); err != nil { + return err + } + } + + return nil + } + + enc := &encoder{ + Prefix: prefix, + Encode: fenc, + } + + if state.options.Cache != nil { + state.options.Cache.Store(t, enc) + } + + return enc, nil + + case reflect.Slice: + encItem, err := getEncoder(t.Elem(), state) + if err != nil { + return nil, err + } + + itemPrefix := encItem.Prefix + if state.options.RegCache != nil { + if v, found := state.options.RegCache.Load(t.Elem()); found { + itemPrefix = v.([]byte) + } + } + prefix := append([]byte{edtSlice}, itemPrefix...) + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + buf := b.Extend(3) + buf[0] = edtType + binary.BigEndian.PutUint16(buf[1:3], uint16(len(prefix))) + b.Append(prefix) + } + + if value.IsNil() { + b.AppendByte(edtNil) + return nil + } else { + b.AppendByte(edtSlice) + } + + if state.child == nil { + state.child = &stateEncode{ + options: state.options, + } + } + state = state.child + + n := value.Len() + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(n)) + + for i := 0; i < n; i++ { + state.encodeType = false + if err := encItem.Encode(value.Index(i), b, state); err != nil { + return err + } + } + + return nil + } + + enc := &encoder{ + Prefix: prefix, + Encode: fenc, + } + if state.options.Cache != nil { + state.options.Cache.Store(t, enc) + } + return enc, nil + + case reflect.Array: + encItem, err := getEncoder(t.Elem(), state) + if err != nil { + return nil, err + } + + itemPrefix := encItem.Prefix + if state.options.RegCache != nil { + if v, found := state.options.RegCache.Load(t.Elem()); found { + itemPrefix = v.([]byte) + } + } + + l := t.Len() + if l > math.MaxUint32 { + return nil, fmt.Errorf("too big array size (allowed max: %d)", math.MaxUint32) + } + + prefix := append([]byte{edtArray, 0, 0, 0, 0}, itemPrefix...) 
+ binary.BigEndian.PutUint32(prefix[1:5], uint32(l)) + + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + buf := b.Extend(3) + buf[0] = edtType + binary.BigEndian.PutUint16(buf[1:3], uint16(len(prefix))) + b.Append(prefix) + } + + if state.child == nil { + state.child = &stateEncode{ + options: state.options, + } + } + state = state.child + + for i := 0; i < l; i++ { + state.encodeType = false + if err := encItem.Encode(value.Index(i), b, state); err != nil { + return err + } + } + + return nil + } + + enc := &encoder{ + Prefix: prefix, + Encode: fenc, + } + if state.options.Cache != nil { + state.options.Cache.Store(t, enc) + } + return enc, nil + + case reflect.Pointer: + return nil, fmt.Errorf("pointer type is not supported") + } + + // look among the standard types + v, found := encoders.Load(kind) + if !found { + return nil, fmt.Errorf("no encoder for type %v", t) + } + + enc := v.(*encoder) + // cache the kind-based encoder under the concrete type for faster lookups + encoders.Store(t, v) + + return enc, nil +} +
+func encodePID(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtPID) + } + + pid := value.Interface().(gen.PID) + writeAtom(pid.Node, b, state) + buf := b.Extend(16) + binary.BigEndian.PutUint64(buf[:8], pid.ID) + binary.BigEndian.PutUint64(buf[8:16], uint64(pid.Creation)) + return nil +} +
+func encodeProcessID(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtProcessID) + } + p := value.Interface().(gen.ProcessID) + writeAtom(p.Node, b, state) + writeAtom(p.Name, b, state) + return nil +} +
+func encodeRef(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtRef) + } + r := value.Interface().(gen.Ref) + writeAtom(r.Node, b, state) + // 8 (creation) + 24 ([3]uint64) + buf := b.Extend(8 + 24) + binary.BigEndian.PutUint64(buf[0:8], uint64(r.Creation)) + binary.BigEndian.PutUint64(buf[8:16], r.ID[0]) + binary.BigEndian.PutUint64(buf[16:24], r.ID[1]) + binary.BigEndian.PutUint64(buf[24:32], r.ID[2]) + return nil +} +
+func encodeAlias(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtAlias) + } + a := value.Interface().(gen.Alias) + writeAtom(a.Node, b, state) + // 8 (creation) + 24 ([3]uint64) + buf := b.Extend(8 + 24) + binary.BigEndian.PutUint64(buf[0:8], uint64(a.Creation)) + binary.BigEndian.PutUint64(buf[8:16], a.ID[0]) + binary.BigEndian.PutUint64(buf[16:24], a.ID[1]) + binary.BigEndian.PutUint64(buf[24:32], a.ID[2]) + return nil +} +
+func encodeEvent(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtEvent) + } + e := value.Interface().(gen.Event) + writeAtom(e.Node, b, state) + writeAtom(e.Name, b, state) + return nil +} +
+func encodeAny(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if value.IsNil() { + b.AppendByte(edtNil) + return nil + } + + // reset any cached child state before dispatching to the concrete encoder + state.child = nil + enc, err := getEncoder(value.Elem().Type(), state) + if err != nil { + return err + } + + state.encodeType = true + return enc.Encode(value.Elem(), b, state) +} +
+func encodeTime(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtTime) + } + bin, err := value.Interface().(time.Time).MarshalBinary() + if err != nil { + return err + } + lbin := len(bin) + buf := b.Extend(1 + lbin) + buf[0] = byte(lbin) + copy(buf[1:],
bin) + return nil +} + +func encodeAtom(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + atom := value.Interface().(gen.Atom) + if len(atom) > 255 { + return ErrAtomTooLong + } + + if state.encodeType { + b.AppendByte(edtAtom) + } + + writeAtom(atom, b, state) + return nil +} + +func encodeBool(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtBool) + } + if value.Bool() { + b.AppendByte(1) + return nil + } + b.AppendByte(0) + return nil +} +func encodeString(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + v := value.String() + lenString := len(v) + if lenString > math.MaxUint16 { + return ErrStringTooLong + } + + if state.encodeType { + b.AppendByte(edtString) + } + buf := b.Extend(2) + binary.BigEndian.PutUint16(buf, uint16(lenString)) + b.AppendString(v) + return nil +} + +func encodeBinary(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + v := value.Bytes() + lenBinary := len(v) + if lenBinary > math.MaxUint32 { + return ErrBinaryTooLong + } + + if state.encodeType { + b.AppendByte(edtBinary) + } + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(lenBinary)) + b.Append(v) + return nil +} + +func encodeInt(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtInt) + } + buf := b.Extend(8) + binary.BigEndian.PutUint64(buf, uint64(value.Int())) + return nil +} + +func encodeInt8(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtInt8) + } + b.AppendByte(byte(value.Int())) + return nil +} + +func encodeInt16(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtInt16) + } + buf := b.Extend(2) + binary.BigEndian.PutUint16(buf, uint16(value.Int())) + return nil +} + +func encodeInt32(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtInt32) + } + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(value.Int())) + return nil +} + +func encodeInt64(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtInt64) + } + buf := b.Extend(8) + binary.BigEndian.PutUint64(buf, uint64(value.Int())) + return nil +} + +func encodeUint(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtUint) + } + buf := b.Extend(8) + binary.BigEndian.PutUint64(buf, value.Uint()) + return nil +} + +func encodeUint8(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtUint8) + } + b.AppendByte(byte(value.Uint())) + return nil +} + +func encodeUint16(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtUint16) + } + buf := b.Extend(2) + binary.BigEndian.PutUint16(buf, uint16(value.Uint())) + return nil +} + +func encodeUint32(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtUint32) + } + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(value.Uint())) + return nil +} + +func encodeUint64(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtUint64) + } + buf := b.Extend(8) + binary.BigEndian.PutUint64(buf, value.Uint()) + return nil +} + +func encodeFloat32(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + 
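// the one-byte type marker is written only when the caller requests it: Encode emits + // the type prefix itself for top-level values, while encodeAny sets encodeType for + // values wrapped in an any interface + 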
b.AppendByte(edtFloat32) + } + + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, math.Float32bits(float32(value.Float()))) + return nil +} +
+func encodeFloat64(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.encodeType { + b.AppendByte(edtFloat64) + } + buf := b.Extend(8) + binary.BigEndian.PutUint64(buf, math.Float64bits(value.Float())) + return nil +} +
+func encodeError(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if value.IsNil() { + b.Append([]byte{0xff, 0xff}) + return nil + } + + if state.encodeType { + b.AppendByte(edtError) + } + + err := value.Interface().(error) + if state.options.ErrCache != nil { + if x, found := state.options.ErrCache.Load(err); found { + id := x.(uint16) + // error cache id MUST be > math.MaxInt16, otherwise the error is encoded as a regular string + if id > math.MaxInt16 { + buf := b.Extend(2) + binary.BigEndian.PutUint16(buf, id) + return nil + } + } + } + + estr := err.Error() + lenErr := len(estr) + if lenErr > math.MaxInt16 { + return ErrErrorTooLong + } + + buf := b.Extend(2 + lenErr) + binary.BigEndian.PutUint16(buf[:2], uint16(lenErr)) + copy(buf[2:], estr) + + return nil +} +
+func writeAtom(atom gen.Atom, b *lib.Buffer, state *stateEncode) { + // replace the atom value if a mapping is defined for it + if state.options.AtomMapping != nil { + v, found := state.options.AtomMapping.Load(atom) + if found { + atom = v.(gen.Atom) + } + } + + if state.options.AtomCache != nil { + if x, found := state.options.AtomCache.Load(atom); found { + id := x.(uint16) + // atom cache id MUST be > 255, otherwise encode as a regular atom + if id > 255 { + buf := b.Extend(2) + binary.BigEndian.PutUint16(buf, id) + return + } + } + } + + lenAtom := len(atom) + buf := b.Extend(2 + lenAtom) + binary.BigEndian.PutUint16(buf[:2], uint16(lenAtom)) + copy(buf[2:], atom) +} diff --git a/net/edf/encode_test.go b/net/edf/encode_test.go new file mode 100644 index 00000000..c787d094 --- /dev/null +++ b/net/edf/encode_test.go @@ -0,0 +1,3917 @@ +package edf + +import ( + "errors" + "fmt" + "io" + "reflect" + "sync" + "testing" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) +
+func TestEncodeBool(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + if err := Encode(false, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, []byte{edtBool, 0}) { + t.Fatal("incorrect value") + } + + b.Reset() + if err := Encode(true, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, []byte{edtBool, 1}) { + t.Fatal("incorrect value") + } +} +
+func TestEncodeSliceBool(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []bool{false, true, false} + expect := []byte{edtType, 0, 2, + edtSlice, edtBool, + edtSlice, + 0, 0, 0, 3, + 0, 1, 0, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} +
+func TestEncodeSliceAnyBool(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{false, true, false} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtBool, 0, + edtBool, 1, + edtBool, 0, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeAtom(t
*testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := gen.Atom("hello world") + expect := []byte{edtAtom, + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeAtomCache(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := gen.Atom("hello world") + expect := []byte{edtAtom, + 0x01, 0x2c, // cached "hello world" => 300 + } + + atomCache := new(sync.Map) + atomCache.Store(value, uint16(300)) + + if err := Encode(value, b, Options{AtomCache: atomCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeAtomMapping(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := gen.Atom("hello world") + mapped := gen.Atom("hi") + expect := []byte{edtAtom, + 0, 0x02, // len + 0x68, 0x69, // "hi" + } + + atomMapping := new(sync.Map) + atomMapping.Store(value, mapped) + + if err := Encode(value, b, Options{AtomMapping: atomMapping}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeAtomMappingCache(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := gen.Atom("hello world") + mapped := gen.Atom("hi") + expect := []byte{edtAtom, + 0x01, 0x2c, // mapped "hello world" => "hi", cached "hi" => 300 + } + + atomMapping := new(sync.Map) + atomMapping.Store(value, mapped) + atomCache := new(sync.Map) + atomCache.Store(mapped, uint16(300)) + + if err := Encode(value, b, Options{AtomCache: atomCache, AtomMapping: atomMapping}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAtom(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + v := gen.Atom("hello world") + value := []gen.Atom{ + v, v, v, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAtom, + edtSlice, + 0, 0, 0, 3, + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAtomCache(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + v := gen.Atom("hello world") + value := []gen.Atom{ + v, v, v, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAtom, + edtSlice, + 0, 0, 0, 3, + 0x01, 0x2c, // cached "hello world" => 300 + 0x01, 0x2c, // cached "hello world" => 300 + 0x01, 0x2c, // cached "hello world" => 300 + } + + atomCache := new(sync.Map) + atomCache.Store(v, uint16(300)) + + if err := Encode(value, b, Options{AtomCache: atomCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, 
expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyAtom(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + v := gen.Atom("hello world") + value := []any{ + v, nil, v, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtAtom, 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + edtNil, + edtAtom, 0, 0x0b, // len + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, // "hello world" + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyAtomCache(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + v := gen.Atom("hello world") + value := []any{ + v, nil, v, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtAtom, 0x01, 0x2c, // cached "hello world" => 300 + edtNil, + edtAtom, 0x01, 0x2c, // cached "hello world" => 300 + } + + atomCache := new(sync.Map) + atomCache.Store(v, uint16(300)) + + if err := Encode(value, b, Options{AtomCache: atomCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeString(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := "abc" + expect := []byte{edtString, 0, 3, 97, 98, 99} + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceString(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := []string{"abc", "def", "ghi"} + expect := []byte{edtType, 0, 2, + edtSlice, + edtString, + edtSlice, + 0, 0, 0, 3, + 0, 3, 97, 98, 99, // "abc" + 0, 3, 100, 101, 102, // "def" + 0, 3, 103, 104, 105, // "ghi" + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyString(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := []any{"abc", "def", "ghi"} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtString, 0, 3, 97, 98, 99, // "abc" + edtString, 0, 3, 100, 101, 102, // "def" + edtString, 0, 3, 103, 104, 105, // "ghi" + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeBinary(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []byte{1, 2, 3, 4, 5} + expect := []byte{edtBinary, + 0x0, 0x0, 0x0, 0x05, // len + 0x1, 0x2, 0x3, 0x4, 0x5, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceBinary(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := [][]byte{{1, 2, 3, 4, 5}, {6, 7, 
8}, {9}} + expect := []byte{edtType, 0, 2, + edtSlice, + edtBinary, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x0, 0x0, 0x05, // len + 0x1, 0x2, 0x3, 0x4, 0x5, + 0x0, 0x0, 0x0, 0x03, // len + 0x6, 0x7, 0x8, + 0x0, 0x0, 0x0, 0x01, // len + 0x9, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyBinary(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{[]byte{1, 2, 3, 4, 5}, []byte{6, 7, 8}, []byte{9}} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtBinary, 0x0, 0x0, 0x0, 0x05, // len + 0x1, 0x2, 0x3, 0x4, 0x5, + edtBinary, 0x0, 0x0, 0x0, 0x03, // len + 0x6, 0x7, 0x8, + edtBinary, 0x0, 0x0, 0x0, 0x01, // len + 0x9, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeFloat32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + if err := Encode(float32(3.14), b, Options{}); err != nil { + t.Fatal(err) + } + + expect := []byte{edtFloat32, 64, 72, 245, 195} + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceFloat32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []float32{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 2, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 3, + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyFloat32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{float32(3.14), float32(3.15), float32(3.16)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeFloat64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtFloat64, 64, 9, 30, 184, 81, 235, 133, 31} + + if err := Encode(float64(3.14), b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Println("exp", expect) + fmt.Println("got", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceFloat64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []float64{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 2, + edtSlice, + edtFloat64, + edtSlice, + 0, 0, 0, 3, + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + 0x40, 0x9, 0x47, 0xae, 0x14, 0x7a, 0xe1, 0x48, // 3.16 + + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + 
fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyFloat64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{float64(3.14), float64(3.15), float64(3.16)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtFloat64, 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat64, 0x40, 0x9, 0x47, 0xae, 0x14, 0x7a, 0xe1, 0x48, // 3.16 + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeInteger(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + for _, c := range integerCases() { + t.Run(c.name, func(t *testing.T) { + b.Reset() + + if err := Encode(c.integer, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, c.bin) { + fmt.Printf("exp %#v\n", c.bin) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + }) + } +} + +func TestEncodeSliceInt(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []int{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtInt, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Println("exp", expect) + fmt.Println("got", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyInt(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{int(1), int(2), int(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt, 0, 0, 0, 0, 0, 0, 0, 1, + edtInt, 0, 0, 0, 0, 0, 0, 0, 2, + edtInt, 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Println("exp", expect) + fmt.Println("got", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceInt8(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []int8{1, 2, 3, 4, 5} + expect := []byte{edtType, 0, 2, + edtSlice, + edtInt8, + edtSlice, + 0, 0, 0, 5, + 1, 2, 3, 4, 5, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyInt8(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{int8(1), int8(2), int8(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt8, 1, + edtInt8, 2, + edtInt8, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceInt16(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []int16{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtInt16, + edtSlice, + 0, 0, 0, 3, + 0, 1, + 0, 2, + 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") 
+ } +} + +func TestEncodeSliceAnyInt16(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{int16(1), int16(2), int16(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt16, 0, 1, + edtInt16, 0, 2, + edtInt16, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceInt32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []int32{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtInt32, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 1, + 0, 0, 0, 2, + 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyInt32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{int32(1), int32(2), int32(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt32, 0, 0, 0, 1, + edtInt32, 0, 0, 0, 2, + edtInt32, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceInt64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []int64{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtInt64, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyInt64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{int64(1), int64(2), int64(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 1, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 2, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceUint(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []uint{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtUint, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyUint(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{uint(1), uint(2), uint(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint, 0, 0, 0, 0, 0, 0, 0, 1, + edtUint, 0, 0, 0, 0, 0, 0, 0, 2, + edtUint, 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + 
fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceUint8(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []uint8{1, 2, 3, 4, 5} + // since the byte type is the alias to the uint8 + // []byte is the same as []uint8 + expect := []byte{edtBinary, + 0, 0, 0, 5, // len + 1, 2, 3, 4, 5} + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceUint16(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []uint16{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtUint16, + edtSlice, + 0, 0, 0, 3, + 0, 1, + 0, 2, + 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyUint16(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{uint16(1), uint16(2), uint16(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint16, 0, 1, + edtUint16, 0, 2, + edtUint16, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceUint32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []uint32{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtUint32, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 1, + 0, 0, 0, 2, + 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyUint32(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{uint32(1), uint32(2), uint32(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint32, 0, 0, 0, 1, + edtUint32, 0, 0, 0, 2, + edtUint32, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceUint64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []uint64{1, 2, 3} + expect := []byte{edtType, 0, 2, + edtSlice, + edtUint64, + edtSlice, + 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyUint64(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{uint64(1), uint64(2), uint64(3)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 1, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 2, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 3, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + 
fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyInteger(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{ + int(1), nil, int8(2), nil, int16(3), nil, int32(4), nil, int64(5), nil, + uint(6), nil, uint8(7), nil, uint16(8), nil, uint32(9), nil, uint64(10), + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 19, + edtInt, 0, 0, 0, 0, 0, 0, 0, 1, + edtNil, + edtInt8, 2, + edtNil, + edtInt16, 0, 3, + edtNil, + edtInt32, 0, 0, 0, 4, + edtNil, + edtInt64, 0, 0, 0, 0, 0, 0, 0, 5, + edtNil, + edtUint, 0, 0, 0, 0, 0, 0, 0, 6, + edtNil, + edtUint8, 7, + edtNil, + edtUint16, 0, 8, + edtNil, + edtUint32, 0, 0, 0, 9, + edtNil, + edtUint64, 0, 0, 0, 0, 0, 0, 0, 10, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnySlice(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []any{ + []int{4}, + nil, + []float32{3.14, 3.15, 3.16}, + nil, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + + edtType, 0, 2, + edtSlice, edtInt, + edtSlice, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 4, + + edtNil, + + edtType, 0, 2, + edtSlice, edtFloat32, + edtSlice, 0, 0, 0, 3, + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + + edtNil, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeTime(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := time.Date(1399, time.January, 26, 0, 0, 0, 0, time.UTC) + expect := []byte{edtTime, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceTime(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + v := time.Date(1399, time.January, 26, 0, 0, 0, 0, time.UTC) + value := []time.Time{ + v, v, v, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtTime, + edtSlice, + 0, 0, 0, 3, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyTime(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + v := time.Date(1399, time.January, 26, 0, 0, 0, 0, time.UTC) + value := []any{ + v, v, v, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtTime, 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + edtTime, 0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + edtTime, 
0xf, // len + 0x1, 0x0, 0x0, 0x0, 0xa, 0x45, 0xaf, 0x1f, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} +
+func TestEncodeReg(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type MyRegF1 float32 + var value MyRegF1 + value = 3.14 + expect := []byte{edtReg, 0, 35, + // name: #ergo.services/ergo/net/edf/MyRegF1 + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x4d, 0x79, 0x52, 0x65, + 0x67, 0x46, 0x31, + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + } + + RegisterTypeOf(value) + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} +
+func TestEncodeSliceReg(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type MyFloattt float32 + var x MyFloattt + + value := []MyFloattt{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 41, + edtSlice, + edtReg, 0, 37, + // name: #ergo.services/ergo/net/edf/MyFloattt + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x4d, 0x79, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x74, 0x74, + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + RegisterTypeOf(x) + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} +
+func TestEncodeSliceRegCache(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type MyFloat12333 float32 + var x MyFloat12333 + + value := []MyFloat12333{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, // name: #ergo.services/ergo/net/edf/MyFloat12333 => cache id 5000 + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + RegisterTypeOf(x) + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} +
+func TestEncodeRegSlice(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type MySlice99 []float32 + + x := MySlice99{3.14, 3.15, 3.16} + expect := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + if err := RegisterTypeOf(x); err != nil { + t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(x, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeRegSliceRegSlice(t *testing.T) { + 
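// covers nested registered slice types: only the outer type is present in RegCache, + // so it is sent under its cache id while nil inner slices still encode as edtNil + 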
b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type MySliceFloat []float32 + type MySliceOfSlice []MySliceFloat + + x := MySliceOfSlice{ + {3.14, 3.15, 3.16}, + nil, + {3.14}, + } + expect := []byte{edtReg, 0x13, 0x88, + edtReg, + 0x0, 0x0, 0x0, 0x3, + edtSlice, + 0x0, 0x0, 0x0, 0x3, + 0x40, 0x48, 0xf5, 0xc3, + 0x40, 0x49, 0x99, 0x9a, + 0x40, 0x4a, 0x3d, 0x71, + edtNil, + edtSlice, + 0x0, 0x0, 0x0, 0x1, + 0x40, 0x48, 0xf5, 0xc3, + } + + if err := RegisterTypeOf(MySliceOfSlice{}); err != nil { + t.Fatal(err) + } + if err := RegisterTypeOf(MySliceFloat{}); err != nil { + t.Fatal(err) + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(x, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodePID(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtPID, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + } + value := gen.PID{Node: "abc@def", ID: 32767, Creation: 2} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSlicePID(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtPID, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + } + v := gen.PID{Node: "abc@def", ID: 32767, Creation: 2} + value := []gen.PID{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyPID(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtPID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + edtPID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + edtPID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7f, 0xff, // id + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + } + v := gen.PID{Node: "abc@def", ID: 32767, Creation: 2} + value := []any{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + 
fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeProcessID(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtProcessID, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (node name) + 0x67, 0x68, 0x69, + } + value := gen.ProcessID{Node: "abc@def", Name: "ghi"} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceProcessID(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtProcessID, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + } + v := gen.ProcessID{Node: "abc@def", Name: "ghi"} + value := []gen.ProcessID{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyProcessID(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtProcessID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + edtProcessID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (node name) + 0x67, 0x68, 0x69, + edtProcessID, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (node name) + 0x67, 0x68, 0x69, + } + v := gen.ProcessID{Node: "abc@def", Name: "ghi"} + value := []any{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeEvent(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtEvent, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (node name) + 0x67, 0x68, 0x69, + } + value := gen.Event{Node: "abc@def", Name: "ghi"} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceEvent(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtEvent, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // 
len atom (process name) + 0x67, 0x68, 0x69, + } + v := gen.Event{Node: "abc@def", Name: "ghi"} + value := []gen.Event{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyEvent(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtEvent, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (process name) + 0x67, 0x68, 0x69, + edtEvent, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (node name) + 0x67, 0x68, 0x69, + edtEvent, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x3, // len atom (node name) + 0x67, 0x68, 0x69, + } + v := gen.Event{Node: "abc@def", Name: "ghi"} + value := []any{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeRef(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtRef, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + value := gen.Ref{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceRef(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtRef, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + v := gen.Ref{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + value := []gen.Ref{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyRef(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtRef, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtRef, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, 
+ 0x0, 0x0, 0x0, 0x6, + edtRef, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + v := gen.Ref{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + value := []any{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeAlias(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtAlias, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + value := gen.Alias{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAlias(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAlias, + edtSlice, + 0, 0, 0, 3, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + v := gen.Alias{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + value := []gen.Alias{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyAlias(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtAlias, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtAlias, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + edtAlias, 0x0, 0x7, // len atom (node name) + 0x61, 0x62, 0x63, 0x40, 0x64, 0x65, 0x66, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, // creation + 0x0, 0x0, 0x0, 0x4, + 0x0, 0x0, 0x0, 0x5, + 0x0, 0x0, 0x0, 0x6, + } + v := gen.Alias{Node: "abc@def", ID: [3]uint32{4, 5, 6}, Creation: 2} + value := []any{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeError(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtError, + 0, 3, // len + 97, 
98, 99, // "abc" + } + value := errors.New("abc") + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceError(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtError, + edtSlice, + 0, 0, 0, 3, + 0, 4, // len + 97, 98, 99, 100, // "abcd" + 0, 4, // len + 97, 98, 99, 100, // "abcd" + 0, 4, // len + 97, 98, 99, 100, // "abcd" + } + v := errors.New("abcd") + value := []error{v, v, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceErrorNil(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtError, + edtSlice, + 0, 0, 0, 3, + 0, 4, // len + 97, 98, 99, 100, // "abcd" + 0xff, 0xff, // nil error + 0, 4, // len + 97, 98, 99, 100, // "abcd" + } + v := errors.New("abcd") + value := []error{v, nil, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeRegError(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := errors.New("abc") + errCache := new(sync.Map) + errCache.Store(value, uint16(35000)) + + expect := []byte{edtError, + 0x88, 0xb8, // 35000 => error "abc" + } + + if err := Encode(value, b, Options{ErrCache: errCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyError(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtError, 0, 4, // len + 97, 98, 99, 100, // "abcd" + edtNil, + edtError, 0, 4, // len + 97, 98, 99, 100, // "abcd" + } + v := errors.New("abcd") + value := []any{v, nil, v} + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceType(t *testing.T) { + b := lib.TakeBuffer() + + value := []float32{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 2, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + lib.ReleaseBuffer(b) +} + +func TestEncodeSliceTypeReg(t *testing.T) { + type MyFloaaa float32 + var x MyFloaaa + + b := lib.TakeBuffer() + value := []MyFloaaa{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 40, + edtSlice, + edtReg, 0, 36, + // name: #ergo.services/ergo/net/edf/MyFloa + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x4d, 0x79, 0x46, 0x6c, + 0x6f, 0x61, 0x61, 0x61, + 
edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + RegisterTypeOf(x) + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + lib.ReleaseBuffer(b) +} + +func TestEncodeSliceTypeRegCache(t *testing.T) { + type MyFloatE123 float32 + var x MyFloatE123 + + b := lib.TakeBuffer() + value := []MyFloatE123{3.14, 3.15, 3.16} + expect := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, // cache id uint16(5000) => name: #ergo.services/ergo/net/edf/MyFloatE123 + edtSlice, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + RegisterTypeOf(x) + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + lib.ReleaseBuffer(b) +} + +func TestEncodeSliceRegTypeReg(t *testing.T) { + type MyFloatE19 float32 + type MySliceE19 []MyFloatE19 + var x MyFloatE19 + + b := lib.TakeBuffer() + value := MySliceE19{3.14, 3.15, 3.16} + expect := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 3, // len + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + if err := RegisterTypeOf(x); err != nil { + t.Fatal(err) + } + + if err := RegisterTypeOf(value); err != nil { + t.Fatal(err) + } + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x89}) + + opts := Options{ + RegCache: regCache, + } + if err := Encode(value, b, opts); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + lib.ReleaseBuffer(b) +} + +func TestEncodeSliceAny(t *testing.T) { + + b := lib.TakeBuffer() + value := []any{float32(3.14), float64(3.15), float32(3.16)} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + lib.ReleaseBuffer(b) +} + +func TestEncodeSliceNil(t *testing.T) { + b := lib.TakeBuffer() + value := []any{nil, nil, nil} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 3, + edtNil, + edtNil, + edtNil, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSliceNil2(t *testing.T) { + b := lib.TakeBuffer() + value := []any{} + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 0, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) +
fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSliceNest(t *testing.T) { + b := lib.TakeBuffer() + value := []any{ + []any{float32(3.15)}, + float32(3.14), + float32(3.16), + []any{float64(3.15)}, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + edtType, 0, 2, + edtSlice, edtAny, + edtSlice, + 0, 0, 0, 1, edtFloat32, 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtType, 0, 2, + edtSlice, edtAny, + edtSlice, + 0, 0, 0, 1, edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSliceSlice(t *testing.T) { + b := lib.TakeBuffer() + value := [][]float32{ + {3.14, 3.15, 3.16}, + {3.16}, + nil, + {3.14, 3.15}, + {}, + } + expect := []byte{edtType, 0, 3, + edtSlice, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 5, + edtSlice, + 0, 0, 0, 3, // first slice with 3 items + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtSlice, + 0, 0, 0, 1, // second slice with 1 item + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtNil, // third one + edtSlice, + 0, 0, 0, 2, // 4th + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtSlice, + 0, 0, 0, 0, // 5th + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSliceSliceAny(t *testing.T) { + b := lib.TakeBuffer() + value := [][]any{ + {float32(3.14), float32(3.16), float64(3.15)}, + {float64(3.15)}, + nil, + {float32(3.14), float32(3.16)}, + {}, + } + expect := []byte{edtType, 0, 3, + edtSlice, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 5, + edtSlice, + 0, 0, 0, 3, // first slice with 3 items + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtSlice, + 0, 0, 0, 1, // second slice with 1 item + edtFloat64, 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, // third one + edtSlice, + 0, 0, 0, 2, // 4th + edtFloat32, 0x40, 0x48, 0xf5, 0xc3, // 3.14 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtSlice, + 0, 0, 0, 0, // 5th + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSliceSliceNil(t *testing.T) { + b := lib.TakeBuffer() + value := [][]any{nil, []any{}, nil, nil} + expect := []byte{edtType, 0, 3, + edtSlice, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + edtNil, + edtSlice, + 0, 0, 0, 0, + edtNil, + edtNil, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSliceSliceReg(t *testing.T) { + b := lib.TakeBuffer() + + type MySlice1555 []float32 + + if err := RegisterTypeOf(MySlice1555{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + 
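// the cache maps a registered type to a pre-agreed two-byte id (this test picks uint16(5000) = 0x13, 0x88), so the encoder emits three bytes instead of the full type name +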
regCache.Store(reflect.TypeOf(MySlice1555{}), []byte{edtReg, 0x13, 0x88}) + + value := []MySlice1555{ + MySlice1555{3.14, 3.16, 3.15}, + MySlice1555{3.15}, + nil, + MySlice1555{3.14, 3.16}, + MySlice1555{}, + } + expect := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 5, + edtReg, + 0, 0, 0, 3, // first slice with 3 items + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtReg, + 0, 0, 0, 1, // second slice with 1 item + 0x40, 0x49, 0x99, 0x9a, // 3.15 + edtNil, // third one + edtReg, + 0, 0, 0, 2, // 4th + 0x40, 0x48, 0xf5, 0xc3, // 3.14 + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtReg, + 0, 0, 0, 0, // 5th + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSlice3DZero(t *testing.T) { + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := [][][]float32{} + expect := []byte{edtType, 0, 4, + edtSlice, + edtSlice, + edtSlice, + edtFloat32, + edtSlice, + 0, 0, 0, 0, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + +} + +func TestEncodeSlice3D(t *testing.T) { + b := lib.TakeBuffer() + + value := [][][]float32{ /* len 3 */ + { /* len 5 */ + { /* len 7 */ 2.21018848, 2.94523878, 1.67807658, 1.30014748, 1.1873558, 8.1819557, 3.2368748}, + { /* len 10 */ 2.17948558, 2.95483828, 3.29734688, 2.72996818, 2.50011478, 2.98767788, 1.31364818, 8.06395757, 2.53354848, 2.38570578}, + { /* len 4 */ 2.9838078, 1.61728128, 1.8756628, 1.5756598}, + { /* len 10 */ 8.5187367, 2.79348, 4.3456557, 1.29794587, 3.38391948, 1.4460748, 5.0206397, 2.02001097, 1.77825548, 2.33810328}, + { /* len 8 */ 3.15617888, 2.21068618, 3.01507718, 7.0342597, 2.12085158, 7.9914467, 2.92003388, 3.19992137}, + }, { /* len 6 */ + { /* len 3 */ 3.3188187, 2.82300078, 7.3257346}, + { /* len 10 */ 1.47951058, 1.47638718, 3.1678068, 1.24334058, 1.48100658, 1.8274938, 2.07265258, 1.83188888, 5.8776197, 1.64099568}, + { /* len 6 */ 2.26154558, 9.5987497, 3.24544727, 1.34864688, 2.47839448, 2.0456888}, + { /* len 5 */ 9.0369537, 3.69528477, 3.04563028, 1.4488858, 3.80179227}, + { /* len 5 */ 1.53326348, 2.77105168, 1.05977548, 2.75297638, 8.9171847}, + { /* len 10 */ 1.65367358, 9.4070457, 3.06440548, 2.4763148, 2.22120158, 2.3734938, 3.37481478, 2.22900497, 6.2138987, 2.80613798}, + }, { /* len 1 */ + { /* len 10 */ 8.03434337, 2.55059418, 2.20168828, 2.86517478, 4.38993137, 8.6655217, 2.22159657, 3.0119788, 1.19758818, 2.58799087}, + }, + } + + expect := []byte{edtType, 0, 4, + edtSlice, + edtSlice, + edtSlice, + edtFloat32, + + edtSlice, + 0x0, 0x0, 0x0, 0x3, // len 3 { x, x, x} + edtSlice, + 0x0, 0x0, 0x0, 0x5, // len 5 { {y, y, y, y, y}, x, x} + edtSlice, + 0x0, 0x0, 0x0, 0x7, // len 7 { { {z, z, z, z, z, z, z}, y, y, y, y}, x, x} + 0x40, 0xd, 0x73, 0xba, // z + 0x40, 0x3c, 0x7e, 0xcb, // z + 0x3f, 0xd6, 0xcb, 0x37, // z + 0x3f, 0xa6, 0x6b, 0x3c, // z + 0x3f, 0x97, 0xfb, 0x46, // z + 0x41, 0x2, 0xe9, 0x4a, // z + 0x40, 0x4f, 0x28, 0xf5, // z + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x40, 0xb, 0x7c, 0xb1, + 0x40, 0x3d, 0x1c, 0x12, + 0x40, 0x53, 0x7, 0xbb, + 0x40, 0x2e, 0xb7, 0xcc, + 0x40, 0x20, 0x1, 0xe1, + 0x40, 0x3f, 0x36, 0x1d, + 0x3f, 0xa8, 0x25, 0xa0, + 0x41, 0x1, 0x5, 0xf8, +
0x40, 0x22, 0x25, 0xa9, + 0x40, 0x18, 0xaf, 0x67, + edtSlice, + 0x0, 0x0, 0x0, 0x4, // len 4 + 0x40, 0x3e, 0xf6, 0xb5, + 0x3f, 0xcf, 0x3, 0x13, + 0x3f, 0xf0, 0x15, 0xb8, + 0x3f, 0xc9, 0xaf, 0x38, + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x41, 0x8, 0x4c, 0xbf, + 0x40, 0x32, 0xc8, 0x60, + 0x40, 0x8b, 0xf, 0x9d, + 0x3f, 0xa6, 0x23, 0x17, + 0x40, 0x58, 0x92, 0x23, + 0x3f, 0xb9, 0x18, 0xfb, + 0x40, 0xa0, 0xa9, 0x15, + 0x40, 0x1, 0x47, 0xdc, + 0x3f, 0xe3, 0x9d, 0xe0, + 0x40, 0x15, 0xa3, 0x7c, + edtSlice, + 0x0, 0x0, 0x0, 0x8, // len 8 + 0x40, 0x49, 0xfe, 0xd6, + 0x40, 0xd, 0x7b, 0xe2, + 0x40, 0x40, 0xf7, 0x6, + 0x40, 0xe1, 0x18, 0xa8, + 0x40, 0x7, 0xbc, 0x8, + 0x40, 0xff, 0xb9, 0xee, + 0x40, 0x3a, 0xe1, 0xd6, + 0x40, 0x4c, 0xcb, 0x83, + edtSlice, + 0x0, 0x0, 0x0, 0x6, // len 6 + edtSlice, + 0x0, 0x0, 0x0, 0x3, // len 3 + 0x40, 0x54, 0x67, 0x87, + 0x40, 0x34, 0xac, 0xb, + 0x40, 0xea, 0x6c, 0x6b, + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x3f, 0xbd, 0x60, 0x9a, + 0x3f, 0xbc, 0xfa, 0x41, + 0x40, 0x4a, 0xbd, 0x59, + 0x3f, 0x9f, 0x25, 0xc9, + 0x3f, 0xbd, 0x91, 0xa0, + 0x3f, 0xe9, 0xeb, 0x51, + 0x40, 0x4, 0xa6, 0x57, + 0x3f, 0xea, 0x7b, 0x56, + 0x40, 0xbc, 0x15, 0x76, + 0x3f, 0xd2, 0xc, 0x25, + edtSlice, + 0x0, 0x0, 0x0, 0x6, // len 6 + 0x40, 0x10, 0xbd, 0x2a, + 0x41, 0x19, 0x94, 0x7b, + 0x40, 0x4f, 0xb5, 0x68, + 0x3f, 0xac, 0xa0, 0x76, + 0x40, 0x1e, 0x9e, 0x4, + 0x40, 0x2, 0xec, 0x91, + edtSlice, + 0x0, 0x0, 0x0, 0x5, // len 5 + 0x41, 0x10, 0x97, 0x5d, + 0x40, 0x6c, 0x7f, 0x8c, + 0x40, 0x42, 0xeb, 0x9b, + 0x3f, 0xb9, 0x75, 0x17, + 0x40, 0x73, 0x50, 0x91, + edtSlice, + 0x0, 0x0, 0x0, 0x5, // len 5 + 0x3f, 0xc4, 0x41, 0xfa, + 0x40, 0x31, 0x58, 0xe9, + 0x3f, 0x87, 0xa6, 0xb9, + 0x40, 0x30, 0x30, 0xc4, + 0x41, 0xe, 0xac, 0xca, + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x3f, 0xd3, 0xab, 0x93, + 0x41, 0x16, 0x83, 0x42, + 0x40, 0x44, 0x1f, 0x38, + 0x40, 0x1e, 0x7b, 0xf1, + 0x40, 0xe, 0x28, 0x2b, + 0x40, 0x17, 0xe7, 0x53, + 0x40, 0x57, 0xfc, 0xf7, + 0x40, 0xe, 0xa8, 0x4, + 0x40, 0xc6, 0xd8, 0x42, + 0x40, 0x33, 0x97, 0xc4, + edtSlice, + 0x0, 0x0, 0x0, 0x1, // len 1 + edtSlice, + 0x0, 0x0, 0x0, 0xa, // len 10 + 0x41, 0x0, 0x8c, 0xac, + 0x40, 0x23, 0x3c, 0xef, + 0x40, 0xc, 0xe8, 0x76, + 0x40, 0x37, 0x5f, 0x6, + 0x40, 0x8c, 0x7a, 0x51, + 0x41, 0xa, 0xa5, 0xfa, + 0x40, 0xe, 0x2e, 0xa3, + 0x40, 0x40, 0xc4, 0x43, + 0x3f, 0x99, 0x4a, 0x92, + 0x40, 0x25, 0xa1, 0xa4, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + lib.ReleaseBuffer(b) +} + +type testMarshal struct{} + +func (testMarshal) MarshalEDF(w io.Writer) error { + w.Write([]byte{10, 20, 30, 40}) + return nil +} + +func (*testMarshal) UnmarshalEDF(b []byte) error { + return nil +} + +func TestEncodeMarshal(t *testing.T) { + var value testMarshal + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + if err := Encode(value, b, Options{}); err == nil { + t.Fatal("incorrect value") + } + b.Reset() + + if err := RegisterTypeOf(value); err != nil { + t.Fatal(err) + } + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + expect := []byte{edtReg, 0x13, 0x88, + 0, 0, 0, 4, + 10, 20, 30, 40} + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func 
TestEncodeSliceMarshal(t *testing.T) { + x := testMarshal{} + value := []testMarshal{x, x} + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + RegisterTypeOf(x) + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + expect := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 2, // num of elements + 0, 0, 0, 4, // len + 10, 20, 30, 40, + 0, 0, 0, 4, // len + 10, 20, 30, 40, + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +type testStruct struct { + A float32 + B float64 +} + +func TestEncodeStruct(t *testing.T) { + + if err := RegisterTypeOf(testStruct{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := testStruct{3.16, 3.15} + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(value), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + expect := []byte{edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceStruct(t *testing.T) { + + if err := RegisterTypeOf(testStruct{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := []testStruct{{3.16, 3.15}, {3.15, 3.14}} + expect := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 2, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(testStruct{}), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +type testStructWithAny struct { + A float32 + B float64 + C any +} + +func TestEncodeStructWithAny(t *testing.T) { + if err := RegisterTypeOf(testStructWithAny{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := testStructWithAny{3.16, 3.15, nil} + expect := []byte{edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(testStructWithAny{}), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + + b.Reset() + + value = testStructWithAny{3.15, 3.14, float64(3.14)} + expect = []byte{edtReg, 0x13, 0x88, + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + edtFloat64, 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) 
{ + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +type regSliceString []string +type testStructWithSlice struct { + A float32 + B float64 + C []bool + D regSliceString + E []int +} + +func TestEncodeStructWithSlice(t *testing.T) { + if err := RegisterTypeOf(regSliceString{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + if err := RegisterTypeOf(testStructWithSlice{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := testStructWithSlice{ + 3.16, + 3.15, + []bool{true, false}, + regSliceString{"true", "false"}, + nil, + } + + expect := []byte{edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 (float32) + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 (float64) + edtSlice, + 0x0, 0x0, 0x0, 0x2, // len of []bool + 0x1, 0x0, // true, false + edtReg, // regSliceString + 0x0, 0x0, 0x0, 0x2, // len of regSliceString + 0x0, 0x4, // len of "true" + 0x74, 0x72, 0x75, 0x65, // "true" + 0x0, 0x5, // len of "false" + 0x66, 0x61, 0x6c, 0x73, 0x65, // "false" + edtNil, // nil value of []int + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(testStructWithSlice{}), []byte{edtReg, 0x13, 0x88}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceStructWithAny(t *testing.T) { + + if err := RegisterTypeOf(testStructWithAny{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(testStructWithAny{}), []byte{edtReg, 0x13, 0x88}) + + value := []testStructWithAny{ + {3.16, 3.15, nil}, + {3.16, 3.15, float32(3.16)}, + {3.15, 3.14, float64(3.14)}, + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + expect := []byte{edtType, 0, 4, + edtSlice, + edtReg, 0x13, 0x88, + edtSlice, + 0, 0, 0, 3, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtFloat32, 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + edtFloat64, 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyWithStruct(t *testing.T) { + + if err := RegisterTypeOf(testStruct{}); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(testStruct{}), []byte{edtReg, 0x13, 0x88}) + + value := []any{ + nil, + testStruct{3.16, 3.15}, + nil, + testStruct{3.15, 3.14}, + } + + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + edtSlice, + 0, 0, 0, 4, + edtNil, + edtReg, 0x13, 0x88, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + 0x40, 0x9, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // 3.15 + edtNil, + edtReg, 0x13, 0x88, + 0x40, 0x49, 0x99, 0x9a, // 3.15 + 0x40, 0x9, 0x1e, 0xb8, 0x51, 0xeb, 0x85, 0x1f, // 3.14 + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + + if 
!reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeMap(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := map[int16]string{ + 8: "hello", + 9: "world", + } + + expect := []byte{edtType, 0, 3, + edtMap, + edtInt16, + edtString, + edtMap, + 0, 0, 0, 2, + 0, 8, // key 8 + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 9, // key 9 + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + expect2 := []byte{edtType, 0, 3, + edtMap, + edtInt16, + edtString, + edtMap, + 0, 0, 0, 2, + 0, 9, // key 9 + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 8, // key 8 + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeMapAnyString(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := map[any]string{ + nil: "hello", + int16(9): "world", + } + expect := []byte{edtType, 0, 3, + edtMap, + edtAny, + edtString, + edtMap, + 0, 0, 0, 2, + edtNil, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtInt16, 0, 9, // key 9 + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + expect2 := []byte{edtType, 0, 3, + edtMap, + edtAny, + edtString, + edtMap, + 0, 0, 0, 2, + edtInt16, 0, 9, // key 9 + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtNil, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeMapStringAny(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := map[string]any{ + "hello": nil, + "helloo": map[float32]any{ + 3.16: uint16(3), + }, + } + expect := []byte{edtType, 0, 3, + edtMap, + edtString, + edtAny, + edtMap, 0, 0, 0, 2, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + + 0, 6, // len of value "helloo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, // "helloo" + edtType, 0, 3, + edtMap, edtFloat32, edtAny, + edtMap, 0, 0, 0, 1, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtUint16, 0, 3, + } + + expect2 := []byte{edtType, 0, 3, + edtMap, + edtString, + edtAny, + edtMap, 0, 0, 0, 2, + + 0, 6, // len of value "helloo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, // "helloo" + edtType, 0, 3, + edtMap, edtFloat32, edtAny, + edtMap, 0, 0, 0, 1, + 0x40, 0x4a, 0x3d, 0x71, // 3.16 + edtUint16, 0, 3, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } + +} + +func TestEncodeMapStringMapNilZero(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := 
map[string]map[any]int16{ + "hello": nil, + "world": {}, + } + + expect := []byte{edtType, 0, 5, + edtMap, + edtString, + edtMap, + edtAny, + edtInt16, + edtMap, 0, 0, 0, 2, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtMap, 0, 0, 0, 0, + } + + expect2 := []byte{edtType, 0, 5, + edtMap, + edtString, + edtMap, + edtAny, + edtInt16, + edtMap, 0, 0, 0, 2, + + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtMap, 0, 0, 0, 0, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtNil, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeMap3DZero(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := map[int16]map[string]map[float32]int{} + + expect := []byte{edtType, 0, 7, + edtMap, + edtInt16, + edtMap, + edtString, + edtMap, + edtFloat32, + edtInt, + edtMap, 0, 0, 0, 0, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeMapZero(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := map[int16]string{} + + expect := []byte{edtType, 0, 3, + edtMap, + edtInt16, edtString, + edtMap, 0, 0, 0, 0, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceMap(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := []map[int16]string{ + { + 8: "hello", + }, { + 10: "helloo", + }, + { + 12: "hellooo", + }, + } + + expect := []byte{edtType, 0, 4, + edtSlice, + edtMap, + edtInt16, + edtString, + edtSlice, + 0, 0, 0, 3, + edtMap, + 0, 0, 0, 1, + 0, 8, // key 8 + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtMap, + 0, 0, 0, 1, // len of second map + 0, 0xa, // key 10 + 0, 6, // len of value "helloo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, // "helloo" + edtMap, + 0, 0, 0, 1, // len of 3rd map + 0, 0xc, // key 12 + 0, 7, // len of value "hellooo" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x6f, 0x6f, // "hellooo" + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeMapValueSliceNil(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := map[int16][]any{ + int16(8): nil, + int16(9): []any{"world"}, + } + + expect := []byte{edtType, 0, 4, + edtMap, + edtInt16, + edtSlice, + edtAny, + edtMap, + 0, 0, 0, 2, + + 0, 8, // key 8 + edtNil, + + 0, 9, // key 9 + edtSlice, + 0, 0, 0, 1, + edtString, 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + expect2 := []byte{edtType, 0, 4, + edtMap, + edtInt16, + edtSlice, + edtAny, + edtMap, + 0, 0, 0, 2, + + 0, 9, // key 9 + edtSlice, + 0, 0, 0, 1, + edtString, 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + + 0, 8, // key 8 +
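// the nil []any value occupies a single edtNil byte +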
edtNil, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeMapValueMap(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := map[int16]map[string]int{ + int16(8): nil, + int16(9): { + "world": 10, + }, + } + + expect := []byte{edtType, 0, 5, + edtMap, + edtInt16, + edtMap, + edtString, + edtInt, + edtMap, + 0, 0, 0, 2, + + 0, 8, // key 8 + edtNil, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + } + expect2 := []byte{edtType, 0, 5, + edtMap, + edtInt16, + edtMap, + edtString, + edtInt, + edtMap, + 0, 0, 0, 2, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + + 0, 8, // key 8 + edtNil, + } + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeMapValueMapRegKey(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + var x testMapKey + + if err := RegisterTypeOf(x); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + value := map[int16]map[testMapKey]int{ + int16(8): nil, + int16(9): { + "world": 10, + }, + } + + expect := []byte{edtType, 0, 7, + edtMap, + edtInt16, + edtMap, + edtReg, 0x13, 0x88, + edtInt, + + edtMap, + 0, 0, 0, 2, + + 0, 8, // 8 => map + edtNil, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + } + + expect2 := []byte{edtType, 0, 7, + edtMap, + edtInt16, + edtMap, + edtReg, 0x13, 0x88, + edtInt, + + edtMap, + 0, 0, 0, 2, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + + 0, 8, // 8 => map + edtNil, + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeMapValueMapAnyWithRegKey(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + var x testMapKey = "world" + + if err := RegisterTypeOf(x); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + value := map[int16]map[any]int{ + int16(8): nil, + int16(9): { + x: 10, + }, + } + + expect := []byte{edtType, 0, 5, + edtMap, + edtInt16, + edtMap, + edtAny, + edtInt, + + edtMap, + 0, 0, 0, 2, + + 0, 8, // 8 => map + edtNil, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + edtReg, 0x13, 0x88, 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + } + + expect2 := []byte{edtType, 0, 5, + edtMap, + edtInt16, +
edtMap, + edtAny, + edtInt, + + edtMap, + 0, 0, 0, 2, + + 0, 9, // 9 => map + edtMap, + 0, 0, 0, 1, + edtReg, 0x13, 0x88, 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, 0, 0, 0, 0, 0, 0xa, // value 10 + + 0, 8, // 8 => map + edtNil, + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeRegMap(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + var x MyMap + + if err := RegisterTypeOf(x); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(x), []byte{edtReg, 0x13, 0x88}) + + value := MyMap{ + "hello": true, + "world": false, + } + + expect := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 2, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, // false + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 1, // true + } + + expect2 := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 2, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 1, // true + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, // false + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeRegMapRegSlice(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type mySlice90 []bool + type myMap90 map[string]mySlice90 + + RegisterTypeOf(mySlice90{}) + RegisterTypeOf(myMap90{}) + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(myMap90{}), []byte{edtReg, 0x13, 0x88}) + + value := myMap90{ + "world": nil, + "hello": {true, false, true}, + } + + expect := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 2, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtReg, + 0, 0, 0, 3, + 1, 0, 1, // true, false, true + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtNil, + } + + expect2 := []byte{edtReg, 0x13, 0x88, + edtReg, + 0, 0, 0, 2, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + edtNil, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + edtReg, + 0, 0, 0, 3, + 1, 0, 1, // true, false, true + } + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + if !reflect.DeepEqual(b.B, expect2) { + fmt.Printf("exp1 %#v\n", expect) + fmt.Printf("exp2 %#v\n", expect2) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } + } +} + +func TestEncodeArray(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := [2]string{ + "hello", "world", + } + expect := []byte{edtType, 0, 6, + edtArray, 0, 0, 0, 2, + edtString, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + 
fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeArrayZero(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := [0]string{} + expect := []byte{edtType, 0, 6, + edtArray, 0, 0, 0, 0, + edtString, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceArray(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := [][2]string{ + {"hello", "world"}, + } + expect := []byte{edtType, 0, 7, + edtSlice, + edtArray, 0, 0, 0, 2, + edtString, + edtSlice, + 0, 0, 0, 1, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeSliceAnyArray(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + value := []any{ + nil, + [2]string{"hello", "world"}, + nil, + } + expect := []byte{edtType, 0, 2, + edtSlice, + edtAny, + + edtSlice, + 0, 0, 0, 3, + + edtNil, + + edtType, 0, 6, + edtArray, 0, 0, 0, 2, + edtString, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + + edtNil, + } + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +type myArrayEnc [2]string + +func TestEncodeRegArray(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := myArrayEnc{"hello", "world"} + + expect := []byte{edtReg, + 0, 38, // len of the type name #ergo.services/ergo/net/edf/myArrayEnc + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x6d, 0x79, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x45, 0x6e, 0x63, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + RegisterTypeOf(value) + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +type myArrayStr string + +func TestEncodeArrayReg(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + value := [2]myArrayStr{"hello", "world"} + + expect := []byte{edtType, 0, 46, + edtArray, 0, 0, 0, 2, + edtReg, + 0, 38, // len of the type name #ergo.services/ergo/net/edf/myArrayStr + 0x23, 0x65, 0x72, 0x67, 0x6f, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x65, + 0x72, 0x67, 0x6f, 0x2f, 0x6e, 0x65, 0x74, 0x2f, + 0x65, 0x64, 0x66, 0x2f, 0x6d, 0x79, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x53, 0x74, 0x72, + + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + } + + RegisterTypeOf(value[0]) + + if err := Encode(value, b, Options{}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + 
fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeRegArrayRegArray(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + type myArrayMyStr1 [2]string + type myArrayArray1 [3]myArrayMyStr1 + + value := myArrayArray1{ + {"hello", "world"}, + {"", ""}, + {"world", "hello"}, + } + + expect := []byte{edtReg, 0x13, 0x88, + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 0, + 0, 0, + 0, 5, // len of value "world" + 0x77, 0x6f, 0x72, 0x6c, 0x64, // "world" + 0, 5, // len of value "hello" + 0x68, 0x65, 0x6c, 0x6c, 0x6f, // "hello" + } + + regCache := new(sync.Map) + regCache.Store(reflect.TypeOf(myArrayArray1{}), []byte{edtReg, 0x13, 0x88}) + + RegisterTypeOf(myArrayMyStr1{}) + RegisterTypeOf(myArrayArray1{}) + + if err := Encode(value, b, Options{RegCache: regCache}); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(b.B, expect) { + fmt.Printf("exp %#v\n", expect) + fmt.Printf("got %#v\n", b.B) + t.Fatal("incorrect value") + } +} + +func TestEncodeStructWithMap(t *testing.T) { + // regression test: this combination of map and slice fields used to trigger an encoding bug + type BugInfo struct { + Env map[gen.Env]any + Loggers []gen.LoggerInfo + } + in := BugInfo{ + Env: map[gen.Env]any{ + "x": "y", + }, + Loggers: []gen.LoggerInfo{ + gen.LoggerInfo{}, + }, + } + if err := RegisterTypeOf(in); err != nil { + panic(err) + } + + b := lib.TakeBuffer() + if err := Encode(in, b, Options{}); err != nil { + t.Fatal(err) + } + value, _, err := Decode(b.B, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(value, in) { + fmt.Println("exp", in) + fmt.Println("got", value) + t.Fatal("incorrect value") + } +} diff --git a/net/edf/init.go b/net/edf/init.go new file mode 100644 index 00000000..378689c4 --- /dev/null +++ b/net/edf/init.go @@ -0,0 +1,292 @@ +package edf + +import ( + "fmt" + "reflect" + "time" + + "ergo.services/ergo/app/system/inspect" + "ergo.services/ergo/gen" +) + +var ( + // register generic Ergo Framework types for networking + genTypes = []any{ + + gen.Env(""), + gen.LogLevel(0), + gen.ProcessState(0), + gen.MetaState(0), + gen.NetworkMode(0), + gen.MessagePriority(0), + gen.CompressionType(""), + gen.CompressionLevel(0), + gen.ApplicationMode(0), + gen.ApplicationState(0), + + gen.Version{}, + + gen.ApplicationDepends{}, + + gen.LoggerInfo{}, + gen.NodeInfo{}, + gen.Compression{}, + gen.ProcessFallback{}, + gen.MailboxQueues{}, + gen.ProcessInfo{}, + gen.ProcessShortInfo{}, + gen.ProcessOptions{}, + gen.ProcessOptionsExtra{}, + gen.ApplicationOptions{}, + gen.ApplicationOptionsExtra{}, + gen.MetaInfo{}, + + gen.NetworkFlags{}, + gen.NetworkProxyFlags{}, + gen.NetworkSpawnInfo{}, + gen.NetworkApplicationStartInfo{}, + gen.RemoteNodeInfo{}, + gen.RouteInfo{}, + gen.ProxyRouteInfo{}, + gen.Route{}, + gen.ApplicationRoute{}, + gen.ProxyRoute{}, + gen.RegisterRoutes{}, + gen.RegistrarInfo{}, + gen.AcceptorInfo{}, + gen.NetworkInfo{}, + gen.MessageEvent{}, + gen.MessageEventStart{}, + gen.MessageEventStop{}, + + // inspector messages + + inspect.RequestInspectNode{}, + inspect.ResponseInspectNode{}, + inspect.MessageInspectNode{}, + + inspect.RequestInspectNetwork{}, + inspect.ResponseInspectNetwork{}, + inspect.MessageInspectNetwork{}, + + inspect.RequestInspectConnection{}, + inspect.ResponseInspectConnection{}, + inspect.MessageInspectConnection{}, + + inspect.RequestInspectProcessList{}, +
inspect.ResponseInspectProcessList{}, + inspect.MessageInspectProcessList{}, + + inspect.RequestInspectLog{}, + inspect.ResponseInspectLog{}, + inspect.MessageInspectLogNode{}, + inspect.MessageInspectLogNetwork{}, + inspect.MessageInspectLogProcess{}, + inspect.MessageInspectLogMeta{}, + + inspect.RequestInspectProcess{}, + inspect.ResponseInspectProcess{}, + inspect.MessageInspectProcess{}, + + inspect.RequestInspectProcessState{}, + inspect.ResponseInspectProcessState{}, + inspect.MessageInspectProcessState{}, + + inspect.RequestInspectMeta{}, + inspect.ResponseInspectMeta{}, + inspect.MessageInspectMeta{}, + + inspect.RequestInspectMetaState{}, + inspect.ResponseInspectMetaState{}, + inspect.MessageInspectMetaState{}, + + inspect.RequestDoSend{}, + inspect.ResponseDoSend{}, + + inspect.RequestDoSendMeta{}, + inspect.ResponseDoSendMeta{}, + + inspect.RequestDoSendExit{}, + inspect.ResponseDoSendExit{}, + + inspect.RequestDoSendExitMeta{}, + inspect.ResponseDoSendExitMeta{}, + + inspect.RequestDoKill{}, + inspect.ResponseDoKill{}, + + inspect.RequestDoSetLogLevel{}, + inspect.RequestDoSetLogLevelProcess{}, + inspect.RequestDoSetLogLevelMeta{}, + inspect.ResponseDoSetLogLevel{}, + } + + // register standard errors of the Ergo Framework + genErrors = []error{ + gen.ErrIncorrect, + gen.ErrTimeout, + gen.ErrUnsupported, + gen.ErrUnknown, + gen.ErrNameUnknown, + gen.ErrNotAllowed, + gen.ErrProcessUnknown, + gen.ErrProcessTerminated, + gen.ErrMetaUnknown, + gen.ErrApplicationUnknown, + gen.ErrTaken, + gen.TerminateReasonNormal, + gen.TerminateReasonShutdown, + gen.TerminateReasonKill, + gen.TerminateReasonPanic, + } +) + +func init() { + // + // encoders + // + encoders.Store(reflect.TypeOf(gen.PID{}), &encoder{Prefix: []byte{edtPID}, Encode: encodePID}) + encoders.Store(reflect.TypeOf(gen.ProcessID{}), &encoder{Prefix: []byte{edtProcessID}, Encode: encodeProcessID}) + encoders.Store(reflect.TypeOf(gen.Ref{}), &encoder{Prefix: []byte{edtRef}, Encode: encodeRef}) + encoders.Store(reflect.TypeOf(gen.Alias{}), &encoder{Prefix: []byte{edtAlias}, Encode: encodeAlias}) + encoders.Store(reflect.TypeOf(gen.Event{}), &encoder{Prefix: []byte{edtEvent}, Encode: encodeEvent}) + encoders.Store(reflect.TypeOf(true), &encoder{Prefix: []byte{edtBool}, Encode: encodeBool}) + encoders.Store(reflect.TypeOf(gen.Atom("atom")), &encoder{Prefix: []byte{edtAtom}, Encode: encodeAtom}) + encoders.Store(reflect.TypeOf("string"), &encoder{Prefix: []byte{edtString}, Encode: encodeString}) + encoders.Store(reflect.TypeOf(int(0)), &encoder{Prefix: []byte{edtInt}, Encode: encodeInt}) + encoders.Store(reflect.TypeOf(int8(0)), &encoder{Prefix: []byte{edtInt8}, Encode: encodeInt8}) + encoders.Store(reflect.TypeOf(int16(0)), &encoder{Prefix: []byte{edtInt16}, Encode: encodeInt16}) + encoders.Store(reflect.TypeOf(int32(0)), &encoder{Prefix: []byte{edtInt32}, Encode: encodeInt32}) + encoders.Store(reflect.TypeOf(int64(0)), &encoder{Prefix: []byte{edtInt64}, Encode: encodeInt64}) + encoders.Store(reflect.TypeOf(uint(0)), &encoder{Prefix: []byte{edtUint}, Encode: encodeUint}) + encoders.Store(reflect.TypeOf(uint8(0)), &encoder{Prefix: []byte{edtUint8}, Encode: encodeUint8}) + encoders.Store(reflect.TypeOf(uint16(0)), &encoder{Prefix: []byte{edtUint16}, Encode: encodeUint16}) + encoders.Store(reflect.TypeOf(uint32(0)), &encoder{Prefix: []byte{edtUint32}, Encode: encodeUint32}) + encoders.Store(reflect.TypeOf(uint64(0)), &encoder{Prefix: []byte{edtUint64}, Encode: encodeUint64}) + encoders.Store(reflect.TypeOf([]byte(nil)), 
&encoder{Prefix: []byte{edtBinary}, Encode: encodeBinary}) + encoders.Store(reflect.TypeOf(float32(0.0)), &encoder{Prefix: []byte{edtFloat32}, Encode: encodeFloat32}) + encoders.Store(reflect.TypeOf(float64(0.0)), &encoder{Prefix: []byte{edtFloat64}, Encode: encodeFloat64}) + encoders.Store(reflect.TypeOf(time.Time{}), &encoder{Prefix: []byte{edtTime}, Encode: encodeTime}) + encoders.Store(anyType, &encoder{Prefix: []byte{edtAny}, Encode: encodeAny}) + + // error types + encoders.Store(errType, &encoder{Prefix: []byte{edtError}, Encode: encodeError}) + encoders.Store(reflect.TypeOf(fmt.Errorf("")), &encoder{Prefix: []byte{edtError}, Encode: encodeError}) + // wrapped error has a different type + encoders.Store(reflect.TypeOf(fmt.Errorf("%w", nil)), &encoder{Prefix: []byte{edtError}, Encode: encodeError}) + + // + // decoders + // + decPID := &decoder{reflect.TypeOf(gen.PID{}), decodePID} + decoders.Store(edtPID, decPID) + decoders.Store(decPID.Type, decPID) + + decProcessID := &decoder{reflect.TypeOf(gen.ProcessID{}), decodeProcessID} + decoders.Store(edtProcessID, decProcessID) + decoders.Store(decProcessID.Type, decProcessID) + + decRef := &decoder{reflect.TypeOf(gen.Ref{}), decodeRef} + decoders.Store(edtRef, decRef) + decoders.Store(decRef.Type, decRef) + + decAlias := &decoder{reflect.TypeOf(gen.Alias{}), decodeAlias} + decoders.Store(edtAlias, decAlias) + decoders.Store(decAlias.Type, decAlias) + + decEvent := &decoder{reflect.TypeOf(gen.Event{}), decodeEvent} + decoders.Store(edtEvent, decEvent) + decoders.Store(decEvent.Type, decEvent) + + decTime := &decoder{reflect.TypeOf(time.Time{}), decodeTime} + decoders.Store(edtTime, decTime) + decoders.Store(decTime.Type, decTime) + + decBool := &decoder{reflect.TypeOf(true), decodeBool} + decoders.Store(edtBool, decBool) + decoders.Store(decBool.Type, decBool) + + decAtom := &decoder{reflect.TypeOf(gen.Atom("atom")), decodeAtom} + decoders.Store(edtAtom, decAtom) + decoders.Store(decAtom.Type, decAtom) + + decString := &decoder{reflect.TypeOf("string"), decodeString} + decoders.Store(edtString, decString) + decoders.Store(decString.Type, decString) + + decInt := &decoder{reflect.TypeOf(int(0)), decodeInt} + decoders.Store(edtInt, decInt) + decoders.Store(decInt.Type, decInt) + + decInt8 := &decoder{reflect.TypeOf(int8(0)), decodeInt8} + decoders.Store(edtInt8, decInt8) + decoders.Store(decInt8.Type, decInt8) + + decInt16 := &decoder{reflect.TypeOf(int16(0)), decodeInt16} + decoders.Store(edtInt16, decInt16) + decoders.Store(decInt16.Type, decInt16) + + decInt32 := &decoder{reflect.TypeOf(int32(0)), decodeInt32} + decoders.Store(edtInt32, decInt32) + decoders.Store(decInt32.Type, decInt32) + + decInt64 := &decoder{reflect.TypeOf(int64(0)), decodeInt64} + decoders.Store(edtInt64, decInt64) + decoders.Store(decInt64.Type, decInt64) + + decUint := &decoder{reflect.TypeOf(uint(0)), decodeUint} + decoders.Store(edtUint, decUint) + decoders.Store(decUint.Type, decUint) + + decUint8 := &decoder{reflect.TypeOf(uint8(0)), decodeUint8} + decoders.Store(edtUint8, decUint8) + decoders.Store(decUint8.Type, decUint8) + + decUint16 := &decoder{reflect.TypeOf(uint16(0)), decodeUint16} + decoders.Store(edtUint16, decUint16) + decoders.Store(decUint16.Type, decUint16) + + decUint32 := &decoder{reflect.TypeOf(uint32(0)), decodeUint32} + decoders.Store(edtUint32, decUint32) + decoders.Store(decUint32.Type, decUint32) + + decUint64 := &decoder{reflect.TypeOf(uint64(0)), decodeUint64} + decoders.Store(edtUint64, decUint64) + decoders.Store(decUint64.Type, 
decUint64) + + decBinary := &decoder{reflect.TypeOf([]byte(nil)), decodeBinary} + decoders.Store(edtBinary, decBinary) + decoders.Store(decBinary.Type, decBinary) + + decFloat32 := &decoder{reflect.TypeOf(float32(0.0)), decodeFloat32} + decoders.Store(edtFloat32, decFloat32) + decoders.Store(decFloat32.Type, decFloat32) + + decFloat64 := &decoder{reflect.TypeOf(float64(0.0)), decodeFloat64} + decoders.Store(edtFloat64, decFloat64) + decoders.Store(decFloat64.Type, decFloat64) + + decAny := &decoder{anyType, decodeAny} + decoders.Store(edtAny, decAny) + decoders.Store(anyType, decAny) + + decErr := &decoder{errType, decodeError} + decoders.Store(edtError, decErr) + decoders.Store(decErr.Type, decErr) + + for _, t := range genTypes { + err := RegisterTypeOf(t) + if err == nil || err == gen.ErrTaken { + continue + } + panic(err) + } + + for _, e := range genErrors { + err := RegisterError(e) + if err == nil || err == gen.ErrTaken { + continue + } + panic(err) + } +} diff --git a/net/edf/register.go b/net/edf/register.go new file mode 100644 index 00000000..36a3569b --- /dev/null +++ b/net/edf/register.go @@ -0,0 +1,767 @@ +package edf + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sync" + "sync/atomic" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +type decoder struct { + Type reflect.Type + Decode func(*reflect.Value, []byte, *stateDecode) (*reflect.Value, []byte, error) +} + +type encodeFunc func(value reflect.Value, b *lib.Buffer, state *stateEncode) error +type encoder struct { + Prefix []byte + Encode encodeFunc +} + +func regTypeName(t reflect.Type) string { + return fmt.Sprintf("#%s/%s", t.PkgPath(), t.Name()) +} + +func RegisterTypeOf(v any) error { + vov := reflect.ValueOf(v) + tov := vov.Type() + + if tov.Kind() == reflect.Pointer { + return fmt.Errorf("pointer type is not supported") + } + + switch v.(type) { + case bool, string, error, + int, int8, int16, int32, int64, + uint, uint8, uint16, uint32, uint64, + []byte, + float32, float64: + return fmt.Errorf("unable to register a regular type") + + case gen.Atom, gen.PID, gen.ProcessID, gen.Event, gen.Ref, gen.Alias, time.Time: + return fmt.Errorf("unable to register a type of Ergo Framework") + + case Unmarshaler: + return fmt.Errorf("UnmarshalEDF method of %v must be a method of *%v", tov, tov) + + case Marshaler: + // unmarshaling must be implemented as a method of a pointer to the object + if reflect.PointerTo(tov).Implements(reflect.TypeOf((*Unmarshaler)(nil)).Elem()) == false { + return fmt.Errorf("UnmarshalEDF method of %v must be a method of *%v", tov, tov) + } + name := regTypeName(tov) + + fenc := func(value reflect.Value, b *lib.Buffer, _ *stateEncode) error { + v := value.Interface().(Marshaler) + buf := b.Extend(4) + l := b.Len() + if err := v.MarshalEDF(b); err != nil { + return err + } + + lenBinary := b.Len() - l + if int64(lenBinary) > int64(math.MaxUint32-1) { + return ErrBinaryTooLong + } + binary.BigEndian.PutUint32(buf, uint32(lenBinary)) + return nil + } + encoders.Store(tov, regEncoder(name, fenc)) + + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + + l := binary.BigEndian.Uint32(packet) + if len(packet) < int(l+4) { + return nil, nil, errDecodeEOD + } + + if value == nil { + v := reflect.Indirect(reflect.New(state.decoder.Type)) + value = &v + } + + v := value.Addr().Interface().(Unmarshaler) + if err := v.UnmarshalEDF(packet[4 : l+4]); err != nil { + 
return nil, nil, err + } + + packet = packet[l+4:] + return value, packet, nil + } + dec := &decoder{tov, fdec} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + } + + return registerType(tov) +} + +func registerType(tov reflect.Type) error { + + name := regTypeName(tov) + + if _, found := encoders.Load(tov); found { + return gen.ErrTaken + } + + if _, found := decoders.Load(name); found { + return gen.ErrTaken + } + + switch tov.Kind() { + case reflect.Bool: + encoders.Store(tov, regEncoder(name, encodeBool)) + dec := &decoder{tov, decodeBool} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Int: + encoders.Store(tov, regEncoder(name, encodeInt)) + dec := &decoder{tov, decodeInt} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Int8: + encoders.Store(tov, regEncoder(name, encodeInt8)) + dec := &decoder{tov, decodeInt8} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Int16: + encoders.Store(tov, regEncoder(name, encodeInt16)) + dec := &decoder{tov, decodeInt16} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Int32: + encoders.Store(tov, regEncoder(name, encodeInt32)) + dec := &decoder{tov, decodeInt32} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Int64: + encoders.Store(tov, regEncoder(name, encodeInt64)) + dec := &decoder{tov, decodeInt64} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Uint: + encoders.Store(tov, regEncoder(name, encodeUint)) + dec := &decoder{tov, decodeUint} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Uint8: + encoders.Store(tov, regEncoder(name, encodeUint8)) + dec := &decoder{tov, decodeUint8} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Uint16: + encoders.Store(tov, regEncoder(name, encodeUint16)) + dec := &decoder{tov, decodeUint16} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Uint32: + encoders.Store(tov, regEncoder(name, encodeUint32)) + dec := &decoder{tov, decodeUint32} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Uint64: + encoders.Store(tov, regEncoder(name, encodeUint64)) + dec := &decoder{tov, decodeUint64} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Float32: + encoders.Store(tov, regEncoder(name, encodeFloat32)) + dec := &decoder{tov, decodeFloat32} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Float64: + encoders.Store(tov, regEncoder(name, encodeFloat64)) + dec := &decoder{tov, decodeFloat64} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.String: + encoders.Store(tov, regEncoder(name, encodeString)) + dec := &decoder{tov, decodeString} + decoders.Store(name, dec) + decoders.Store(tov, dec) + addRegCache(tov) + return nil + + case reflect.Struct: + var encs []*encoder + var decs []*decoder + + nf := tov.NumField() + for i := 0; i < nf; i++ { + ft := tov.Field(i).Type + + enc, err := getEncoder(ft, &stateEncode{}) + if err != nil { + return fmt.Errorf("(struct field encode) type %v 
must be registered first: %s", ft, err) + } + encs = append(encs, enc) + + dec, _, err := decodeType(enc.Prefix, &stateDecode{}) + if err != nil { + return fmt.Errorf("(struct field decode) type %v must be registered first: %s", ft, err) + } + decs = append(decs, dec) + } + + // encoder closure + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.child == nil { + state.child = &stateEncode{options: state.options} + } + state = state.child + for i := 0; i < nf; i++ { + state.encodeType = false + if err := encs[i].Encode(value.Field(i), b, state); err != nil { + return err + } + } + return nil + } + encoders.Store(tov, regEncoder(name, fenc)) + + // decoder closure + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + var err error + if state.child == nil { + state.child = &stateDecode{options: state.options} + } + + if value == nil { + v := reflect.Indirect(reflect.New(state.decoder.Type)) + value = &v + } + + state = state.child + for i := 0; i < nf; i++ { + field := value.Field(i) + _, packet, err = decs[i].Decode(&field, packet, state) + if err != nil { + return nil, nil, err + } + } + return value, packet, nil + } + decoders.Store(name, &decoder{tov, fdec}) + addRegCache(tov) + + return nil + + case reflect.Slice: + itemType := tov.Elem() + + // encoder + enc, err := getEncoder(itemType, &stateEncode{}) + if err != nil { + return fmt.Errorf("(slice item encoder) type %v must be registered first: %s", itemType, err) + } + + // decoder + dec, _, err := decodeType(enc.Prefix, &stateDecode{}) + if err != nil { + return fmt.Errorf("(slice item decoder) type %v must be registered first: %s", itemType, err) + } + + // encode closure + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if value.IsNil() { + b.AppendByte(edtNil) + return nil + } + b.AppendByte(edtReg) + + n := value.Len() + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(n)) + if state.child == nil { + state.child = &stateEncode{options: state.options} + } + state = state.child + for i := 0; i < n; i++ { + state.encodeType = false + if err := enc.Encode(value.Index(i), b, state); err != nil { + return err + } + } + return nil + } + encoders.Store(tov, regEncoder(name, fenc)) + + // decode closure + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + if packet[0] == edtNil { + packet = packet[1:] + return nil, packet, nil + } + if packet[0] != edtReg { + return nil, nil, fmt.Errorf("incorrect slice/array type %d", packet[0]) + } + packet = packet[1:] + + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + + n := int(binary.BigEndian.Uint32(packet[:4])) + packet = packet[4:] + + if n > len(packet) { + return nil, nil, fmt.Errorf("incorrect data length %d", n) + } + + x := reflect.MakeSlice(tov, n, n) + if value == nil { + value = &x + } else { + value.Set(x) + } + + if n == 0 { + return value, packet, nil + } + + if state.child == nil { + state.child = &stateDecode{ + options: state.options, + decoder: dec, + } + } + state = state.child + + for i := 0; i < n; i++ { + item := value.Index(i) + _, p, err := dec.Decode(&item, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + } + + return value, packet, nil + } + decoders.Store(name, &decoder{tov, fdec}) + addRegCache(tov) + + case reflect.Array: + itemType := tov.Elem() + + // encoder + enc, err := 
getEncoder(itemType, &stateEncode{}) + if err != nil { + return fmt.Errorf("(array item encoder) type %v must be registered first: %s", itemType, err) + } + + // decoder + dec, _, err := decodeType(enc.Prefix, &stateDecode{}) + if err != nil { + return fmt.Errorf("(array item decoder) type %v must be registered first: %s", itemType, err) + } + + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if state.child == nil { + state.child = &stateEncode{options: state.options} + } + state = state.child + for i := 0; i < value.Len(); i++ { + state.encodeType = false + if err := enc.Encode(value.Index(i), b, state); err != nil { + return err + } + } + return nil + } + encoders.Store(tov, regEncoder(name, fenc)) + + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) == 0 { + if tov.Len() == 0 { + return value, packet, nil + } + return nil, nil, errDecodeEOD + } + if value == nil { + x := reflect.Indirect(reflect.New(tov)) + value = &x + } + + if state.child == nil { + state.child = &stateDecode{ + options: state.options, + decoder: dec, + } + } + state = state.child + + for i := 0; i < tov.Len(); i++ { + item := value.Index(i) + _, p, err := dec.Decode(&item, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + } + + return value, packet, nil + } + decoders.Store(name, &decoder{tov, fdec}) + addRegCache(tov) + + case reflect.Map: + typeKey := tov.Key() + typeValue := tov.Elem() + + // encoders for key/value + encKey, err := getEncoder(typeKey, &stateEncode{}) + if err != nil { + return fmt.Errorf("(map key encoder) type %v must be registered first: %s", typeKey, err) + } + encValue, err := getEncoder(typeValue, &stateEncode{}) + if err != nil { + return fmt.Errorf("(map value encoder) type %v must be registered first: %s", typeValue, err) + } + + // decoders for key/value + decKey, _, err := decodeType(encKey.Prefix, &stateDecode{}) + if err != nil { + return fmt.Errorf("(map key decoder) type %v must be registered first: %s", typeKey, err) + } + decValue, _, err := decodeType(encValue.Prefix, &stateDecode{}) + if err != nil { + return fmt.Errorf("(map value decoder) type %v must be registered first: %s", typeValue, err) + } + + fenc := func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + if value.IsNil() { + b.AppendByte(edtNil) + return nil + } else { + b.AppendByte(edtReg) + } + + if state.child == nil { + state.child = &stateEncode{ + options: state.options, + } + } + state = state.child + + n := value.Len() + buf := b.Extend(4) + binary.BigEndian.PutUint32(buf, uint32(n)) + + iter := value.MapRange() + for iter.Next() { + state.encodeType = false + if err := encKey.Encode(iter.Key(), b, state); err != nil { + return err + } + state.encodeType = false + if err := encValue.Encode(iter.Value(), b, state); err != nil { + return err + } + } + return nil + } + encoders.Store(tov, regEncoder(name, fenc)) + + fdec := func(value *reflect.Value, packet []byte, state *stateDecode) (*reflect.Value, []byte, error) { + if len(packet) == 0 { + return nil, nil, errDecodeEOD + } + + if packet[0] == edtNil { + packet = packet[1:] + return nil, packet, nil + } + + if packet[0] != edtReg { + return nil, nil, fmt.Errorf("incorrect map type %d", packet[0]) + } + packet = packet[1:] + + if len(packet) < 4 { + return nil, nil, errDecodeEOD + } + + n := int(binary.BigEndian.Uint32(packet[:4])) + packet = packet[4:] + + x := reflect.MakeMapWithSize(tov, n) + if value == nil { + value 
= &x + } else { + value.Set(x) + } + + if n == 0 { + return value, packet, nil + } + + if n > len(packet) { + return nil, nil, fmt.Errorf("incorrect data length") + } + + if state.child == nil { + state.child = &stateDecode{ + options: state.options, + } + } + state = state.child + + for i := 0; i < n; i++ { + k := reflect.Indirect(reflect.New(decKey.Type)) + state.decoder = decKey + _, p, err := decKey.Decode(&k, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + + v := reflect.Indirect(reflect.New(decValue.Type)) + state.decoder = decValue + _, p, err = decValue.Decode(&v, packet, state) + if err != nil { + return nil, nil, err + } + packet = p + + value.SetMapIndex(k, v) + } + + return value, packet, nil + } + decoders.Store(name, &decoder{tov, fdec}) + addRegCache(tov) + + default: + return fmt.Errorf("type %v is not supported", tov) + } + + return nil +} + +func RegisterError(e error) error { + return addErrCache(e) +} + +func RegisterAtom(a gen.Atom) error { + return addAtomCache(a) +} + +var ( + encoders sync.Map + decoders sync.Map +) + +func regEncoder(name string, enc encodeFunc) *encoder { + l := uint16(len(name)) + if l > 4095 { + panic(fmt.Sprintf("unable to register type. too long name: %s", name)) + } + prefix := []byte{edtReg, 0, 0} + binary.BigEndian.PutUint16(prefix[1:3], l) + prefix = append(prefix, name...) + + return &encoder{ + Prefix: prefix, + Encode: func(value reflect.Value, b *lib.Buffer, state *stateEncode) error { + var prev bool + if state.encodeType { + if state.options.RegCache != nil { + if v, found := state.options.RegCache.Load(value.Type()); found { + b.Append(v.([]byte)) + } else { + b.Append(prefix) + } + } else { + b.Append(prefix) + } + + state.encodeType = false + prev = true + } + err := enc(value, b, state) + state.encodeType = prev + if err != nil { + return err + } + + return nil + }, + } +} + +// for outgoing (encoding) messages. 
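+// Registered types travel on the wire either by full name (edtReg, 2-byte name length, name) or, once the peers have exchanged their caches during the handshake, by a 2-byte cache id. Ids 0..4095 would be ambiguous with a name length, which is why the numbering starts above 4095. +// +// A minimal usage sketch (the type name is hypothetical); registration must happen on both nodes before the connection is established: +// +//    type myState struct{ Counter int } +//    if err := RegisterTypeOf(myState{}); err != nil && err != gen.ErrTaken { +//        panic(err) +//    }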
+var regCacheID uint32 = 4095 // 0..4095 - reserved (used as a length) +var regCache sync.Map + +func addRegCache(t reflect.Type) error { + id := atomic.AddUint32(&regCacheID, 1) + if id > math.MaxUint16 { + return fmt.Errorf("too many registered types") + } + reg := []byte{edtReg, 0, 0} + binary.BigEndian.PutUint16(reg[1:3], uint16(id)) + + if _, exist := regCache.LoadOrStore(t, reg); exist { + return gen.ErrTaken + } + regCache.Store(uint16(id), regTypeName(t)) + return nil +} + +func GetRegCache() map[uint16]string { + cache := make(map[uint16]string) + regCache.Range(func(k, v any) bool { + id, ok := k.(uint16) + if ok == false { + return true + } + name := v.(string) + cache[id] = name + return true + }) + if len(cache) == 0 { + return nil + } + return cache +} + +func MakeEncodeRegTypeCache(names []string) *sync.Map { + mapnames := make(map[string]bool) + for _, name := range names { + mapnames[name] = true + } + if len(mapnames) == 0 { + return nil + } + cache := new(sync.Map) + regCache.Range(func(k, v any) bool { + t, ok := k.(reflect.Type) + if ok == false { + return true + } + tn := regTypeName(t) + if _, found := mapnames[tn]; found == false { + return true + } + cache.Store(t, v) + return true + }) + return cache +} + +var errCacheID uint32 = math.MaxInt16 // 0..32767 - reserved (used as a length) +var errCache sync.Map + +func GetErrCache() map[uint16]error { + cache := make(map[uint16]error) + errCache.Range(func(k, v any) bool { + if id, ok := v.(uint16); ok { + err := k.(error) + cache[id] = err + } + return true + }) + if len(cache) == 0 { + return nil + } + return cache +} + +func addErrCache(e error) error { + id := atomic.AddUint32(&errCacheID, 1) + // math.MaxUint16 is used for encoding a nil value + if id > math.MaxUint16-1 { + return fmt.Errorf("too many registered errors") + } + if _, exist := errCache.LoadOrStore(e, uint16(id)); exist { + return gen.ErrTaken + } + return nil +} + +var atomCacheID uint32 = 255 // 0..255 - reserved (used as a length) +var atomCache sync.Map + +func GetAtomCache() map[uint16]gen.Atom { + cache := make(map[uint16]gen.Atom) + atomCache.Range(func(k, v any) bool { + id, ok := v.(uint16) + if ok == false { + return true + } + atom := k.(gen.Atom) + cache[id] = atom + return true + }) + if len(cache) == 0 { + return nil + } + return cache +} + +func addAtomCache(atom gen.Atom) error { + id := atomic.AddUint32(&atomCacheID, 1) + // the last 1000 ids for the custom atoms + if id > math.MaxUint16-1000 { + return fmt.Errorf("too many registered atoms") + } + if _, exist := atomCache.LoadOrStore(atom, uint16(id)); exist { + return gen.ErrTaken + } + return nil +} diff --git a/net/edf/register_test.go b/net/edf/register_test.go new file mode 100644 index 00000000..f3400411 --- /dev/null +++ b/net/edf/register_test.go @@ -0,0 +1,139 @@ +package edf + +import ( + // "encoding/binary" + "fmt" + "reflect" + "sync" + "testing" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +type testRegBool bool +type testRegString string +type testRegFloat32 float32 +type testRegFloat64 float64 +type testRegInt int +type testRegInt8 int8 +type testRegInt16 int16 +type testRegInt32 int32 +type testRegInt64 int64 +type testRegUint uint +type testRegUint8 uint8 +type testRegUint16 uint16 +type testRegUint32 uint32 +type testRegUint64 uint64 +type testRegBin []byte +type testRegMap map[bool]string + +type testRegStruct struct{ A bool } +type testRegSlice []bool +type testRegArray [3]bool + +type regCases struct { + name string + value any +} + +func 
registerCases() []regCases { + + return []regCases{ + {"bool", testRegBool(true)}, + {"string", testRegString("string")}, + {"float32", testRegFloat32(3.12)}, + {"float64", testRegFloat64(3.14)}, + {"int", testRegInt(10)}, + {"int8", testRegInt8(11)}, + {"int16", testRegInt16(12)}, + {"int32", testRegInt32(13)}, + {"int64", testRegInt64(14)}, + {"uint", testRegUint(15)}, + {"uint8", testRegUint8(16)}, + {"uint16", testRegUint16(17)}, + {"uint32", testRegUint32(18)}, + {"uint64", testRegUint64(19)}, + {"[]byte", testRegBin([]byte{1, 2, 3, 4, 5})}, + {"struct", testRegStruct{A: true}}, + {"slice", testRegSlice{false, true, false, true, false}}, + {"array", testRegArray{false, true, false}}, + {"map", testRegMap{false: "string1", true: "string2"}}, + } +} + +func TestRegTypes(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + cache := new(sync.Map) + for _, c := range registerCases() { + t.Run(c.name, func(t *testing.T) { + b.Reset() + if err := RegisterTypeOf(c.value); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + + if err := Encode(c.value, b, Options{Cache: cache}); err != nil { + t.Fatal(err) + } + value, _, err := Decode(b.B, Options{}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(c.value, value) { + fmt.Printf("exp (%T) %#v\n", c.value, c.value) + fmt.Printf("got (%T) %#v\n", value, value) + t.Fatal("incorrect value") + } + }) + } +} + +func TestRegCacheTypes(t *testing.T) { + b := lib.TakeBuffer() + defer lib.ReleaseBuffer(b) + + cache := new(sync.Map) + for _, c := range registerCases() { + t.Run(c.name, func(t *testing.T) { + b.Reset() + if err := RegisterTypeOf(c.value); err != nil { + if err != gen.ErrTaken { + t.Fatal(err) + } + } + rcDec := new(sync.Map) + names := []string{} + for k, v := range GetRegCache() { + names = append(names, v) + rcDec.Store(k, v) + } + + if len(names) == 0 { + t.Fatal("decoding reg cache is empty") + } + rcEnc := MakeEncodeRegTypeCache(names) + if rcEnc == nil { + t.Fatal("encoding reg cache is nil") + } + + if err := Encode(c.value, b, Options{RegCache: rcEnc, Cache: cache}); err != nil { + t.Fatal(err) + } + value, _, err := Decode(b.B, Options{RegCache: rcDec, Cache: cache}) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(c.value, value) { + fmt.Printf("exp (%T) %#v\n", c.value, c.value) + fmt.Printf("got (%T) %#v\n", value, value) + t.Fatal("incorrect value") + } + }) + } +} diff --git a/net/handshake/accept.go b/net/handshake/accept.go new file mode 100644 index 00000000..50dc95c6 --- /dev/null +++ b/net/handshake/accept.go @@ -0,0 +1,175 @@ +package handshake + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/tls" + "fmt" + "net" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" +) + +func (h *handshake) Accept(node gen.NodeHandshake, conn net.Conn, options gen.HandshakeOptions) (gen.HandshakeResult, error) { + var result gen.HandshakeResult + var salt string + result.HandshakeVersion = h.Version() + + v, tail, err := h.readMessage(conn, time.Second, nil) + if err != nil { + return result, err + } + switch m := v.(type) { + case MessageHello: + hash := sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s", m.Salt, options.Cookie))) + + if m.Digest != fmt.Sprintf("%x", hash.Sum(nil)) { + return result, fmt.Errorf("incorrect digest (accept stage 'hello')") + } + + salt = lib.RandomString(64) + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", salt, m.Digest, options.Cookie))) + + hello := MessageHello{ + Salt: 
salt, + Digest: fmt.Sprintf("%x", hash.Sum(nil)), + } + + if fp := h.getLocalTLSFingerprint(conn, options.CertManager); fp != nil { + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", salt, m.Salt, options.Cookie))) + hash.Write(fp) + hello.DigestCert = fmt.Sprintf("%x", hash.Sum(nil)) + } + + if err := h.writeMessage(conn, hello); err != nil { + return result, err + } + + case MessageJoin: + result.Peer = m.Node + hash := sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", m.ConnectionID, m.Salt, options.Cookie))) + if m.Digest != fmt.Sprintf("%x", hash.Sum(nil)) { + return result, fmt.Errorf("incorrect join digest") + } + result.ConnectionID = m.ConnectionID + result.Custom = ConnectionOptions{} + + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s", m.Digest, options.Cookie))) + accept := MessageAccept{ + Digest: fmt.Sprintf("%x", hash.Sum(nil)), + } + if fp := h.getLocalTLSFingerprint(conn, options.CertManager); fp != nil { + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", m.Digest, m.Salt, options.Cookie))) + hash.Write(fp) + accept.DigestCert = fmt.Sprintf("%x", hash.Sum(nil)) + } + if err := h.writeMessage(conn, accept); err != nil { + return result, err + } + if len(h.atom_mapping) > 0 { + result.AtomMapping = make(map[gen.Atom]gen.Atom) + for k, v := range h.atom_mapping { + result.AtomMapping[k] = v + } + } + return result, nil + + default: + return result, fmt.Errorf("malformed handshake Hello/Join message") + } + + // wait for the introduce message + v, tail, err = h.readMessage(conn, time.Second, nil) + if err != nil { + return result, err + } + + intro, ok := v.(MessageIntroduce) + if ok == false { + return result, fmt.Errorf("malformed handshake Introduce message") + } + + if intro.Node == node.Name() { + return result, fmt.Errorf("malformed handshake Introduce message (same name)") + } + hash := sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s", salt, options.Cookie))) + if intro.Digest != fmt.Sprintf("%x", hash.Sum(nil)) { + return result, fmt.Errorf("incorrect digest (accept stage 'introduce')") + } + + accept := MessageAccept{} + accept.ID = lib.RandomString(32) + accept.PoolSize = h.poolsize + accept.PoolDSN = append(accept.PoolDSN, conn.LocalAddr().String()) + if err := h.writeMessage(conn, accept); err != nil { + return result, err + } + + intro2 := MessageIntroduce{ + Node: node.Name(), + Version: node.Version(), + Flags: options.Flags, + Creation: node.Creation(), + + MaxMessageSize: options.MaxMessageSize, + + AtomCache: edf.GetAtomCache(), + RegCache: edf.GetRegCache(), + ErrCache: edf.GetErrCache(), + } + if err := h.writeMessage(conn, intro2); err != nil { + return result, err + } + + // wait for the accept message + v, tail, err = h.readMessage(conn, time.Second, tail) + if err != nil { + return result, err + } + + if _, ok := v.(MessageAccept); ok == false { + return result, fmt.Errorf("malformed handshake Accept message") + } + + result.ConnectionID = accept.ID + result.Peer = intro.Node + result.PeerVersion = intro.Version + result.PeerCreation = intro.Creation + result.PeerFlags = intro.Flags + result.PeerMaxMessageSize = intro.MaxMessageSize + result.NodeFlags = options.Flags + result.NodeMaxMessageSize = options.MaxMessageSize + result.Tail = tail + + custom := ConnectionOptions{ + PoolSize: h.poolsize, + EncodeAtomCache: h.makeEncodeAtomCache(intro2.AtomCache), + EncodeRegCache: h.makeEncodeRegCache(intro2.RegCache), + EncodeErrCache: h.makeEncodeErrCache(intro2.ErrCache), + DecodeAtomCache: 
h.makeDecodeAtomCache(intro.AtomCache), + DecodeRegCache: h.makeDecodeRegCache(intro.RegCache), + DecodeErrCache: h.makeDecodeErrCache(intro2.ErrCache, intro.ErrCache), + } + result.Custom = custom + + return result, nil +} + +func (h *handshake) getLocalTLSFingerprint(conn net.Conn, cm gen.CertManager) []byte { + if _, tls := conn.(*tls.Conn); tls == false { + return nil + } + cert := cm.GetCertificate() + fp := sha1.Sum(cert.Certificate[0]) + return fp[:] +} diff --git a/net/handshake/handshake.go b/net/handshake/handshake.go new file mode 100644 index 00000000..9ec78a03 --- /dev/null +++ b/net/handshake/handshake.go @@ -0,0 +1,206 @@ +package handshake + +import ( + "encoding/binary" + "fmt" + "math" + "net" + "sync" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" +) + +type handshake struct { + poolsize int // how many TCP links are accepted within the connection + disable_fingerprint bool + flags gen.NetworkFlags + atom_mapping map[gen.Atom]gen.Atom +} + +type Options struct { + PoolSize int + DisableTLSFingerprint bool + NetworkFlags gen.NetworkFlags + AtomMapping map[gen.Atom]gen.Atom +} + +func Create(options Options) gen.NetworkHandshake { + var mapping map[gen.Atom]gen.Atom + if options.PoolSize < 1 { + options.PoolSize = defaultPoolSize + } + if len(options.AtomMapping) > 0 { + mapping = make(map[gen.Atom]gen.Atom) + for k, v := range options.AtomMapping { + mapping[k] = v + } + } + return &handshake{ + poolsize: options.PoolSize, + disable_fingerprint: options.DisableTLSFingerprint, + flags: options.NetworkFlags, + atom_mapping: mapping, + } +} + +func (h *handshake) NetworkFlags() gen.NetworkFlags { + return h.flags +} + +func (h *handshake) Version() gen.Version { + return gen.Version{ + Name: handshakeName, + Release: handshakeRelease, + License: gen.LicenseMIT, + } +} + +func (h *handshake) writeMessage(conn net.Conn, message any) error { + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + buf.Allocate(6) + buf.B[0] = handshakeMagic + buf.B[1] = handshakeVersion + + if err := edf.Encode(message, buf, edf.Options{}); err != nil { + return err + } + + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len()-6)) + l := buf.Len() + lenP := l + for { + n, e := conn.Write(buf.B[lenP-l:]) + if e != nil { + return e + } + // check if anything is left to write + l -= n + if l == 0 { + break + } + } + return nil +} + +func (h *handshake) readMessage(conn net.Conn, timeout time.Duration, chunk []byte) (any, []byte, error) { + var b [4096]byte + + if timeout == 0 { + conn.SetReadDeadline(time.Time{}) + } + + expect := 6 + for { + if len(chunk) < expect { + if timeout > 0 { + conn.SetReadDeadline(time.Now().Add(timeout)) + } + + n, err := conn.Read(b[:]) + if err != nil { + return nil, nil, err + } + + chunk = append(chunk, b[:n]...) 
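+ // partial frame: keep accumulating until at least the 6-byte header + // (magic, version, length) and the complete body are buffered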
+ continue + } + + if chunk[0] != handshakeMagic { + return nil, nil, fmt.Errorf("malformed handshake packet") + } + if chunk[1] != handshakeVersion { + return nil, nil, fmt.Errorf("mismatch handshake version") + } + + l := int(binary.BigEndian.Uint32(chunk[2:6])) + if l > math.MaxUint16 { + return nil, nil, fmt.Errorf("too long handshake message") + } + + if len(chunk) < 6+l { + expect = 6 + l + continue + } + + return edf.Decode(chunk[6:], edf.Options{}) + } +} + +func (h *handshake) makeEncodeAtomCache(local map[uint16]gen.Atom) *sync.Map { + if len(local) == 0 { + return nil + } + cache := new(sync.Map) + for k, v := range local { + cache.Store(v, k) + } + return cache +} + +func (h *handshake) makeEncodeRegCache(local map[uint16]string) *sync.Map { + var names []string + for _, name := range local { + names = append(names, name) + } + if len(names) == 0 { + return nil + } + return edf.MakeEncodeRegTypeCache(names) +} + +func (h *handshake) makeEncodeErrCache(local map[uint16]error) *sync.Map { + if len(local) == 0 { + return nil + } + cache := new(sync.Map) + for k, v := range local { + cache.Store(v, k) + } + return cache +} + +func (h *handshake) makeDecodeAtomCache(remote map[uint16]gen.Atom) *sync.Map { + if len(remote) == 0 { + return nil + } + cache := new(sync.Map) + for k, v := range remote { + cache.Store(k, v) + } + return cache +} + +func (h *handshake) makeDecodeRegCache(remote map[uint16]string) *sync.Map { + if len(remote) == 0 { + return nil + } + cache := new(sync.Map) + for k, v := range remote { + cache.Store(k, v) + } + return cache +} + +func (h *handshake) makeDecodeErrCache(local, remote map[uint16]error) *sync.Map { + if len(remote) == 0 { + return nil + } + c := new(sync.Map) + localRegisteredErrors := make(map[string]error) + for _, v := range local { + localRegisteredErrors[v.Error()] = v + } + for k, v := range remote { + if err, exist := localRegisteredErrors[v.Error()]; exist { + c.Store(k, err) + continue + } + c.Store(k, v) + } + return c +} diff --git a/net/handshake/join.go b/net/handshake/join.go new file mode 100644 index 00000000..a29f55c9 --- /dev/null +++ b/net/handshake/join.go @@ -0,0 +1,56 @@ +package handshake + +import ( + "crypto/sha256" + "fmt" + "net" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +func (h *handshake) Join(node gen.NodeHandshake, conn net.Conn, id string, options gen.HandshakeOptions) ([]byte, error) { + salt := lib.RandomString(64) + hash := sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", id, salt, options.Cookie))) + digest := fmt.Sprintf("%x", hash.Sum(nil)) + message := MessageJoin{ + Node: node.Name(), + ConnectionID: id, + Salt: salt, + Digest: digest, + } + + if err := h.writeMessage(conn, message); err != nil { + conn.Close() + return nil, err + } + + v, tail, err := h.readMessage(conn, time.Second, nil) + if err != nil { + conn.Close() + return nil, err + } + accept, ok := v.(MessageAccept) + if ok == false { + return nil, fmt.Errorf("malformed handshake Accept message") + } + + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s", message.Digest, options.Cookie))) + if accept.Digest != fmt.Sprintf("%x", hash.Sum(nil)) { + return nil, fmt.Errorf("incorrect digest (join)") + } + + if fp := h.getRemoteTLSFingerprint(conn); fp != nil { + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", digest, salt, options.Cookie))) + hash.Write(fp) + if accept.DigestCert != fmt.Sprintf("%x", hash.Sum(nil)) { + return nil, fmt.Errorf("incorrect cert digest (join)") + } + } + + 
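// tail may already hold bytes of the first protocol frames read past the handshake; + // hand them back to the caller so the protocol layer can process them +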
return tail, nil +} diff --git a/net/handshake/start.go b/net/handshake/start.go new file mode 100644 index 00000000..d5aa1553 --- /dev/null +++ b/net/handshake/start.go @@ -0,0 +1,158 @@ +package handshake + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/tls" + "fmt" + "net" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" +) + +func (h *handshake) Start(node gen.NodeHandshake, conn net.Conn, options gen.HandshakeOptions) (gen.HandshakeResult, error) { + var result gen.HandshakeResult + result.HandshakeVersion = h.Version() + + salt := lib.RandomString(64) + hash := sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s", salt, options.Cookie))) + + digest := fmt.Sprintf("%x", hash.Sum(nil)) + + hello := MessageHello{ + Salt: salt, + Digest: digest, + } + + if err := h.writeMessage(conn, hello); err != nil { + return result, err + } + + v, tail, err := h.readMessage(conn, time.Second, nil) + if err != nil { + return result, err + } + + hello2, ok := v.(MessageHello) + if ok == false { + return result, fmt.Errorf("malformed handshake Hello message") + } + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", hello2.Salt, hello.Digest, options.Cookie))) + + if hello2.Digest != fmt.Sprintf("%x", hash.Sum(nil)) { + return result, fmt.Errorf("incorrect digest") + } + + if fp := h.getRemoteTLSFingerprint(conn); fp != nil { + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s:%s", hello2.Salt, hello.Salt, options.Cookie))) + hash.Write(fp) + if hello2.DigestCert != fmt.Sprintf("%x", hash.Sum(nil)) { + return result, fmt.Errorf("incorrect cert digest") + } + } + + intro := MessageIntroduce{ + Node: node.Name(), + Version: node.Version(), + Flags: options.Flags, + Creation: node.Creation(), + + MaxMessageSize: options.MaxMessageSize, + + AtomCache: edf.GetAtomCache(), + RegCache: edf.GetRegCache(), + ErrCache: edf.GetErrCache(), + } + + hash = sha256.New() + hash.Write([]byte(fmt.Sprintf("%s:%s", hello2.Salt, options.Cookie))) + intro.Digest = fmt.Sprintf("%x", hash.Sum(nil)) + + if err := h.writeMessage(conn, intro); err != nil { + return result, err + } + + // waiting for Accept message + v, tail, err = h.readMessage(conn, time.Second, tail) + if err != nil { + return result, err + } + + accept, ok := v.(MessageAccept) + if ok == false { + return result, fmt.Errorf("malformed handshake Accept message") + } + + // waiting for Intro message + v, tail, err = h.readMessage(conn, time.Second, tail) + if err != nil { + return result, err + } + + intro2, ok := v.(MessageIntroduce) + if ok == false { + return result, fmt.Errorf("malformed handshake Introduce message") + } + + if intro2.Node == node.Name() { + return result, fmt.Errorf("malformed handshake Introduce message (same name)") + } + + // everything looks good. 
just send an Accept message + if err := h.writeMessage(conn, MessageAccept{}); err != nil { + return result, err + } + + result.ConnectionID = accept.ID + result.Peer = intro2.Node + result.PeerVersion = intro2.Version + result.PeerCreation = intro2.Creation + result.PeerFlags = intro2.Flags + result.PeerMaxMessageSize = intro2.MaxMessageSize + result.NodeFlags = options.Flags + result.NodeMaxMessageSize = options.MaxMessageSize + result.Tail = tail + + custom := ConnectionOptions{ + PoolSize: accept.PoolSize, + PoolDSN: accept.PoolDSN, + EncodeAtomCache: h.makeEncodeAtomCache(intro.AtomCache), + EncodeRegCache: h.makeEncodeRegCache(intro.RegCache), + EncodeErrCache: h.makeEncodeErrCache(intro.ErrCache), + DecodeAtomCache: h.makeDecodeAtomCache(intro2.AtomCache), + DecodeRegCache: h.makeDecodeRegCache(intro2.RegCache), + DecodeErrCache: h.makeDecodeErrCache(intro.ErrCache, intro2.ErrCache), + } + result.Custom = custom + + if len(h.atom_mapping) > 0 { + result.AtomMapping = make(map[gen.Atom]gen.Atom) + for k, v := range h.atom_mapping { + result.AtomMapping[k] = v + } + } + + return result, nil +} + +func (h *handshake) getRemoteTLSFingerprint(conn net.Conn) []byte { + if h.disable_fingerprint { + return nil + } + c, ok := conn.(*tls.Conn) + if ok == false { + return nil + } + certs := c.ConnectionState().PeerCertificates + if len(certs) == 0 { + return nil + } + fp := sha1.Sum(certs[0].Raw) + return fp[:] +} diff --git a/net/handshake/types.go b/net/handshake/types.go new file mode 100644 index 00000000..1cc65fec --- /dev/null +++ b/net/handshake/types.go @@ -0,0 +1,86 @@ +package handshake + +import ( + "ergo.services/ergo/gen" + "ergo.services/ergo/net/edf" + "sync" +) + +const ( + handshakeName string = "EHS" + handshakeRelease string = "R1" // Ergo Handshake (Rev.1) + + handshakeMagic byte = 87 + handshakeVersion byte = 1 + + defaultPoolSize int = 3 +) + +var ( + DefaultPoolSize int = 1 +) + +type MessageHello struct { + Salt string + Digest string + DigestCert string +} + +type MessageJoin struct { + Node gen.Atom + ConnectionID string + Salt string + Digest string +} + +type MessageIntroduce struct { + Node gen.Atom + Version gen.Version + Flags gen.NetworkFlags + Creation int64 + + MaxMessageSize int + + AtomCache map[uint16]gen.Atom + RegCache map[uint16]string + ErrCache map[uint16]error + Digest string +} + +type MessageAccept struct { + ID string + PoolSize int + PoolDSN []string + Digest string + DigestCert string +} + +type ConnectionOptions struct { + PoolSize int + PoolDSN []string + + EncodeAtomCache *sync.Map + EncodeRegCache *sync.Map + EncodeErrCache *sync.Map + + DecodeAtomCache *sync.Map + DecodeRegCache *sync.Map + DecodeErrCache *sync.Map +} + +func init() { + types := []any{ + MessageHello{}, + MessageJoin{}, + MessageIntroduce{}, + MessageAccept{}, + } + + for _, t := range types { + err := edf.RegisterTypeOf(t) + if err == nil || err == gen.ErrTaken { + continue + } + panic(err) + } +} diff --git a/net/proto/connection.go b/net/proto/connection.go new file mode 100644 index 00000000..273d684e --- /dev/null +++ b/net/proto/connection.go @@ -0,0 +1,2944 @@ +package proto + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/rand" + "net" + "runtime" + "sync" + "sync/atomic" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" +) + +const ( + latency time.Duration = 300 * time.Nanosecond + + // lib.Buffer has 4096 of capacity + // 256 messages could have at least 1Mb of allocated memory + limitMemRecvQueues int64 
= 1024 * 1024 * 512 +) + +type connection struct { + id string + creation int64 // for uptime + core gen.Core + log gen.Log + node_flags gen.NetworkFlags + node_maxmessagesize int + + peer gen.Atom + peer_creation int64 + peer_flags gen.NetworkFlags + peer_version gen.Version + peer_maxmessagesize int + + handshakeVersion gen.Version + protoVersion gen.Version + + pool_dsn []string + pool_size int + + pool_mutex sync.RWMutex + pool []*pool_item + + recvQueues []lib.QueueMPSC + allocatedInQueues int64 + + encodeOptions edf.Options + decodeOptions edf.Options + + requestsMutex sync.RWMutex + requests map[gen.Ref]chan MessageResult + + messagesIn uint64 + messagesOut uint64 + bytesIn uint64 + bytesOut uint64 + transitIn uint64 + transitOut uint64 + + order uint32 + terminated bool + wg sync.WaitGroup +} + +type pool_item struct { + connection net.Conn + fl io.Writer + timer *time.Timer + handling atomic.Bool +} + +// +// gen.RemoteNode implementation +// + +func (c *connection) Name() gen.Atom { + return c.peer +} + +func (c *connection) Proxy() gen.Atom { + // TODO + return "" +} + +func (c *connection) Uptime() int64 { + return time.Now().Unix() - c.peer_creation +} +func (c *connection) Version() gen.Version { + return c.peer_version +} + +func (c *connection) Info() gen.RemoteNodeInfo { + info := gen.RemoteNodeInfo{ + Node: c.peer, + Uptime: time.Now().Unix() - c.peer_creation, + ConnectionUptime: time.Now().Unix() - c.creation, + Version: c.peer_version, + + HandshakeVersion: c.handshakeVersion, + ProtoVersion: c.protoVersion, + + NetworkFlags: c.peer_flags, + + PoolSize: c.pool_size, + PoolDSN: c.pool_dsn, + + MaxMessageSize: c.peer_maxmessagesize, + MessagesIn: atomic.LoadUint64(&c.messagesIn), + MessagesOut: atomic.LoadUint64(&c.messagesOut), + + BytesIn: atomic.LoadUint64(&c.bytesIn), + BytesOut: atomic.LoadUint64(&c.bytesOut), + + TransitBytesIn: atomic.LoadUint64(&c.transitIn), + TransitBytesOut: atomic.LoadUint64(&c.transitOut), + } + return info +} + +func (c *connection) Spawn(name gen.Atom, options gen.ProcessOptions, args ...any) (gen.PID, error) { + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + ParentPID: c.core.PID(), + ParentLeader: c.core.PID(), + ParentLogLevel: c.core.LogLevel(), + Args: args, + } + if c.core.Security().ExposeEnvRemoteSpawn { + opts.ParentEnv = c.core.EnvList() + } + return c.RemoteSpawn(name, opts) +} + +func (c *connection) SpawnRegister(register gen.Atom, name gen.Atom, options gen.ProcessOptions, args ...any) (gen.PID, error) { + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + ParentPID: c.core.PID(), + ParentLeader: c.core.PID(), + ParentLogLevel: c.core.LogLevel(), + Register: register, + Args: args, + } + if c.core.Security().ExposeEnvRemoteSpawn { + opts.ParentEnv = c.core.EnvList() + } + return c.RemoteSpawn(name, opts) +} + +func (c *connection) ApplicationStart(name gen.Atom, options gen.ApplicationOptions) error { + return c.applicationStart(name, 0, options) +} +func (c *connection) ApplicationStartTemporary(name gen.Atom, options gen.ApplicationOptions) error { + return c.applicationStart(name, gen.ApplicationModeTemporary, options) +} +func (c *connection) ApplicationStartTransient(name gen.Atom, options gen.ApplicationOptions) error { + return c.applicationStart(name, gen.ApplicationModeTransient, options) +} +func (c *connection) ApplicationStartPermanent(name gen.Atom, options gen.ApplicationOptions) error { + return c.applicationStart(name, gen.ApplicationModePermanent, options) +} + +func (c *connection) 
applicationStart(name gen.Atom, mode gen.ApplicationMode, options gen.ApplicationOptions) error { + if c.peer_flags.Enable && c.peer_flags.EnableRemoteApplicationStart == false { + return gen.ErrNotAllowed + } + + ref := c.core.MakeRef() + extra := gen.ApplicationOptionsExtra{ + ApplicationOptions: options, + CorePID: c.core.PID(), + CoreLogLevel: c.core.LogLevel(), + } + if c.core.Security().ExposeEnvRemoteApplicationStart { + extra.CoreEnv = c.core.EnvList() + } + message := MessageApplicationStart{ + Name: name, + Mode: mode, + Options: extra, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, 0, 0, gen.Compression{}); err != nil { + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) Creation() int64 { + return c.peer_creation +} + +func (c *connection) Disconnect() { + c.Terminate(gen.TerminateReasonNormal) +} + +func (c *connection) ConnectionUptime() int64 { + return time.Now().Unix() - c.creation +} + +func (c *connection) updateCache() error { + + // TODO + //implement cache updating and add API methods for that + + // create new entries for: + // - AtomCache + // - AtomMapping + // - RegCache + // - ErrCache + + ref := c.core.MakeRef() + message := MessageUpdateCache{ + Ref: ref, + // put them here + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, 0, 0, gen.Compression{}); err != nil { + return err + } + result := c.waitResult(ref, ch) + if result.Error != nil { + return result.Error + } + + // TODO + // add new entries to the local encoding cache + // - c.encodeOptions.AtomCache + // - c.encodeOptions.AtomMapping + // - c.encodeOptions.RegCache + // - c.encodeOptions.ErrCache + + return nil +} + +// +// gen.Connection implementation +// + +func (c *connection) Node() gen.RemoteNode { + return c +} + +func (c *connection) SendPID(from gen.PID, to gen.PID, options gen.MessageOptions, message any) error { + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + return gen.ErrUnsupported + } + } + + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + orderPeer = uint8(0) + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority +8 (message id) + 8 (process id to) + buf.Allocate(8 + 8 + 1 + 8 + 8) + + if err := edf.Encode(message, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoMessagePID + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + + buf.B[16] = byte(options.Priority) // usual value 0, 1, or 2, so just cast it + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + lib.ReleaseBuffer(buf) + return gen.ErrUnsupported + } + // set important flag + buf.B[16] |= 128 + binary.BigEndian.PutUint64(buf.B[17:25], options.Ref.ID[0]) + } + + binary.BigEndian.PutUint64(buf.B[25:33], to.ID) + + return c.send(buf, order, options.Compression) +} + +func (c *connection) SendProcessID(from gen.PID, to gen.ProcessID, options gen.MessageOptions, message any) error { + toName := to.Name + toNameCached 
:= uint16(0) + if c.encodeOptions.AtomMapping != nil { + if v, found := c.encodeOptions.AtomMapping.Load(toName); found { + toName = v.(gen.Atom) + } + } + + bname := []byte(toName) + if len(bname) > 255 { + return fmt.Errorf("process name too long") + } + + if c.encodeOptions.AtomCache != nil { + if v, found := c.encodeOptions.AtomCache.Load(toName); found { + toNameCached = v.(uint16) + } + } + + order := uint8(from.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + } + + buf := lib.TakeBuffer() + if toNameCached > 0 { + // 8 (header) + 8 (process id from) + 1 priority + 8 (message id) + 2 (cache id) + buf.Allocate(8 + 8 + 1 + 8 + 2) + } else { + // 8 (header) + 8 (process id from) + 1 priority + 8 (message id) + 1 (size(bname) + bname + buf.Allocate(8 + 8 + 1 + 8 + 1 + len(bname)) + } + + if err := edf.Encode(message, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + if c.peer_maxmessagesize > 0 && buf.Len() > c.peer_maxmessagesize { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = order // use the same order for the peer + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + + buf.B[16] = byte(options.Priority) // usual value 0, 1, or 2, so just cast it + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + lib.ReleaseBuffer(buf) + return gen.ErrUnsupported + } + // set important flag + buf.B[16] |= 128 + binary.BigEndian.PutUint64(buf.B[17:25], options.Ref.ID[0]) + } + + if toNameCached > 0 { + buf.B[7] = protoMessageNameCache + binary.BigEndian.PutUint16(buf.B[25:27], toNameCached) + } else { + buf.B[7] = protoMessageName + buf.B[25] = byte(len(bname)) + copy(buf.B[26:], bname) + } + + return c.send(buf, order, options.Compression) +} + +func (c *connection) SendAlias(from gen.PID, to gen.Alias, options gen.MessageOptions, message any) error { + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID[1] % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + orderPeer = uint8(0) + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority + 8 (message id) + 24 (alias id [3]uint64) + buf.Allocate(8 + 8 + 1 + 8 + 24) + + if err := edf.Encode(message, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + if c.peer_maxmessagesize > 0 && buf.Len() > c.peer_maxmessagesize { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoMessageAlias + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + + buf.B[16] = byte(options.Priority) // usual value 0, 1, or 2, so just cast it + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + lib.ReleaseBuffer(buf) + return gen.ErrUnsupported + } + // set important flag + buf.B[16] |= 128 + binary.BigEndian.PutUint64(buf.B[17:25], options.Ref.ID[0]) + } + + binary.BigEndian.PutUint64(buf.B[25:33], to.ID[0]) + binary.BigEndian.PutUint64(buf.B[33:41], to.ID[1]) + binary.BigEndian.PutUint64(buf.B[41:49], to.ID[2]) + + return c.send(buf, order, options.Compression) +} + +func (c *connection) SendEvent(from gen.PID, options gen.MessageOptions, message gen.MessageEvent) error { + eventName := 
message.Event.Name + eventNameCached := uint16(0) + + if c.encodeOptions.AtomMapping != nil { + if v, found := c.encodeOptions.AtomMapping.Load(eventName); found { + eventName = v.(gen.Atom) + } + } + + bname := []byte(eventName) + if len(bname) > 255 { + return fmt.Errorf("event name too long") + } + + if c.encodeOptions.AtomCache != nil { + if v, found := c.encodeOptions.AtomCache.Load(eventName); found { + eventNameCached = v.(uint16) + } + } + + order := uint8(from.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + } + + buf := lib.TakeBuffer() + if eventNameCached > 0 { + // 8 (header) + 8 (process id from) + 1 priority + 8 timestamp + 2 (cache id) + buf.Allocate(8 + 8 + 1 + 8 + 2) + } else { + // 8 (header) + 8 (process id from) + 1 priority + 8 timestamp + 1 size(bname) + bname + buf.Allocate(8 + 8 + 1 + 8 + 1 + len(bname)) + } + + if err := edf.Encode(message.Message, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + if c.peer_maxmessagesize > 0 && buf.Len() > c.peer_maxmessagesize { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = order // use the same order for the peer + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + buf.B[16] = byte(options.Priority) // usual value 0, 1, or 2, so just cast it + binary.BigEndian.PutUint64(buf.B[17:25], uint64(message.Timestamp)) + + if eventNameCached > 0 { + buf.B[7] = protoMessageEventCache + binary.BigEndian.PutUint16(buf.B[25:27], eventNameCached) + } else { + buf.B[7] = protoMessageEvent + buf.B[25] = byte(len(bname)) + copy(buf.B[26:], bname) + } + + return c.send(buf, order, options.Compression) +} + +func (c *connection) SendExit(from gen.PID, to gen.PID, reason error) error { + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority + 8 (process id to) + buf.Allocate(8 + 8 + 1 + 8) + + if err := edf.Encode(reason, buf, c.encodeOptions); err != nil { + return err + } + + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID % 255) + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoMessageExit + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + buf.B[16] = byte(gen.MessagePriorityMax) + binary.BigEndian.PutUint64(buf.B[17:25], to.ID) + + return c.send(buf, order, gen.Compression{}) +} + +func (c *connection) SendResponse(from gen.PID, to gen.PID, options gen.MessageOptions, response any) error { + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + orderPeer = uint8(0) + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority + 8 (process id to) + 24 (ref [3]uint64) + buf.Allocate(8 + 8 + 1 + 8 + 24) + + if err := edf.Encode(response, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + if c.peer_maxmessagesize > 0 && buf.Len() > c.peer_maxmessagesize { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoMessageResponse + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + 
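// header recap (matching the writes above and below): [0] magic, [1] version, + // [2:6] frame length, [6] order, [7] type, [8:16] sender pid id; next come + // [16] priority, [17:25] target pid id and [25:49] the ref of the request + // being answered +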
buf.B[16] = byte(options.Priority) // usual value 0, 1, or 2, so just cast it + binary.BigEndian.PutUint64(buf.B[17:25], to.ID) + binary.BigEndian.PutUint64(buf.B[25:33], options.Ref.ID[0]) + binary.BigEndian.PutUint64(buf.B[33:41], options.Ref.ID[1]) + binary.BigEndian.PutUint64(buf.B[41:49], options.Ref.ID[2]) + + return c.send(buf, order, options.Compression) +} + +func (c *connection) SendResponseError(from gen.PID, to gen.PID, options gen.MessageOptions, err error) error { + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + orderPeer = uint8(0) + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority + 8 (process id to) + 24 (ref [3]uint64) + 1 (err code) + buf.Allocate(8 + 8 + 1 + 8 + 24 + 1) + switch err { + case nil: + buf.B[49] = 0 + case gen.ErrProcessUnknown: + buf.B[49] = 1 + case gen.ErrProcessMailboxFull: + buf.B[49] = 2 + case gen.ErrProcessTerminated: + buf.B[49] = 3 + default: + buf.B[49] = 255 + if e := edf.Encode(err, buf, c.encodeOptions); e != nil { + return e + } + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoMessageResponseError + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + buf.B[16] = byte(options.Priority) // usual value 0, 1, or 2, so just cast it + binary.BigEndian.PutUint64(buf.B[17:25], to.ID) + binary.BigEndian.PutUint64(buf.B[25:33], options.Ref.ID[0]) + binary.BigEndian.PutUint64(buf.B[33:41], options.Ref.ID[1]) + binary.BigEndian.PutUint64(buf.B[41:49], options.Ref.ID[2]) + + return c.send(buf, order, options.Compression) +} + +func (c *connection) SendTerminatePID(target gen.PID, reason error) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + buf := lib.TakeBuffer() + // 8 (header) + 1 priority + 8 (target process id) + buf.Allocate(8 + 1 + 8) + + if err := edf.Encode(reason, buf, c.encodeOptions); err != nil { + return err + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = 0 + buf.B[7] = protoMessageTerminatePID + buf.B[8] = byte(gen.MessagePriorityHigh) + binary.BigEndian.PutUint64(buf.B[9:17], target.ID) + + return c.send(buf, 0, gen.Compression{}) +} + +func (c *connection) SendTerminateProcessID(target gen.ProcessID, reason error) error { + targetName := target.Name + targetNameCached := uint16(0) + if c.encodeOptions.AtomMapping != nil { + if v, found := c.encodeOptions.AtomMapping.Load(targetName); found { + targetName = v.(gen.Atom) + } + } + + bname := []byte(targetName) + if len(bname) > 255 { + return fmt.Errorf("target process name too long") + } + + if c.encodeOptions.AtomCache != nil { + if v, found := c.encodeOptions.AtomCache.Load(targetName); found { + targetNameCached = v.(uint16) + } + } + + buf := lib.TakeBuffer() + if targetNameCached > 0 { + // 8 (header) + 1 priority + 2 (cache id) + buf.Allocate(8 + 1 + 2) + } else { + // 8 (header) + 1 priority + 1 (len of bname) + bname + buf.Allocate(8 + 1 + 1 + len(bname)) + } + + if err := edf.Encode(reason, buf, c.encodeOptions); err != nil { + return err + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = 0 // order + buf.B[8] = byte(gen.MessagePriorityHigh) + if targetNameCached > 0 { + buf.B[7] = 
protoMessageTerminateNameCache + binary.BigEndian.PutUint16(buf.B[9:11], targetNameCached) + } else { + buf.B[7] = protoMessageTerminateName + buf.B[9] = byte(len(bname)) + copy(buf.B[10:], bname) + } + + return c.send(buf, 0, gen.Compression{}) +} + +func (c *connection) SendTerminateAlias(target gen.Alias, reason error) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + buf := lib.TakeBuffer() + // 8 (header) + 1 priority + 24 (target alias id [3]uint64) + buf.Allocate(8 + 1 + 24) + + if err := edf.Encode(reason, buf, c.encodeOptions); err != nil { + return err + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = 0 + buf.B[7] = protoMessageTerminateAlias + buf.B[8] = byte(gen.MessagePriorityHigh) + binary.BigEndian.PutUint64(buf.B[9:17], target.ID[0]) + binary.BigEndian.PutUint64(buf.B[17:25], target.ID[1]) + binary.BigEndian.PutUint64(buf.B[25:33], target.ID[2]) + + return c.send(buf, 0, gen.Compression{}) +} + +func (c *connection) SendTerminateEvent(target gen.Event, reason error) error { + eventName := target.Name + eventNameCached := uint16(0) + + if c.encodeOptions.AtomMapping != nil { + if v, found := c.encodeOptions.AtomMapping.Load(eventName); found { + eventName = v.(gen.Atom) + } + } + + bname := []byte(eventName) + if len(bname) > 255 { + return fmt.Errorf("terminated event name too long") + } + + if c.encodeOptions.AtomCache != nil { + if v, found := c.encodeOptions.AtomCache.Load(eventName); found { + eventNameCached = v.(uint16) + } + } + + buf := lib.TakeBuffer() + if eventNameCached > 0 { + // 8 (header) + 1 priority + 2 (cache id) + buf.Allocate(8 + 1 + 2) + } else { + // 8 (header) + 1 priority + 1 size(bname) + bname + buf.Allocate(8 + 1 + 1 + len(bname)) + } + + if err := edf.Encode(reason, buf, c.encodeOptions); err != nil { + return err + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = 0 // order + buf.B[8] = byte(gen.MessagePriorityHigh) + + if eventNameCached > 0 { + buf.B[7] = protoMessageTerminateEventCache + binary.BigEndian.PutUint16(buf.B[9:11], eventNameCached) + } else { + buf.B[7] = protoMessageTerminateEvent + buf.B[9] = byte(len(bname)) + copy(buf.B[10:], bname) + } + + return c.send(buf, 0, gen.Compression{}) +} + +func (c *connection) CallPID(from gen.PID, to gen.PID, options gen.MessageOptions, message any) error { + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + orderPeer = uint8(0) + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority + 24 (request ref) + 8 (process id to) + buf.Allocate(8 + 8 + 1 + 24 + 8) + + if err := edf.Encode(message, buf, c.encodeOptions); err != nil { + return err + } + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoRequestPID + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + + buf.B[16] = byte(options.Priority) + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + lib.ReleaseBuffer(buf) + return gen.ErrUnsupported + } + // set important flag + buf.B[16] |= 128 + } + + binary.BigEndian.PutUint64(buf.B[17:25], options.Ref.ID[0]) + 
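// the ref is a [3]uint64; all three words go on the wire so the peer can + // correlate its future response with this call +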
binary.BigEndian.PutUint64(buf.B[25:33], options.Ref.ID[1]) + binary.BigEndian.PutUint64(buf.B[33:41], options.Ref.ID[2]) + binary.BigEndian.PutUint64(buf.B[41:49], to.ID) + + return c.send(buf, order, options.Compression) +} + +func (c *connection) CallProcessID(from gen.PID, to gen.ProcessID, options gen.MessageOptions, message any) error { + toName := to.Name + toNameCached := uint16(0) + if c.encodeOptions.AtomMapping != nil { + if v, found := c.encodeOptions.AtomMapping.Load(toName); found { + toName = v.(gen.Atom) + } + } + + bname := []byte(toName) + if len(bname) > 255 { + return fmt.Errorf("process name too long") + } + + if c.encodeOptions.AtomCache != nil { + if v, found := c.encodeOptions.AtomCache.Load(toName); found { + toNameCached = v.(uint16) + } + } + + order := uint8(from.ID % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + } + + buf := lib.TakeBuffer() + if toNameCached > 0 { + // 8 (header) + 8 (process id from) + 1 priority + 24 (request ref) + 2 (cache id) + buf.Allocate(8 + 8 + 1 + 24 + 2) + } else { + // 8 (header) + 8 (process id from) + 1 priority + 24 (request ref) +1 (size(bname)) + bname + buf.Allocate(8 + 8 + 1 + 24 + 1 + len(bname)) + } + + if err := edf.Encode(message, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = order // use the same order for the peer + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + + buf.B[16] = byte(options.Priority) + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + lib.ReleaseBuffer(buf) + return gen.ErrUnsupported + } + // set important flag + buf.B[16] |= 128 + } + + binary.BigEndian.PutUint64(buf.B[17:25], options.Ref.ID[0]) + binary.BigEndian.PutUint64(buf.B[25:33], options.Ref.ID[1]) + binary.BigEndian.PutUint64(buf.B[33:41], options.Ref.ID[2]) + + if toNameCached > 0 { + buf.B[7] = protoRequestNameCache + binary.BigEndian.PutUint16(buf.B[41:43], toNameCached) + } else { + buf.B[7] = protoRequestName + buf.B[41] = byte(len(bname)) + copy(buf.B[42:], bname) + } + + return c.send(buf, order, options.Compression) +} + +func (c *connection) CallAlias(from gen.PID, to gen.Alias, options gen.MessageOptions, message any) error { + + if to.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + + order := uint8(from.ID % 255) + orderPeer := uint8(to.ID[1] % 255) + if options.KeepNetworkOrder == false { + order = uint8(0) + orderPeer = uint8(0) + } + + buf := lib.TakeBuffer() + // 8 (header) + 8 (process id from) + 1 priority + 24 (request ref) + 24 (alias id to) + buf.Allocate(8 + 8 + 1 + 24 + 24) + + if err := edf.Encode(message, buf, c.encodeOptions); err != nil { + return err + } + + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoRequestAlias + binary.BigEndian.PutUint64(buf.B[8:16], from.ID) + + buf.B[16] = byte(options.Priority) + if options.ImportantDelivery { + if c.peer_flags.EnableImportantDelivery == false { + lib.ReleaseBuffer(buf) + return gen.ErrUnsupported + } + // set important flag + buf.B[16] |= 128 + } + + binary.BigEndian.PutUint64(buf.B[17:25], options.Ref.ID[0]) + binary.BigEndian.PutUint64(buf.B[25:33], options.Ref.ID[1]) + binary.BigEndian.PutUint64(buf.B[33:41], options.Ref.ID[2]) + 
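// the target alias is itself a [3]uint64, appended right after the request ref +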
binary.BigEndian.PutUint64(buf.B[41:49], to.ID[0]) + binary.BigEndian.PutUint64(buf.B[49:57], to.ID[1]) + binary.BigEndian.PutUint64(buf.B[57:65], to.ID[2]) + + return c.send(buf, order, options.Compression) +} + +func (c *connection) LinkPID(pid gen.PID, target gen.PID) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + order := uint8(pid.ID % 255) + orderPeer := uint8(target.ID % 255) + ref := c.core.MakeRef() + message := MessageLinkPID{ + Source: pid, + Target: target, + Ref: ref, + } + + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, orderPeer, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) UnlinkPID(pid gen.PID, target gen.PID) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + order := uint8(pid.ID % 255) + orderPeer := uint8(target.ID % 255) + ref := c.core.MakeRef() + message := MessageUnlinkPID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, orderPeer, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) LinkProcessID(pid gen.PID, target gen.ProcessID) error { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageLinkProcessID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error + +} + +func (c *connection) UnlinkProcessID(pid gen.PID, target gen.ProcessID) error { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageUnlinkProcessID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error + +} + +func (c *connection) LinkAlias(pid gen.PID, target gen.Alias) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + order := uint8(pid.ID % 255) + orderPeer := uint8(target.ID[1] % 255) + ref := c.core.MakeRef() + message := MessageLinkAlias{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, orderPeer, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error + +} + +func (c *connection) UnlinkAlias(pid gen.PID, target gen.Alias) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + order := uint8(pid.ID % 255) + orderPeer := 
uint8(target.ID[1] % 255) + ref := c.core.MakeRef() + message := MessageUnlinkAlias{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, orderPeer, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) LinkEvent(pid gen.PID, target gen.Event) ([]gen.MessageEvent, error) { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageLinkEvent{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return nil, err + } + result := c.waitResult(ref, ch) + if result.Error != nil { + return nil, result.Error + } + r, ok := result.Result.([]gen.MessageEvent) + if ok == false { + return nil, gen.ErrMalformed + } + return r, nil +} + +func (c *connection) UnlinkEvent(pid gen.PID, target gen.Event) error { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageUnlinkEvent{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) MonitorPID(pid gen.PID, target gen.PID) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + ref := c.core.MakeRef() + order := uint8(pid.ID % 255) + message := MessageMonitorPID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) DemonitorPID(pid gen.PID, target gen.PID) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + ref := c.core.MakeRef() + order := uint8(pid.ID % 255) + message := MessageDemonitorPID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) MonitorProcessID(pid gen.PID, target gen.ProcessID) error { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageMonitorProcessID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + 
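// Every Link*/Unlink*/Monitor*/Demonitor* method above follows the same
// synchronous request/response pattern: make a Ref, register a reply channel
// under that Ref, send the request, then block in waitResult until
// routeMessage delivers the peer's MessageResult (or the timeout fires).
// A minimal sketch of that shared shape - request() is a hypothetical helper,
// not part of this patch; the Ref is embedded into each Message* struct
// before calling it:
//
//	func (c *connection) request(ref gen.Ref, msg any, order, orderPeer uint8) MessageResult {
//		ch := make(chan MessageResult)
//		c.requestsMutex.Lock()
//		c.requests[ref] = ch // routeMessage matches the reply by this Ref
//		c.requestsMutex.Unlock()
//		if err := c.sendAny(msg, order, orderPeer, gen.Compression{}); err != nil {
//			c.requestsMutex.Lock()
//			delete(c.requests, ref) // undo the registration on send failure
//			c.requestsMutex.Unlock()
//			return MessageResult{Ref: ref, Error: err}
//		}
//		return c.waitResult(ref, ch) // waitResult deregisters the Ref itself
//	}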
return result.Error +} + +func (c *connection) DemonitorProcessID(pid gen.PID, target gen.ProcessID) error { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageDemonitorProcessID{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) MonitorAlias(pid gen.PID, target gen.Alias) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + ref := c.core.MakeRef() + order := uint8(pid.ID % 255) + orderPeer := uint8(target.ID[1] % 255) + message := MessageMonitorAlias{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, orderPeer, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) DemonitorAlias(pid gen.PID, target gen.Alias) error { + if target.Creation != c.peer_creation { + return gen.ErrProcessIncarnation + } + ref := c.core.MakeRef() + order := uint8(pid.ID % 255) + orderPeer := uint8(target.ID[1] % 255) + message := MessageDemonitorAlias{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, orderPeer, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) MonitorEvent(pid gen.PID, target gen.Event) ([]gen.MessageEvent, error) { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageMonitorEvent{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return nil, err + } + result := c.waitResult(ref, ch) + if result.Error != nil { + return nil, result.Error + } + r, ok := result.Result.([]gen.MessageEvent) + if ok == false { + return nil, gen.ErrMalformed + } + return r, nil +} + +func (c *connection) DemonitorEvent(pid gen.PID, target gen.Event) error { + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + message := MessageDemonitorEvent{ + Source: pid, + Target: target, + Ref: ref, + } + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return err + } + result := c.waitResult(ref, ch) + return result.Error +} + +func (c *connection) RemoteSpawn(name gen.Atom, options gen.ProcessOptionsExtra) (gen.PID, error) { + var pid gen.PID + + if c.peer_flags.Enable && c.peer_flags.EnableRemoteSpawn == false { + return pid, gen.ErrNotAllowed + } + + order := uint8(pid.ID % 255) + ref := c.core.MakeRef() + + message := 
MessageSpawn{ + Name: name, + Options: options, + Ref: ref, + } + + ch := make(chan MessageResult) + c.requestsMutex.Lock() + c.requests[ref] = ch + c.requestsMutex.Unlock() + + if err := c.sendAny(message, order, 0, gen.Compression{}); err != nil { + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + return pid, err + } + + result := c.waitResult(ref, ch) + if result.Error != nil { + return pid, result.Error + } + pid, ok := result.Result.(gen.PID) + if ok == false { + return pid, gen.ErrMalformed + } + return pid, nil +} + +func (c *connection) Join(conn net.Conn, id string, dial gen.NetworkDial, tail []byte) error { + if id != c.id { + return fmt.Errorf("connection id mismatch") + } + + if c.terminated { + return fmt.Errorf("connection terminated") + } + + c.pool_mutex.Lock() + if c.pool_size+1 < len(c.pool) { + c.pool_mutex.Unlock() + return fmt.Errorf("pool size limit") + } + pi := &pool_item{ + connection: conn, + fl: lib.NewFlusher(conn), + } + c.pool = append(c.pool, pi) + c.pool_mutex.Unlock() + + c.wg.Add(1) + go func() { + if lib.Trace() { + defer c.log.Trace("connection %s left the pool", conn.RemoteAddr().String()) + } + + re: // reconnected + if lib.Trace() { + c.log.Trace("joined new connection %s to the pool", conn.RemoteAddr().String()) + } + + c.serve(pi.connection, tail) + + if dial != nil { + pool_dsn := []string{} + pool_dsn = append(pool_dsn, c.pool_dsn...) + rand.Shuffle(len(pool_dsn), func(i, j int) { + pool_dsn[i], pool_dsn[j] = pool_dsn[j], pool_dsn[i] + }) + for _, dsn := range pool_dsn { + if c.terminated { + c.wg.Done() + return + } + c.log.Trace("re-dialing %s", dsn) + nc, t, err := dial(dsn, id) + if err != nil { + continue + } + pi.connection = nc + tail = t + + goto re + } + } + if c.terminated { + c.wg.Done() + return + } + + // remove it from the pool + c.pool_mutex.Lock() + for i, item := range c.pool { + if item != pi { + continue + } + c.pool[i] = c.pool[0] + c.pool = c.pool[1:] + } + if len(c.pool) == 0 { + c.pool_mutex.Unlock() + c.wg.Done() + return + } + c.pool_mutex.Unlock() + + c.wg.Done() + }() + + return nil +} + +func (c *connection) Terminate(reason error) { + c.terminated = true + + c.pool_mutex.Lock() + defer c.pool_mutex.Unlock() + for _, pi := range c.pool { + pi.connection.Close() + } +} + +func (c *connection) serve(conn net.Conn, tail []byte) { + + recvN := 0 + recvNQ := len(c.recvQueues) + + buf := lib.TakeBuffer() + buf.Append(tail) + + // remove the deadline + conn.SetReadDeadline(time.Time{}) + + for { + // read packet + buftail, err := c.read(conn, buf) + if err != nil || buftail == nil { + if err != nil { + c.log.Trace("link with %s closed: %s", conn.RemoteAddr(), err) + } + lib.ReleaseBuffer(buf) + conn.Close() + return + } + + if buf.B[0] != protoMagic { + c.log.Error("received malformed packet from %s (incorrect proto)", conn.RemoteAddr()) + lib.ReleaseBuffer(buf) + conn.Close() + return + } + + if buf.B[1] != protoVersion { + c.log.Error("received malformed packet from %s (incorrect proto version)", conn.RemoteAddr()) + lib.ReleaseBuffer(buf) + conn.Close() + return + } + + recvN++ + + atomic.AddUint64(&c.messagesIn, 1) + atomic.AddUint64(&c.bytesIn, uint64(buf.Len())) + // TODO + // c.transitIn + + // send 'buf' to the decoding queue + qN := recvN % recvNQ + if order := int(buf.B[6]); order > 0 { + qN = order % recvNQ + } + if lib.Trace() { + c.log.Trace("received message. put it to pool[%d] of %s...", qN, conn.RemoteAddr()) + } + queue := c.recvQueues[qN] + atomic.AddInt64(&c.allocatedInQueues, int64(buf.Cap())) + + queue.Push(buf) + if queue.Lock() { + go c.handleRecvQueue(queue) + } + buf = buftail + + } +} + +func (c *connection) handleRecvQueue(q lib.QueueMPSC) { + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + c.log.Panic("panic on handling received message: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + c.Terminate(gen.TerminateReasonPanic) + } + }() + } + + if lib.Trace() { + c.log.Trace("start handling the message queue") + } + for { + v, ok := q.Pop() + if ok == false { + // no more items in the queue, unlock it + q.Unlock() + + // but check the queue before exiting this goroutine + if i := q.Item(); i == nil { + return + } + + // there is something in the queue, try to lock it back + if locked := q.Lock(); locked == false { + // another goroutine is started + return + } + // get back to work + continue + } + + buf := v.(*lib.Buffer) + + // to avoid getting the buffer pool too big, we check the total volume (capacity) + // we took from there and don't put it back if the limit has been reached. + releaseBuffer := true + if atomic.AddInt64(&c.allocatedInQueues, int64(-buf.Cap())) > limitMemRecvQueues { + releaseBuffer = false + } + + re: + switch buf.B[7] { + case protoMessagePID: // process id + if buf.Len() < 30 { + c.log.Error("malformed message (too small MessagePID)") + continue + } + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + priority := gen.MessagePriority(buf.B[16] & 3) + important := (buf.B[16] & 128) > 0 + idTO := binary.BigEndian.Uint64(buf.B[25:33]) + + msg, tail, err := edf.Decode(buf.B[33:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.PID{ + Node: c.core.Name(), + ID: idTO, + Creation: c.core.Creation(), + } + + opts := gen.MessageOptions{ + Priority: priority, + } + + err = c.core.RouteSendPID(from, to, opts, msg) + if important == false { + continue + } + if c.node_flags.EnableImportantDelivery == false { + continue + } + + opts.Ref.ID[0] = binary.BigEndian.Uint64(buf.B[17:25]) + c.SendResponseError(to, from, opts, err) + + case protoMessageName, protoMessageNameCache: // name, cached name + var toName gen.Atom + var data []byte + + if buf.Len() < 18 { + c.log.Error("malformed message (too small MessageName*)") + continue + } + + if buf.B[7] == protoMessageName { + l := int(buf.B[25]) + if buf.Len() < 26+l { + c.log.Error("malformed message (too small MessageName)") + continue + } + + toName = gen.Atom(buf.B[26 : 26+l]) + data = buf.B[26+l:] + + } else { + if buf.Len() < 28 { + c.log.Error("malformed message (too small MessageNameCache)") + continue + } + + id := binary.BigEndian.Uint16(buf.B[25:27]) + if c.decodeOptions.AtomCache == nil { + c.log.Error("received message with cached atom value %d, but cache is nil (message ignored). please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + v, found := c.decodeOptions.AtomCache.Load(id) + if found == false { + c.log.Error("received message with unknown atom cache id %d (message ignored). 
please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + toName = v.(gen.Atom) + data = buf.B[27:] + } + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + priority := gen.MessagePriority(buf.B[16] & 3) + important := (buf.B[16] & 128) > 0 + + msg, tail, err := edf.Decode(data, c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + if c.decodeOptions.AtomMapping != nil { + if v, found := c.decodeOptions.AtomMapping.Load(toName); found { + toName = v.(gen.Atom) + } + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.ProcessID{ + Node: c.core.Name(), + Name: toName, + } + + opts := gen.MessageOptions{ + Priority: priority, + } + + err = c.core.RouteSendProcessID(from, to, opts, msg) + if important == false { + continue + } + if c.node_flags.EnableImportantDelivery == false { + continue + } + + opts.Ref.ID[0] = binary.BigEndian.Uint64(buf.B[17:25]) + c.SendResponseError(gen.PID{}, from, opts, err) + + case protoMessageAlias: + if buf.Len() < 49 { + c.log.Error("malformed message (too small MessageAlias)") + continue + } + + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + priority := gen.MessagePriority(buf.B[16] & 3) + important := (buf.B[16] & 128) > 0 + idTo := [3]uint64{ + binary.BigEndian.Uint64(buf.B[25:33]), + binary.BigEndian.Uint64(buf.B[33:41]), + binary.BigEndian.Uint64(buf.B[41:49]), + } + + msg, tail, err := edf.Decode(buf.B[49:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.Alias{ + Node: c.core.Name(), + ID: idTo, + Creation: c.core.Creation(), + } + + opts := gen.MessageOptions{ + Priority: priority, + } + err = c.core.RouteSendAlias(from, to, opts, msg) + + if important == false { + continue + } + if c.node_flags.EnableImportantDelivery == false { + continue + } + + opts.Ref.ID[0] = binary.BigEndian.Uint64(buf.B[17:25]) + c.SendResponseError(gen.PID{}, from, opts, err) + + case protoRequestPID: + if buf.Len() < 50 { + c.log.Error("malformed message (too small RequestPID)") + continue + } + + ref := gen.Ref{ + Node: c.peer, + Creation: c.peer_creation, + } + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + + priority := gen.MessagePriority(buf.B[16] & 3) + important := (buf.B[16] & 128) > 0 + + ref.ID[0] = binary.BigEndian.Uint64(buf.B[17:25]) + ref.ID[1] = binary.BigEndian.Uint64(buf.B[25:33]) + ref.ID[2] = binary.BigEndian.Uint64(buf.B[33:41]) + idTO := binary.BigEndian.Uint64(buf.B[41:49]) + + msg, tail, err := edf.Decode(buf.B[49:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.PID{ + Node: c.core.Name(), + ID: idTO, + Creation: c.core.Creation(), + } + + opts := gen.MessageOptions{ + Ref: ref, + Priority: priority, + } + + err = c.core.RouteCallPID(from, to, opts, msg) + if err == nil { + continue + } + + if important == false { + continue + } + 
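// The options byte decoded above (buf.B[16]) packs two things: bits 0-1
// carry the gen.MessagePriority and bit 7 is the "important" flag, which
// asks the receiving node to confirm delivery. Extraction sketch, matching
// the decoding used throughout this switch:
//
//	priority  := gen.MessagePriority(b & 3) // bits 0-1
//	important := (b & 128) > 0              // bit 7
//
// Only when the flag is set and this node has important delivery enabled
// (checked next) is the routing error reported back via SendResponseError,
// keyed by the Ref transmitted in bytes 17..25 of the frame.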
+ if c.node_flags.EnableImportantDelivery == false { + continue + } + + c.SendResponseError(to, from, opts, err) + + case protoRequestName, protoRequestNameCache: + if buf.Len() < 43 { + c.log.Error("malformed message (too small RequestName*)") + continue + } + from := gen.PID{ + Node: c.peer, + Creation: c.peer_creation, + ID: binary.BigEndian.Uint64(buf.B[8:16]), + } + priority := gen.MessagePriority(buf.B[16] & 3) + important := (buf.B[16] & 128) > 0 + + ref := gen.Ref{ + Node: c.peer, + Creation: c.peer_creation, + } + ref.ID[0] = binary.BigEndian.Uint64(buf.B[17:25]) + ref.ID[1] = binary.BigEndian.Uint64(buf.B[25:33]) + ref.ID[2] = binary.BigEndian.Uint64(buf.B[33:41]) + + to := gen.ProcessID{ + Node: c.core.Name(), + } + + var data []byte + if buf.B[7] == protoRequestName { + l := int(buf.B[41]) + if buf.Len() < 42+l { + c.log.Error("malformed message (too small RequestName)") + continue + } + + to.Name = gen.Atom(buf.B[42 : 42+l]) + data = buf.B[42+l:] + + } else { + id := binary.BigEndian.Uint16(buf.B[41:43]) + if c.decodeOptions.AtomCache == nil { + c.log.Error("received message with cached atom value %d, but cache is nil (message ignored). please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + v, found := c.decodeOptions.AtomCache.Load(id) + if found == false { + c.log.Error("received message with unknown atom cache id %d (message ignored). please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + to.Name = v.(gen.Atom) + data = buf.B[43:] + } + + msg, tail, err := edf.Decode(data, c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + if c.decodeOptions.AtomMapping != nil { + if v, found := c.decodeOptions.AtomMapping.Load(to.Name); found { + to.Name = v.(gen.Atom) + } + } + opts := gen.MessageOptions{ + Ref: ref, + Priority: priority, + } + + err = c.core.RouteCallProcessID(from, to, opts, msg) + if err == nil { + continue + } + + if important == false { + continue + } + + if c.node_flags.EnableImportantDelivery == false { + continue + } + + c.SendResponseError(gen.PID{}, from, opts, err) + + case protoRequestAlias: + if buf.Len() < 66 { + c.log.Error("malformed message (too small RequestAlias)") + continue + } + ref := gen.Ref{ + Node: c.peer, + Creation: c.peer_creation, + } + to := gen.Alias{ + Node: c.core.Name(), + Creation: c.core.Creation(), + } + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + + priority := gen.MessagePriority(buf.B[16] & 3) + important := (buf.B[16] & 128) > 0 + + ref.ID[0] = binary.BigEndian.Uint64(buf.B[17:25]) + ref.ID[1] = binary.BigEndian.Uint64(buf.B[25:33]) + ref.ID[2] = binary.BigEndian.Uint64(buf.B[33:41]) + to.ID[0] = binary.BigEndian.Uint64(buf.B[41:49]) + to.ID[1] = binary.BigEndian.Uint64(buf.B[49:57]) + to.ID[2] = binary.BigEndian.Uint64(buf.B[57:65]) + + msg, tail, err := edf.Decode(buf.B[65:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + + opts := gen.MessageOptions{ + Ref: ref, + Priority: priority, + } + err = c.core.RouteCallAlias(from, to, opts, msg) + if err == nil { + continue + } + + if important == false { + continue + } + + 
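// Note on the two RequestName variants handled above: protoRequestName
// carries the target name inline, while protoRequestNameCache replaces it
// with a uint16 id previously announced by the peer via MessageUpdateCache.
// Layout after the 41-byte fixed part, as parsed above:
//
//	nameLen := int(buf.B[41])                        // protoRequestName: inline atom, bytes [42:42+nameLen]
//	cacheID := binary.BigEndian.Uint16(buf.B[41:43]) // protoRequestNameCache: cache lookup
//
// An unknown cache id is treated as a protocol bug and the message is dropped.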
if c.node_flags.EnableImportantDelivery == false { + continue + } + + c.SendResponseError(gen.PID{}, from, opts, err) + + case protoMessageEvent, protoMessageEventCache: + if buf.Len() < 28 { + c.log.Error("malformed message (too small MessageEvent*)") + continue + } + from := gen.PID{ + Node: c.peer, + Creation: c.peer_creation, + ID: binary.BigEndian.Uint64(buf.B[8:16]), + } + options := gen.MessageOptions{ + Priority: gen.MessagePriority(buf.B[16]), + } + message := gen.MessageEvent{ + Timestamp: int64(binary.BigEndian.Uint64(buf.B[17:25])), + } + message.Event.Node = c.peer + + var data []byte + if buf.B[7] == protoMessageEvent { + l := int(buf.B[25]) + if buf.Len() < 26+l { + c.log.Error("malformed message (too small MessageEvent)") + continue + } + message.Event.Name = gen.Atom(buf.B[26 : 26+l]) + data = buf.B[26+l:] + + } else { + id := binary.BigEndian.Uint16(buf.B[25:27]) + if c.decodeOptions.AtomCache == nil { + c.log.Error("received Event with cached atom value %d, but cache is nil (message ignored). please, report this bug", id) + continue + } + + v, found := c.decodeOptions.AtomCache.Load(id) + if found == false { + c.log.Error("received Event with unknown atom cache id %d (message ignored). please, report this bug", id) + continue + } + + message.Event.Name = v.(gen.Atom) + data = buf.B[27:] + } + + if c.decodeOptions.AtomMapping != nil { + if v, found := c.decodeOptions.AtomMapping.Load(message.Event.Name); found { + message.Event.Name = v.(gen.Atom) + } + } + + msg, tail, err := edf.Decode(data, c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + message.Message = msg + c.core.RouteSendEvent(from, gen.Ref{}, options, message) + + case protoMessageExit: + if buf.Len() < 26 { + c.log.Error("malformed message (too small MessageExit)") + continue + } + + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + // priority := gen.MessagePriority(buf.B[16]) ignored + idTO := binary.BigEndian.Uint64(buf.B[17:25]) + + msg, tail, err := edf.Decode(buf.B[25:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + reason, ok := msg.(error) + if ok == false { + c.log.Error("received malformed Exit message: %v", msg) + continue + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.PID{ + Node: c.core.Name(), + ID: idTO, + Creation: c.core.Creation(), + } + + c.core.RouteSendExit(from, to, reason) + + case protoMessageResponse: + if buf.Len() < 49 { + c.log.Error("malformed message (too small MessageResponse)") + continue + } + + ref := gen.Ref{ + Node: c.core.Name(), + Creation: c.core.Creation(), + } + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + priority := gen.MessagePriority(buf.B[16]) + idTO := binary.BigEndian.Uint64(buf.B[17:25]) + ref.ID[0] = binary.BigEndian.Uint64(buf.B[25:33]) + ref.ID[1] = binary.BigEndian.Uint64(buf.B[33:41]) + ref.ID[2] = binary.BigEndian.Uint64(buf.B[41:49]) + + msg, tail, err := edf.Decode(buf.B[49:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", 
tail) + } + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.PID{ + Node: c.core.Name(), + ID: idTO, + Creation: c.core.Creation(), + } + + opts := gen.MessageOptions{ + Ref: ref, + Priority: priority, + } + c.core.RouteSendResponse(from, to, opts, msg) + + case protoMessageResponseError: + if buf.Len() < 50 { + c.log.Error("malformed message (too small MessageResponseError)") + continue + } + + ref := gen.Ref{ + Node: c.core.Name(), + Creation: c.core.Creation(), + } + idFrom := binary.BigEndian.Uint64(buf.B[8:16]) + priority := gen.MessagePriority(buf.B[16]) + idTO := binary.BigEndian.Uint64(buf.B[17:25]) + ref.ID[0] = binary.BigEndian.Uint64(buf.B[25:33]) + ref.ID[1] = binary.BigEndian.Uint64(buf.B[33:41]) + ref.ID[2] = binary.BigEndian.Uint64(buf.B[41:49]) + + from := gen.PID{ + Node: c.peer, + ID: idFrom, + Creation: c.peer_creation, + } + to := gen.PID{ + Node: c.core.Name(), + ID: idTO, + Creation: c.core.Creation(), + } + opts := gen.MessageOptions{ + Ref: ref, + Priority: priority, + } + + var r error // result + switch buf.B[49] { + case 0: + break + case 1: + r = gen.ErrProcessUnknown + case 2: + r = gen.ErrProcessMailboxFull + case 3: + r = gen.ErrProcessTerminated + case 255: + var ok bool + + msg, tail, err := edf.Decode(buf.B[50:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + r, ok = msg.(error) + if ok == false { + c.log.Error("received incorrect response error") + continue + } + + default: + c.log.Error("received incorrect response error id") + continue + } + c.core.RouteSendResponseError(from, to, opts, r) + + case protoMessageTerminatePID: + if buf.Len() < 18 { + c.log.Error("malformed message (too small MessageTerminatePID)") + continue + } + // priority := gen.MessagePriority(buf.B[8]) ignored + idTarget := binary.BigEndian.Uint64(buf.B[9:17]) + msg, tail, err := edf.Decode(buf.B[17:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + reason, ok := msg.(error) + if ok == false { + c.log.Error("received malformed TerminatePID message: %v", msg) + continue + } + + target := gen.PID{ + Node: c.peer, + ID: idTarget, + Creation: c.peer_creation, + } + c.core.RouteTerminatePID(target, reason) + + case protoMessageTerminateName, protoMessageTerminateNameCache: + var data []byte + + if buf.Len() < 12 { + c.log.Error("malformed message (too small MessageTerminateName*)") + continue + } + + processid := gen.ProcessID{ + Node: c.peer, + } + // priority := gen.MessagePriority(buf.B[8]) ignored + if buf.B[7] == protoMessageTerminateName { + l := int(buf.B[9]) + if buf.Len() < 10+l { + c.log.Error("malformed message (too small MessageTerminateName)") + continue + } + processid.Name = gen.Atom(buf.B[10 : 10+l]) + data = buf.B[10+l:] + } else { + id := binary.BigEndian.Uint16(buf.B[9:11]) + if c.decodeOptions.AtomCache == nil { + c.log.Error("received TerminateName with cached atom value %d, but cache is nil (message ignored). 
please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + v, found := c.decodeOptions.AtomCache.Load(id) + if found == false { + c.log.Error("received TerminateName with unknown atom cache id %d (message ignored). please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + processid.Name = v.(gen.Atom) + data = buf.B[11:] + } + + if c.decodeOptions.AtomMapping != nil { + if v, found := c.decodeOptions.AtomMapping.Load(processid.Name); found { + processid.Name = v.(gen.Atom) + } + } + + msg, tail, err := edf.Decode(data, c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + reason, ok := msg.(error) + if ok == false { + c.log.Error("received malformed TerminateName message: %v", msg) + continue + } + c.core.RouteTerminateProcessID(processid, reason) + + case protoMessageTerminateEvent, protoMessageTerminateEventCache: + var data []byte + + if buf.Len() < 12 { + c.log.Error("malformed message (too small MessageTerminateEvent*)") + continue + } + + event := gen.Event{ + Node: c.peer, + } + // priority := gen.MessagePriority(buf.B[8]) ignored + if buf.B[7] == protoMessageTerminateEvent { + l := int(buf.B[9]) + if buf.Len() < 10+l { + c.log.Error("malformed message (too small MessageTerminateEvent)") + continue + } + event.Name = gen.Atom(buf.B[10 : 10+l]) + data = buf.B[10+l:] + } else { + id := binary.BigEndian.Uint16(buf.B[9:11]) + if c.decodeOptions.AtomCache == nil { + c.log.Error("received TerminateEvent with cached atom value %d, but cache is nil (message ignored). please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + v, found := c.decodeOptions.AtomCache.Load(id) + if found == false { + c.log.Error("received TerminateEvent with unknown atom cache id %d (message ignored). 
please, report this bug", id) + lib.ReleaseBuffer(buf) + continue + } + + event.Name = v.(gen.Atom) + data = buf.B[11:] + } + + if c.decodeOptions.AtomMapping != nil { + if v, found := c.decodeOptions.AtomMapping.Load(event.Name); found { + event.Name = v.(gen.Atom) + } + } + + msg, tail, err := edf.Decode(data, c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + reason, ok := msg.(error) + if ok == false { + c.log.Error("received malformed TerminateEvent message: %v", msg) + continue + } + c.core.RouteTerminateEvent(event, reason) + + case protoMessageTerminateAlias: + if buf.Len() < 34 { + c.log.Error("malformed message (too small MessageTerminateAlias)") + continue + } + + target := gen.Alias{ + Node: c.peer, + Creation: c.peer_creation, + } + // priority := gen.MessagePriority(buf.B[8]) // ignored + target.ID[0] = binary.BigEndian.Uint64(buf.B[9:17]) + target.ID[1] = binary.BigEndian.Uint64(buf.B[17:25]) + target.ID[2] = binary.BigEndian.Uint64(buf.B[25:33]) + + msg, tail, err := edf.Decode(buf.B[33:], c.decodeOptions) + if releaseBuffer { + lib.ReleaseBuffer(buf) + } + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + reason, ok := msg.(error) + if ok == false { + c.log.Error("received malformed TerminateAlias message: %v", msg) + continue + } + + c.core.RouteTerminateAlias(target, reason) + + case protoMessageAny: + if buf.Len() < 9 { + c.log.Error("malformed message (too small MessageAny)") + continue + } + + msg, tail, err := edf.Decode(buf.B[8:], c.decodeOptions) + lib.ReleaseBuffer(buf) + + if err != nil { + c.log.Error("unable to decode received message: %s", err) + continue + } + + if len(tail) > 0 { + c.log.Warning("message has extra bytes: %#v", tail) + } + + c.routeMessage(msg) + + case protoMessageZ: + if buf.Len() < 10 { + c.log.Error("malformed message (too small MessageZ)") + continue + } + skipBytes := 9 // proto header for compressed message + switch buf.B[8] { + case gen.CompressionTypeGZIP.ID(): + dbuf, err := lib.DecompressGZIP(buf, uint(skipBytes)) + if err != nil { + c.log.Error("unable to decompress message (gzip), ignored: %s", err) + continue + } + lib.ReleaseBuffer(buf) + buf = dbuf + goto re + + case gen.CompressionTypeLZW.ID(): + dbuf, err := lib.DecompressLZW(buf, uint(skipBytes)) + if err != nil { + c.log.Error("unable to decompress message (lzw), ignored") + continue + } + lib.ReleaseBuffer(buf) + buf = dbuf + goto re + + case gen.CompressionTypeZLIB.ID(): + dbuf, err := lib.DecompressZLIB(buf, uint(skipBytes)) + if err != nil { + c.log.Error("unable to decompress message (zlib), ignored") + continue + } + lib.ReleaseBuffer(buf) + buf = dbuf + goto re + + default: + c.log.Error("message with unknown compression type %d, ignored", buf.B[8]) + continue + } + + // case protoMessageF: + // TODO fragmentation + // TODO check the message size after assembling + + // case protoMessageP: + // TODO proxy + + default: + c.log.Error("unknown/unsupported message type %d, ignored", buf.B[7]) + lib.ReleaseBuffer(buf) + } + + // TODO + // check if connection has been terminated + // if c.terminated { + // return + // } + + } +} + +func (c *connection) read(conn net.Conn, buf *lib.Buffer) (*lib.Buffer, error) { + total := buf.Len() + expect := 8 // 8 bytes as a header + // 1 byte - protoMagic + // 1 byte - protoVersion + // 4 bytes - message length + // 1 byte - order (0 - no order, N - use the specific queue) + // 1 byte - message type + for { + if buf.Len() < expect { + n, e := buf.ReadDataFrom(conn, math.MaxUint16) + if e != nil { + if e == io.EOF { + // something went wrong + return nil, nil + } + return nil, e + } + + total += n + // check if we should get more data + continue + } + + l := int(binary.BigEndian.Uint32(buf.B[2:6])) + + if c.node_maxmessagesize > 0 && l > c.node_maxmessagesize { + return nil, fmt.Errorf("received too long message (len: %d, limit: %d)", l, c.node_maxmessagesize) + } + + if lib.Trace() { + c.log.Trace("...recv buf.Len: %d, packet %d (expect: %d)", buf.Len(), l, expect) + } + + if buf.Len() < l { + expect = l + continue + } + + tail := lib.TakeBuffer() + tail.Append(buf.B[l:total]) + + buf.B = buf.B[:l] + + return tail, nil + } +} + +func (c *connection) routeMessage(msg any) { + switch m := msg.(type) { + case MessageResult: + c.requestsMutex.RLock() + ch, found := c.requests[m.Ref] + c.requestsMutex.RUnlock() + if found == false { + // no one is waiting; it seems the request took too long to handle + return + } + + select { + case ch <- m: + default: + } + + case MessageUpdateCache: + for k, v := range m.AtomCache { + entry, exist := c.decodeOptions.AtomCache.LoadOrStore(k, v) + if exist { + c.log.Warning("updating atom cache ignored entry (already exist): %d => %v", k, entry) + } + } + for k, v := range m.AtomMapping { + entry, exist := c.decodeOptions.AtomMapping.LoadOrStore(k, v) + if exist { + c.log.Warning("updating atom mapping ignored entry (already exist): %s => %v", k, entry) + } + } + for k, v := range m.RegCache { + entry, exist := c.decodeOptions.RegCache.LoadOrStore(k, v) + if exist { + c.log.Warning("updating reg cache ignored entry (already exist): %d => %v", k, entry) + } + } + for k, v := range m.ErrCache { + + // TODO + // check if error v is registered as an error on this node + // and replace it with the local one + + entry, exist := c.decodeOptions.ErrCache.LoadOrStore(k, v) + if exist { + c.log.Warning("updating err cache ignored entry (already exist): %d => %v", k, entry) + } + } + result := MessageResult{ + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(0) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageLinkPID: + // TODO check the source/target node name + err := c.core.RouteLinkPID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageUnlinkPID: + err := c.core.RouteUnlinkPID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageLinkProcessID: + err := c.core.RouteLinkProcessID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageUnlinkProcessID: + err := c.core.RouteUnlinkProcessID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageLinkAlias: + err := c.core.RouteLinkAlias(m.Source, m.Target) + 
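// This switch is the receiving side of the request/response pattern used by
// the Link*/Monitor*/Spawn methods: each Message* request is routed into the
// core and its outcome is echoed back as a MessageResult carrying the same
// Ref, so the peer's waitResult can match it. Replies to MessageResult itself
// (handled at the top of this switch) use a non-blocking send: if the
// requester has already timed out and removed its Ref, the result is
// silently dropped.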
result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID[1] % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageUnlinkAlias: + err := c.core.RouteUnlinkAlias(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID[1] % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageLinkEvent: + r, err := c.core.RouteLinkEvent(m.Source, m.Target) + result := MessageResult{ + Result: r, + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageUnlinkEvent: + err := c.core.RouteUnlinkEvent(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageMonitorPID: + err := c.core.RouteMonitorPID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageDemonitorPID: + err := c.core.RouteDemonitorPID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageMonitorProcessID: + err := c.core.RouteMonitorProcessID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageDemonitorProcessID: + err := c.core.RouteDemonitorProcessID(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageMonitorAlias: + err := c.core.RouteMonitorAlias(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID[1] % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageDemonitorAlias: + err := c.core.RouteDemonitorAlias(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(m.Target.ID[1] % 255) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageMonitorEvent: + r, err := c.core.RouteMonitorEvent(m.Source, m.Target) + result := MessageResult{ + Result: r, + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageDemonitorEvent: + err := c.core.RouteDemonitorEvent(m.Source, m.Target) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(m.Source.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageSpawn: + if c.node_flags.Enable && c.node_flags.EnableRemoteSpawn == false { + c.log.Warning("remote spawn is not allowed for %s", c.peer) + return + } + pid, err := c.core.RouteSpawn(c.core.Name(), m.Name, m.Options, c.peer) + result := MessageResult{ + Error: err, + Result: pid, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := 
uint8(m.Options.ParentPID.ID % 255) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + case MessageApplicationStart: + if c.node_flags.Enable && c.node_flags.EnableRemoteApplicationStart == false { + c.log.Warning("remote application start is not allowed for %s", c.peer) + return + } + err := c.core.RouteApplicationStart(m.Name, m.Mode, m.Options, c.peer) + result := MessageResult{ + Error: err, + Ref: m.Ref, + } + order := uint8(0) + orderPeer := uint8(0) + c.sendAny(result, order, orderPeer, gen.Compression{}) + + default: + c.log.Error("received unsupported type of message: %T", msg) + } +} + +func (c *connection) sendAny(msg any, order uint8, orderPeer uint8, compression gen.Compression) error { + buf := lib.TakeBuffer() + buf.Allocate(8) // for the header + + if err := edf.Encode(msg, buf, c.encodeOptions); err != nil { + return err + } + if buf.Len() > math.MaxUint32 { + return gen.ErrTooLarge + } + buf.B[0] = protoMagic + buf.B[1] = protoVersion + binary.BigEndian.PutUint32(buf.B[2:6], uint32(buf.Len())) + buf.B[6] = orderPeer + buf.B[7] = protoMessageAny + + return c.send(buf, order, compression) +} + +func (c *connection) wait() { + c.wg.Wait() +} + +func (c *connection) send(buf *lib.Buffer, order uint8, compression gen.Compression) error { + + if compression.Enable && buf.Len() > compression.Threshold { + var zbuf *lib.Buffer + var err error + + // 1 - protoMagic + // 1 - protoVersion + // 4 - length + // 1 - order + // 1 - protoMessageZ + // 1 - compression type + preallocate := uint(9) + + switch compression.Type { + case gen.CompressionTypeZLIB: + zbuf, err = lib.CompressZLIB(buf, preallocate) + if err != nil { + return fmt.Errorf("unable to compress packet (zlib): %s", err) + } + case gen.CompressionTypeLZW: + zbuf, err = lib.CompressLZW(buf, preallocate) + if err != nil { + return fmt.Errorf("unable to compress packet (lzw): %s", err) + } + default: + compression.Type = gen.CompressionTypeGZIP + zbuf, err = lib.CompressGZIP(buf, preallocate, int(compression.Level)) + if err != nil { + return fmt.Errorf("unable to compress packet (gzip): %s", err) + } + + } + zbuf.B[0] = protoMagic + zbuf.B[1] = protoVersion + binary.BigEndian.PutUint32(zbuf.B[2:6], uint32(zbuf.Len())) + zbuf.B[6] = buf.B[6] // keep order of the original message + zbuf.B[7] = protoMessageZ + zbuf.B[8] = compression.Type.ID() + + lib.ReleaseBuffer(buf) + buf = zbuf + } + + if c.peer_maxmessagesize > 0 && buf.Len() > c.peer_maxmessagesize { + return gen.ErrTooLarge + } + + var pi *pool_item + c.pool_mutex.RLock() + l := len(c.pool) + if l == 0 { + c.pool_mutex.RUnlock() + return gen.ErrNoConnection + } + if order == 0 { + neworder := atomic.AddUint32(&c.order, 1) + n := int(neworder) % l + pi = c.pool[n] + } else { + n := int(order) % l + pi = c.pool[n] + } + c.pool_mutex.RUnlock() + + atomic.AddUint64(&c.messagesOut, 1) + atomic.AddUint64(&c.bytesOut, uint64(buf.Len())) + + // TODO + // add proxy, fragmentation support + // c.transitOut++ + // if buf.Len() < protoFragmentSize { + + pi.fl.Write(buf.B) + lib.ReleaseBuffer(buf) + return nil + + // } + + // message must be fragmented + // panic("TODO") +} + +func (c *connection) waitResult(ref gen.Ref, ch chan MessageResult) (result MessageResult) { + + timer := lib.TakeTimer() + defer lib.ReleaseTimer(timer) + timer.Reset(time.Second * time.Duration(gen.DefaultRequestTimeout)) + + select { + case <-timer.C: + result.Error = gen.ErrTimeout + case result = <-ch: + } + + c.requestsMutex.Lock() + delete(c.requests, ref) + c.requestsMutex.Unlock() + + 
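// Whether a reply arrived or the timer fired, the Ref is deregistered before
// returning, so a late MessageResult is simply dropped by the non-blocking
// send in routeMessage instead of leaking a channel. The timeout comes from
// gen.DefaultRequestTimeout (in seconds), armed on a pooled timer taken via
// lib.TakeTimer and returned with lib.ReleaseTimer.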
return +} diff --git a/net/proto/enp.go b/net/proto/enp.go new file mode 100644 index 00000000..8b4293ee --- /dev/null +++ b/net/proto/enp.go @@ -0,0 +1,145 @@ +package proto + +import ( + "fmt" + "sync" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" + "ergo.services/ergo/net/handshake" +) + +type enp struct { + core gen.Core +} + +func Create() gen.NetworkProto { + return &enp{} +} + +// gen.NetworkProto implementation + +func (e *enp) NewConnection(core gen.Core, result gen.HandshakeResult, log gen.Log) (gen.Connection, error) { + + opts, ok := result.Custom.(handshake.ConnectionOptions) + if ok == false { + return nil, fmt.Errorf("HandshakeResult.Custom has unknown type") + } + + if result.PeerCreation == 0 { + // seems it was Join handshake for the connection that was already terminated + return nil, gen.ErrNotAllowed + } + + log.Trace("create new connection with %s (pool size: %d)", result.Peer, opts.PoolSize) + conn := &connection{ + id: result.ConnectionID, + creation: time.Now().Unix(), + core: core, + log: log, + node_flags: result.NodeFlags, + node_maxmessagesize: result.NodeMaxMessageSize, + + handshakeVersion: result.HandshakeVersion, + protoVersion: e.Version(), + + peer: result.Peer, + peer_creation: result.PeerCreation, + peer_flags: result.PeerFlags, + peer_version: result.PeerVersion, + peer_maxmessagesize: result.PeerMaxMessageSize, + + pool_size: opts.PoolSize, + pool_dsn: opts.PoolDSN, + + encodeOptions: edf.Options{ + AtomCache: opts.EncodeAtomCache, + RegCache: opts.EncodeRegCache, + ErrCache: opts.EncodeErrCache, + Cache: new(sync.Map), + }, + + decodeOptions: edf.Options{ + AtomCache: opts.DecodeAtomCache, + RegCache: opts.DecodeRegCache, + ErrCache: opts.DecodeErrCache, + Cache: new(sync.Map), + }, + requests: make(map[gen.Ref]chan MessageResult), + } + + if len(result.AtomMapping) > 0 { + conn.encodeOptions.AtomMapping = &sync.Map{} + conn.decodeOptions.AtomMapping = &sync.Map{} + for k, v := range result.AtomMapping { + conn.encodeOptions.AtomMapping.Store(k, v) + conn.decodeOptions.AtomMapping.Store(v, k) + } + } + + // init recv queues. create 4 recv queues per connection + // since the decoding is more costly comparing to the encoding + for i := 0; i < opts.PoolSize*4; i++ { + conn.recvQueues = append(conn.recvQueues, lib.NewQueueMPSC()) + } + + return conn, nil +} + +func (e *enp) Serve(c gen.Connection, redial gen.NetworkDial) error { + conn := c.(*connection) + if redial == nil { + // accepted connection. no dialer. 
+ conn.wait() + return nil + } + + if conn.pool_size < 2 { + // just one TCP connection in the pool + conn.wait() + return nil + } + + if len(conn.pool_dsn) == 0 { + conn.log.Warning("pool size is %d, but DSN list is empty", conn.pool_size) + conn.wait() + return nil + } + + for i := 1; i < conn.pool_size; i++ { + + // TODO + // we should try the next dsn on dialing failure + + n := i % len(conn.pool_dsn) + dsn := conn.pool_dsn[n] + if lib.Trace() { + conn.log.Trace("dialing %s (pool: %d of %d)", dsn, i+1, conn.pool_size) + } + nc, tail, err := redial(dsn, conn.id) + if err != nil { + if lib.Trace() { + conn.log.Trace("dialing %s failed: %s", dsn, err) + } + continue + } + + if err := conn.Join(nc, conn.id, redial, tail); err != nil { + conn.log.Error("unable to join %s: %s", nc.RemoteAddr().String(), err) + } + } + + conn.wait() + + return nil +} + +func (e *enp) Version() gen.Version { + return gen.Version{ + Name: protoName, + Release: protoRelease, + License: gen.LicenseMIT, + } +} diff --git a/net/proto/types.go b/net/proto/types.go new file mode 100644 index 00000000..46a9baa9 --- /dev/null +++ b/net/proto/types.go @@ -0,0 +1,231 @@ +package proto + +import ( + "ergo.services/ergo/gen" + "ergo.services/ergo/net/edf" +) + +const ( + protoName string = "ENP" // Ergo Network Protocol + protoRelease string = "R1" // (Rev.1) + + protoMagic byte = 78 + protoVersion byte = 1 + + // for messages sent with Send* methods + protoMessagePID byte = 101 + protoMessageName byte = 102 + protoMessageNameCache byte = 103 + protoMessageAlias byte = 104 + protoMessageEvent byte = 105 + protoMessageEventCache byte = 106 + protoMessageExit byte = 107 + + // for requests made with Call* methods + protoRequestPID byte = 121 + protoRequestName byte = 122 + protoRequestNameCache byte = 123 + protoRequestAlias byte = 124 + protoMessageResponse byte = 129 + protoMessageResponseError byte = 130 + + // termination messages + protoMessageTerminatePID byte = 181 + protoMessageTerminateName byte = 182 + protoMessageTerminateNameCache byte = 183 + protoMessageTerminateAlias byte = 184 + protoMessageTerminateEvent byte = 185 + protoMessageTerminateEventCache byte = 186 + + // any structured message (link/monitor/spawn/etc...) 
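// (every ENP frame shares the same 8-byte header, assembled in sendAny and
// parsed in connection.read: [0] protoMagic, [1] protoVersion,
// [2:6] big-endian uint32 total frame length including the header,
// [6] order byte - 0 means no ordering, N pins the frame to recv queue
// N modulo the queue count - and [7] one of the message types below)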
+ protoMessageAny byte = 199 + + // order: compressed -> encrypted -> fragmented -> proxy + protoMessageZ byte = 200 // compressed + protoMessageE byte = 201 // encrypted + protoMessageF byte = 202 // fragmented + protoMessageP byte = 203 // proxy + + // TODO + // protoFragmentSize int = 65000 +) + +// +// Link/Unlink +// + +type MessageLinkPID struct { + Source gen.PID + Target gen.PID + Ref gen.Ref +} + +type MessageUnlinkPID struct { + Source gen.PID + Target gen.PID + Ref gen.Ref +} + +type MessageLinkProcessID struct { + Source gen.PID + Target gen.ProcessID + Ref gen.Ref +} + +type MessageUnlinkProcessID struct { + Source gen.PID + Target gen.ProcessID + Ref gen.Ref +} + +type MessageLinkAlias struct { + Source gen.PID + Target gen.Alias + Ref gen.Ref +} + +type MessageUnlinkAlias struct { + Source gen.PID + Target gen.Alias + Ref gen.Ref +} + +type MessageLinkEvent struct { + Source gen.PID + Target gen.Event + Ref gen.Ref +} + +type MessageUnlinkEvent struct { + Source gen.PID + Target gen.Event + Ref gen.Ref +} + +// +// Monitor/Demonitor +// + +type MessageMonitorPID struct { + Source gen.PID + Target gen.PID + Ref gen.Ref +} + +type MessageDemonitorPID struct { + Source gen.PID + Target gen.PID + Ref gen.Ref +} + +type MessageMonitorProcessID struct { + Source gen.PID + Target gen.ProcessID + Ref gen.Ref +} + +type MessageDemonitorProcessID struct { + Source gen.PID + Target gen.ProcessID + Ref gen.Ref +} + +type MessageMonitorAlias struct { + Source gen.PID + Target gen.Alias + Ref gen.Ref +} + +type MessageDemonitorAlias struct { + Source gen.PID + Target gen.Alias + Ref gen.Ref +} + +type MessageMonitorEvent struct { + Source gen.PID + Target gen.Event + Ref gen.Ref +} + +type MessageDemonitorEvent struct { + Source gen.PID + Target gen.Event + Ref gen.Ref +} + +// +// remote spawn and application start +// + +type MessageSpawn struct { + Name gen.Atom + Options gen.ProcessOptionsExtra + Ref gen.Ref +} + +type MessageApplicationStart struct { + Name gen.Atom + Mode gen.ApplicationMode + Options gen.ApplicationOptionsExtra + Ref gen.Ref +} + +// TODO +// for updating cache +// + +type MessageUpdateCache struct { + AtomCache map[uint16]gen.Atom + AtomMapping map[gen.Atom]gen.Atom + RegCache map[uint16]string + ErrCache map[uint16]error + Ref gen.Ref +} + +// +// result message for "any" requests (link/monitor/spawn etc...), +// + +type MessageResult struct { + Error error + Result any + Ref gen.Ref +} + +// +// message types must be registered +// + +func init() { + types := []any{ + MessageLinkPID{}, + MessageUnlinkPID{}, + MessageLinkProcessID{}, + MessageUnlinkProcessID{}, + MessageLinkAlias{}, + MessageUnlinkAlias{}, + MessageLinkEvent{}, + MessageUnlinkEvent{}, + MessageMonitorPID{}, + MessageDemonitorPID{}, + MessageMonitorProcessID{}, + MessageDemonitorProcessID{}, + MessageMonitorAlias{}, + MessageDemonitorAlias{}, + MessageMonitorEvent{}, + MessageDemonitorEvent{}, + MessageSpawn{}, + MessageApplicationStart{}, + MessageUpdateCache{}, + MessageResult{}, + } + + for _, t := range types { + err := edf.RegisterTypeOf(t) + if err == nil || err == gen.ErrTaken { + continue + } + panic(err) + } +} diff --git a/net/registrar/client.go b/net/registrar/client.go new file mode 100644 index 00000000..9d895437 --- /dev/null +++ b/net/registrar/client.go @@ -0,0 +1,382 @@ +package registrar + +import ( + "encoding/binary" + "fmt" + "io" + "net" + "strconv" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/edf" +) + +type Options struct { + 
Port uint16 + DisableServer bool +} + +func Create(options Options) gen.Registrar { + if options.Port == 0 { + options.Port = defaultRegistrarPort + } + client := &client{ + options: options, + terminated: true, + } + + edf.RegisterTypeOf(gen.Version{}) + edf.RegisterTypeOf(gen.Route{}) + edf.RegisterTypeOf(MessageRegisterRoutes{}) + edf.RegisterTypeOf(MessageRegisterReply{}) + edf.RegisterTypeOf(MessageResolveRoutes{}) + edf.RegisterTypeOf(MessageResolveReply{}) + + return client +} + +type client struct { + node gen.NodeRegistrar + + routes []gen.Route + + options Options + + server *server + conn net.Conn + + terminated bool +} + +// +// gen.Resolver interface implementation +// + +func (c *client) Resolve(name gen.Atom) ([]gen.Route, error) { + if c.terminated { + return nil, fmt.Errorf("registrar client terminated") + } + + srv := c.server + if srv != nil { + c.node.Log().Trace("resolving %s using local registrar server", name) + return srv.resolve(name, true) + } + + host := name.Host() + if host == "" { + return nil, gen.ErrIncorrect + } + dsn := net.JoinHostPort(host, strconv.Itoa(int(c.options.Port))) + c.node.Log().Trace("resolving %s using registrar %s", name, dsn) + conn, err := net.Dial("udp", dsn) + if err != nil { + return nil, err + } + defer conn.Close() + + // send resolve request + rbuf := lib.TakeBuffer() + defer lib.ReleaseBuffer(rbuf) + + rbuf.Allocate(4) + rbuf.B[0] = protoVersion + rbuf.B[1] = protoResolve + resolve := MessageResolveRoutes{ + Node: name, + } + if err := edf.Encode(resolve, rbuf, edf.Options{}); err != nil { + return nil, err + } + binary.BigEndian.PutUint16(rbuf.B[2:4], uint16(rbuf.Len()-4)) + + if _, err := conn.Write(rbuf.B); err != nil { + return nil, err + } + + // wait the answer + conn.SetReadDeadline(time.Now().Add(3 * time.Second)) + buf := make([]byte, 4096) + n, err := conn.Read(buf) + if err != nil { + return nil, err + } + if n < 4 { + c.node.Log().Error("malformed data from the registrar") + return nil, gen.ErrMalformed + } + dbuf := buf[:n] + + if dbuf[0] != protoVersion { + c.node.Log().Error("malformed proto version in the registrar resolve reply") + return nil, gen.ErrMalformed + } + if dbuf[1] != protoResolveReply { + c.node.Log().Error("malformed resolve reply from the registrar") + return nil, gen.ErrMalformed + } + l := int(binary.BigEndian.Uint16(dbuf[2:4])) + if 4+l > len(dbuf) { + c.node.Log().Error("malformed data in the registrar resolve reply (too long)") + return nil, gen.ErrMalformed + } + v, _, err := edf.Decode(dbuf[4:], edf.Options{}) + if err != nil { + c.node.Log().Error("unable to decode resolve reply message from the registrar:", err) + return nil, err + } + + reply, ok := v.(MessageResolveReply) + if ok == false { + c.node.Log().Error("incorrect message: %#v", v) + return nil, err + } + + if reply.Error != nil { + return nil, reply.Error + } + return reply.Routes, nil +} + +func (c *client) ResolveApplication(name gen.Atom) ([]gen.ApplicationRoute, error) { + return nil, gen.ErrUnsupported +} +func (c *client) ResolveProxy(node gen.Atom) ([]gen.ProxyRoute, error) { + return nil, gen.ErrUnsupported +} + +// +// gen.Registrar interface implementation +// + +func (c *client) Resolver() gen.Resolver { + return c +} + +func (c *client) RegisterProxy(to gen.Atom) error { + return gen.ErrUnsupported +} +func (c *client) UnregisterProxy(to gen.Atom) error { + return gen.ErrUnsupported +} +func (c *client) RegisterApplicationRoute(route gen.ApplicationRoute) error { + return gen.ErrUnsupported +} +func (c *client) 
UnregisterApplicationRoute(name gen.Atom) error { + return gen.ErrUnsupported +} +func (c *client) Nodes() ([]gen.Atom, error) { + return nil, gen.ErrUnsupported +} +func (c *client) Config(items ...string) (map[string]any, error) { + return nil, gen.ErrUnsupported +} +func (c *client) ConfigItem(item string) (any, error) { + return nil, gen.ErrUnsupported +} +func (c *client) Event() (gen.Event, error) { + return gen.Event{}, gen.ErrUnsupported +} +func (c *client) Info() gen.RegistrarInfo { + info := gen.RegistrarInfo{ + EmbeddedServer: c.server != nil, + Version: c.Version(), + } + conn := c.conn + if conn != nil { + info.Server = conn.RemoteAddr().String() + return info + } + if info.EmbeddedServer { + info.Server = c.server.lReg.Addr().String() + } + return info +} + +func (c *client) Register(node gen.NodeRegistrar, routes gen.RegisterRoutes) (gen.StaticRoutes, error) { + var static gen.StaticRoutes + + c.node = node + c.routes = routes.Routes + + if c.terminated == false { + return static, fmt.Errorf("already started") + } + + if len(c.routes) == 0 { + // hidden mode. do not register node + c.terminated = false + return static, nil + } + + rc, err := c.tryRegister() + if err != nil { + return static, err + } + + if rc != nil { + go c.serve(rc) + } + + c.terminated = false + return static, nil +} + +func (c *client) Terminate() { + if c.server != nil { + c.node.Log().Trace("terminate registrar server") + c.server.terminate() + } + if c.conn != nil { + c.conn.Close() + } + + c.terminated = true + c.node.Log().Trace("registrar client terminated") +} + +func (c *client) Version() gen.Version { + return gen.Version{ + Name: registrarName, + Release: registrarRelease, + License: gen.LicenseMIT, + } +} + +func (c *client) tryRegister() (net.Conn, error) { + if c.options.DisableServer == false { + c.server = tryStartServer(c.options.Port, c.node.Log()) + if c.server != nil { + // local registrar is started + c.server.registerNode(c.node.Name(), c.routes, nil) + return nil, nil + } + c.node.Log().Trace("unable to start registrar server, run as a client only") + } + + dialer := net.Dialer{ + KeepAlive: defaultKeepAlive, + } + dsn := net.JoinHostPort("localhost", strconv.Itoa(int(c.options.Port))) + conn, err := dialer.Dial("tcp", dsn) + if err != nil { + return nil, err + } + + buf := lib.TakeBuffer() + defer lib.ReleaseBuffer(buf) + + buf.Allocate(4) + buf.B[0] = protoVersion + buf.B[1] = protoRegister + reg := MessageRegisterRoutes{ + Node: c.node.Name(), + Routes: c.routes, + } + if err := edf.Encode(reg, buf, edf.Options{}); err != nil { + conn.Close() + return nil, err + } + binary.BigEndian.PutUint16(buf.B[2:4], uint16(buf.Len()-4)) + + if _, err := conn.Write(buf.B); err != nil { + conn.Close() + return nil, err + } + + conn.SetReadDeadline(time.Now().Add(time.Second)) + + var rbuf [1024]byte + n, err := conn.Read(rbuf[:]) + if err != nil { + return nil, err + } + + if n < 4 { + c.node.Log().Error("malformed data from the registrar") + conn.Close() + return nil, gen.ErrMalformed + } + dbuf := rbuf[:n] + + if dbuf[0] != protoVersion { + c.node.Log().Error("malformed proto version in the registrar reply") + conn.Close() + return nil, gen.ErrMalformed + } + if dbuf[1] != protoRegisterReply { + c.node.Log().Error("malformed reply from the registrar") + conn.Close() + return nil, gen.ErrMalformed + } + l := int(binary.BigEndian.Uint16(dbuf[2:4])) + if 4+l > len(dbuf) { + c.node.Log().Error("malformed data in the registrar reply (too long)") + conn.Close() + return nil, gen.ErrMalformed + } 
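// Registrar frames are deliberately simpler than ENP ones: [0] protoVersion,
// [1] packet type (protoRegister, protoRegisterReply, protoResolve,
// protoResolveReply), [2:4] big-endian uint16 payload length, then a single
// EDF-encoded message. A minimal framing sketch, mirroring the encoding used
// in Resolve and tryRegister above (illustration only):
//
//	buf.Allocate(4)
//	buf.B[0] = protoVersion
//	buf.B[1] = protoRegister
//	if err := edf.Encode(reg, buf, edf.Options{}); err != nil {
//		return nil, err // payload is appended after the 4-byte header
//	}
//	binary.BigEndian.PutUint16(buf.B[2:4], uint16(buf.Len()-4)) // payload size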
+	v, _, err := edf.Decode(dbuf[4:], edf.Options{})
+	if err != nil {
+		c.node.Log().Error("unable to decode reply message from the registrar: %s", err)
+		conn.Close()
+		return nil, err
+	}
+
+	reply, ok := v.(MessageRegisterReply)
+	if ok == false {
+		c.node.Log().Error("incorrect message: %#v", v)
+		conn.Close()
+		return nil, gen.ErrMalformed
+	}
+
+	if reply.Error != nil {
+		return nil, reply.Error
+	}
+
+	conn.SetReadDeadline(time.Time{})
+	return conn, nil
+}
+
+func (c *client) serve(conn net.Conn) {
+	var buf [16]byte
+	c.conn = conn
+
+	for {
+		_, err := c.conn.Read(buf[:])
+		if c.terminated {
+			return
+		}
+		if err != io.EOF {
+			continue
+		}
+
+		// disconnected
+		c.node.Log().Warning("lost connection with the registrar")
+		c.conn = nil
+
+		// trying to reconnect
+		for {
+			if c.terminated {
+				return
+			}
+			conn, err := c.tryRegister()
+			if err != nil {
+				c.node.Log().Error("unable to register node on the registrar: %s", err)
+				time.Sleep(time.Second)
+				continue
+			}
+
+			if conn == nil {
+				// use the local registrar server
+				c.node.Log().Info("registered node on the local registrar")
+				return
+			}
+			c.conn = conn
+			c.node.Log().Info("registered node on the registrar")
+			break
+		}
+
+	}
+}
diff --git a/net/registrar/server.go b/net/registrar/server.go
new file mode 100644
index 00000000..42b06d21
--- /dev/null
+++ b/net/registrar/server.go
@@ -0,0 +1,296 @@
+package registrar
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"ergo.services/ergo/gen"
+	"ergo.services/ergo/lib"
+	"ergo.services/ergo/net/edf"
+)
+
+type server struct {
+	sync.RWMutex
+
+	lReg net.Listener
+	lRes net.PacketConn
+
+	log gen.Log
+
+	routes     map[gen.Atom][]gen.Route
+	registered map[net.Conn]gen.Atom
+	terminated bool
+}
+
+func tryStartServer(port uint16, log gen.Log) *server {
+	addressReg := fmt.Sprintf("localhost:%d", port)
+	lReg, err := net.Listen("tcp", addressReg)
+	if err != nil {
+		// might be already taken. dont care
+		return nil
+	}
+	addressRes := fmt.Sprintf(":%d", port)
+	lRes, err := net.ListenPacket("udp", addressRes)
+	if err != nil {
+		// might be already taken.
dont care + lReg.Close() + return nil + } + + srv := &server{ + lReg: lReg, + lRes: lRes, + log: log, + routes: make(map[gen.Atom][]gen.Route), + registered: make(map[net.Conn]gen.Atom), + } + + go srv.serveRegister() + go srv.serveResolve() + + srv.log.Trace("(registrar) server started on tcp://%s and resolver on udp://%s", + lReg.Addr(), lRes.LocalAddr()) + return srv +} + +func (s *server) serveRegister() { + for { + if s.terminated { + return + } + conn, err := s.lReg.Accept() + if err != nil { + return + } + + go s.serveConn(conn) + } + +} + +func (s *server) serveResolve() { + buf := make([]byte, 1024) + rbuf := lib.TakeBuffer() + defer lib.ReleaseBuffer(rbuf) + + for { + if s.terminated { + return + } + + n, addr, err := s.lRes.ReadFrom(buf) + if err != nil { + s.log.Trace("(registrar) unable to read from UDP socket: %s", err) + continue + } + // decode request + s.log.Trace("(registrar) got resolve request from %s", addr) + if n < 4 { + s.log.Error("(registrar) malformed data from %s", addr) + continue + } + + if buf[0] != protoVersion { + s.log.Error("(registrar) proto version mismatch from %s: %d", addr, buf[0]) + continue + } + + if buf[1] != protoResolve { + s.log.Error("(registrar) unknown UDP packet type from %s: %d", addr, buf[1]) + continue + } + + l := binary.BigEndian.Uint16(buf[2:4]) + if int(l) > n-4 { + s.log.Error("(registrar) malformed data from %s", addr) + continue + } + + v, _, err := edf.Decode(buf[4:], edf.Options{}) + if err != nil { + s.log.Error("(registrar) unable to decode message from %s: %s", addr, err) + continue + } + + resolve, ok := v.(MessageResolveRoutes) + if ok == false { + s.log.Error("(registrar) incorrect message from %s: %#v", addr, v) + continue + } + + // resolve and send reply + routes, err := s.resolve(resolve.Node, false) + reply := MessageResolveReply{ + Routes: routes, + Error: err, + } + + rbuf.Reset() + rbuf.Allocate(4) + rbuf.B[0] = protoVersion + rbuf.B[1] = protoResolveReply + if err := edf.Encode(reply, rbuf, edf.Options{}); err != nil { + s.log.Error("(registrar) unable to encode resolve reply message: %s", err) + continue + } + binary.BigEndian.PutUint16(rbuf.B[2:4], uint16(rbuf.Len()-4)) + if _, err := s.lRes.WriteTo(rbuf.B, addr); err != nil { + s.log.Error("(registrar) unable to send resolve reply message: %s", err) + } + } +} + +func (s *server) terminate() { + s.terminated = true + + s.lReg.Close() + s.lRes.Close() + + s.RLock() + defer s.RUnlock() + + for k := range s.registered { + k.(net.Conn).Close() + } + + s.log.Trace("(registrar) server terminated") +} + +func (s *server) serveConn(conn net.Conn) { + s.log.Trace("(registrar) server got connection from %s", conn.RemoteAddr()) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + + buf := make([]byte, 4096) // must be enough for the register packet + n, err := conn.Read(buf) + if err != nil { + s.log.Error("(registrar) unable to read from socket with %s: %s", conn.RemoteAddr(), err) + conn.Close() + return + } + if n < 4 { + s.log.Error("(registrar) malformed data from %s", conn.RemoteAddr()) + conn.Close() + return + } + + if buf[0] != protoVersion { + s.log.Error("(registrar) proto version mismatch from %s: %d", conn.RemoteAddr(), buf[0]) + conn.Close() + return + } + + if buf[1] != protoRegister { + s.log.Error("(registrar) unknown packet type from %s: %d", conn.RemoteAddr(), buf[1]) + conn.Close() + return + } + + l := binary.BigEndian.Uint16(buf[2:4]) + if int(l) > n-4 { + s.log.Error("(registrar) malformed data from %s", conn.RemoteAddr()) + conn.Close() + 
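// (nothing to unregister on these early exits: registerNode runs only after
// the register frame below decodes successfully, so closing the socket is
// enough to drop the peer)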
return + } + + v, _, err := edf.Decode(buf[4:], edf.Options{}) + if err != nil { + s.log.Error("(registrar) unable to decode message from %s: %s", conn.RemoteAddr(), err) + conn.Close() + return + } + routes, ok := v.(MessageRegisterRoutes) + if ok == false { + s.log.Error("(registrar) incorrect message from %s: %#v", conn.RemoteAddr(), v) + conn.Close() + return + } + + reply := MessageRegisterReply{ + Error: s.registerNode(routes.Node, routes.Routes, conn), + } + + rbuf := lib.TakeBuffer() + defer lib.ReleaseBuffer(rbuf) + + rbuf.Allocate(4) + rbuf.B[0] = protoVersion + rbuf.B[1] = protoRegisterReply + if err := edf.Encode(reply, rbuf, edf.Options{}); err != nil { + conn.Close() + return + } + binary.BigEndian.PutUint16(rbuf.B[2:4], uint16(rbuf.Len()-4)) + if _, err := conn.Write(rbuf.B); err != nil { + return + } + + if reply.Error != nil { + conn.Close() + return + } + + conn.SetReadDeadline(time.Time{}) + defer s.unregisterNode(routes.Node, conn) + for { + n, err := conn.Read(buf) + if err != nil { + return + } + + if n > 0 { + s.log.Warning("(registrar) misbehavior in reg link with %s, received %d bytes. ignored", routes.Node, n) + } + } +} + +func (s *server) registerNode(name gen.Atom, routes []gen.Route, conn net.Conn) error { + s.Lock() + defer s.Unlock() + if _, found := s.routes[name]; found { + s.log.Trace("(registrar) unable to register %s: %s", name, gen.ErrTaken) + return gen.ErrTaken + } + if len(routes) == 0 { + return gen.ErrIncorrect + } + + s.routes[name] = routes + if conn != nil { + s.registered[conn] = name + } + s.log.Trace("(registrar) registered node %s with %d route(s)", name, len(routes)) + return nil +} + +func (s *server) unregisterNode(name gen.Atom, conn net.Conn) { + s.Lock() + defer s.Unlock() + if _, found := s.routes[name]; found == false { + return + } + delete(s.routes, name) + delete(s.registered, conn) + + s.log.Trace("(registrar) unregistered node %s", name) +} + +func (s *server) resolve(name gen.Atom, docopy bool) ([]gen.Route, error) { + var routes []gen.Route + + s.RLock() + defer s.RUnlock() + if regs, found := s.routes[name]; found { + if docopy == false { + return regs, nil + } + for _, r := range regs { + routes = append(routes, r) + } + return routes, nil + } + return nil, gen.ErrUnknown +} diff --git a/net/registrar/types.go b/net/registrar/types.go new file mode 100644 index 00000000..4cdf80d3 --- /dev/null +++ b/net/registrar/types.go @@ -0,0 +1,39 @@ +package registrar + +import ( + "time" + + "ergo.services/ergo/gen" +) + +const ( + registrarName string = "ESRD" // Ergo Service Registration and Discovery + registrarRelease string = "R1" // (Rev.1) + + defaultRegistrarPort uint16 = 4499 + defaultKeepAlive time.Duration = 3 * time.Second + + protoVersion byte = 1 + protoRegister byte = 44 + protoRegisterReply byte = 45 + protoResolve byte = 46 + protoResolveReply byte = 47 +) + +type MessageRegisterRoutes struct { + Node gen.Atom + Routes []gen.Route +} + +type MessageRegisterReply struct { + Error error +} + +type MessageResolveRoutes struct { + Node gen.Atom +} + +type MessageResolveReply struct { + Routes []gen.Route + Error error +} diff --git a/node/acceptor.go b/node/acceptor.go new file mode 100644 index 00000000..e5abf0c7 --- /dev/null +++ b/node/acceptor.go @@ -0,0 +1,78 @@ +package node + +import ( + "net" + + "ergo.services/ergo/gen" +) + +type acceptor struct { + l net.Listener + bs int + cookie string + port uint16 + cert_manager gen.CertManager + flags gen.NetworkFlags + max_message_size int + + registrar_custom bool + 
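// registrar_info is a callback rather than a cached value: Info() below
// invokes it each time it is called, so the acceptor always reports the
// registrar endpoint and version that are current at that moment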
registrar_info func() gen.RegistrarInfo + + handshake gen.NetworkHandshake + proto gen.NetworkProto + + atom_mapping map[gen.Atom]gen.Atom +} + +// gen.Acceptor interface implementation + +func (a *acceptor) Cookie() string { + return a.cookie +} + +func (a *acceptor) SetCookie(cookie string) { + a.cookie = cookie +} + +func (a *acceptor) NetworkFlags() gen.NetworkFlags { + return a.flags +} + +func (a *acceptor) SetNetworkFlags(flags gen.NetworkFlags) { + if flags.Enable == false { + flags = gen.DefaultNetworkFlags + } + a.flags = flags +} + +func (a *acceptor) MaxMessageSize() int { + return a.max_message_size +} + +func (a *acceptor) SetMaxMessageSize(size int) { + if size < 0 { + size = 0 + } + a.max_message_size = size + +} + +func (a *acceptor) Info() gen.AcceptorInfo { + info := gen.AcceptorInfo{ + Interface: a.l.Addr().String(), + MaxMessageSize: a.max_message_size, + Flags: a.flags, + TLS: a.cert_manager != nil, + CustomRegistrar: a.registrar_custom, + HandshakeVersion: a.handshake.Version(), + ProtoVersion: a.proto.Version(), + } + regInfo := a.registrar_info() + if regInfo.EmbeddedServer { + info.RegistrarServer = "(embedded) " + regInfo.Server + } else { + info.RegistrarServer = regInfo.Server + } + info.RegistrarVersion = regInfo.Version + return info +} diff --git a/node/application.go b/node/application.go new file mode 100644 index 00000000..190945ff --- /dev/null +++ b/node/application.go @@ -0,0 +1,300 @@ +package node + +import ( + "runtime" + "sync" + "sync/atomic" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +type application struct { + spec gen.ApplicationSpec + node *node + behavior gen.ApplicationBehavior + group sync.Map + mode gen.ApplicationMode + + started int64 + parent gen.Atom + state int32 + stopped chan struct{} + reason error +} + +func (a *application) start(mode gen.ApplicationMode, options gen.ApplicationOptionsExtra) error { + if swapped := atomic.CompareAndSwapInt32(&a.state, + int32(gen.ApplicationStateLoaded), int32(gen.ApplicationStateRunning)); swapped == false { + if atomic.LoadInt32(&a.state) == int32(gen.ApplicationStateRunning) { + return gen.ErrApplicationRunning + } + return gen.ErrApplicationState + } + + // build app env + appEnv := make(map[gen.Env]any) + // 1. from core env + for k, v := range options.CoreEnv { + appEnv[k] = v + } + // 2. from app spec env + for k, v := range a.spec.Env { + appEnv[k] = v + } + // 3. 
from options.Env (gen.ApplicationOptions.Env overrides spec.Env)
+	for k, v := range options.Env {
+		appEnv[k] = v
+	}
+
+	// start items
+	for _, item := range a.spec.Group {
+		opts := gen.ProcessOptionsExtra{
+			Register:       item.Name,
+			ProcessOptions: item.Options,
+			ParentPID:      options.CorePID,
+			ParentLeader:   options.CorePID,
+			ParentLogLevel: options.CoreLogLevel,
+			ParentEnv:      appEnv,
+			Application:    a.spec.Name,
+		}
+
+		opts.Args = item.Args
+
+		pid, err := a.node.spawn(item.Factory, opts)
+		if err != nil {
+			a.group.Range(func(k, _ any) bool {
+				pid := k.(gen.PID)
+				a.node.Kill(pid)
+				return true
+			})
+			atomic.StoreInt32(&a.state, int32(gen.ApplicationStateLoaded))
+			return err
+		}
+
+		a.group.Store(pid, true)
+	}
+
+	a.stopped = make(chan struct{})
+	a.mode = mode
+	a.parent = options.CorePID.Node
+	a.node.log.Info("application %s (%s) started", a.spec.Name, a.mode)
+
+	a.started = time.Now().Unix()
+
+	if lib.Recover() {
+		defer func() {
+			if r := recover(); r != nil {
+				// keep application running even if panic happened in Start callback handler
+				pc, fn, line, _ := runtime.Caller(2)
+				a.node.log.Panic("Application Start handler failed. Panic reason: %#v at %s[%s:%d]",
+					r, runtime.FuncForPC(pc).Name(), fn, line)
+			}
+		}()
+	}
+
+	a.behavior.Start(mode)
+	a.registerAppRoute()
+
+	return nil
+}
+
+func (a *application) stop(force bool, timeout time.Duration) error {
+	if swapped := atomic.CompareAndSwapInt32(&a.state,
+		int32(gen.ApplicationStateRunning),
+		int32(gen.ApplicationStateStopping)); swapped == false {
+		state := atomic.LoadInt32(&a.state)
+		if state == int32(gen.ApplicationStateLoaded) {
+			return nil // already stopped
+		}
+
+		if force == false {
+			if state == int32(gen.ApplicationStateStopping) {
+				return gen.ErrApplicationStopping
+			}
+			return gen.ErrApplicationState
+		}
+	}
+
+	a.registerAppRoute() // new state of the app
+
+	// update mode to prevent triggering 'permanent' mode
+	a.mode = gen.ApplicationModeTemporary
+
+	a.group.Range(func(k, _ any) bool {
+		pid := k.(gen.PID)
+		if force {
+			a.node.Kill(pid)
+		} else {
+			a.node.SendExit(pid, gen.TerminateReasonShutdown)
+		}
+		return true
+	})
+
+	if force {
+		a.reason = gen.TerminateReasonKill
+	} else {
+		a.reason = gen.TerminateReasonShutdown
+	}
+
+	select {
+	case <-a.stopped:
+		return nil
+	case <-time.After(timeout):
+		return gen.ErrApplicationStopping
+	}
+}
+
+func (a *application) terminate(pid gen.PID, reason error) {
+	if _, exist := a.group.LoadAndDelete(pid); exist == false {
+		// it was started as a child process somewhere deep in the supervision tree
+		// do nothing.
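// (such a process is owned by its supervisor, which decides whether to
// restart it; only the pids stored in a.group above drive the lifecycle
// of the application itself)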
+ return + } + + switch a.mode { + case gen.ApplicationModePermanent: + state := atomic.SwapInt32(&a.state, int32(gen.ApplicationStateStopping)) + if state == int32(gen.ApplicationStateStopping) { + // already in stopping + break + } + a.node.Log().Info("application %s (%s) will be stopped due to termination of %s with reason: %s", a.spec.Name, a.mode, pid, reason) + a.reason = reason + a.group.Range(func(k, _ any) bool { + pid := k.(gen.PID) + a.node.SendExit(pid, gen.TerminateReasonShutdown) + return true + }) + case gen.ApplicationModeTransient: + if reason == gen.TerminateReasonNormal || reason == gen.TerminateReasonShutdown { + // do nothing + break + } + a.node.Log().Info("application %s (%s) will be stopped due to termination of %s with reason: %s", a.spec.Name, a.mode, pid, reason) + + state := atomic.SwapInt32(&a.state, int32(gen.ApplicationStateStopping)) + if state == int32(gen.ApplicationStateStopping) { + // already in stopping + break + } + a.reason = reason + a.group.Range(func(k, _ any) bool { + pid := k.(gen.PID) + a.node.SendExit(pid, gen.TerminateReasonShutdown) + return true + }) + default: + // do nothing + } + + // check if it was the last item + empty := true + a.group.Range(func(_, _ any) bool { + empty = false + return false + }) + + if empty == false { + // do nothing + return + } + if a.reason == nil { + a.reason = gen.TerminateReasonNormal + } + + old := atomic.SwapInt32(&a.state, int32(gen.ApplicationStateLoaded)) + if old == int32(gen.ApplicationStateLoaded) { + return + } + if a.stopped != nil { + close(a.stopped) + } + + a.started = 0 + a.parent = "" + + a.node.log.Info("application %s (%s) stopped with reason %s", a.spec.Name, a.mode, a.reason) + + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + pc, fn, line, _ := runtime.Caller(2) + a.node.log.Panic("Application Terminate handler failed. 
Panic reason: %#v at %s[%s:%d]", + r, runtime.FuncForPC(pc).Name(), fn, line) + } + }() + } + + a.behavior.Terminate(a.reason) + + network := a.node.Network() + if network.Mode() != gen.NetworkModeEnabled { + return + } + a.registerAppRoute() // new state for the app + return +} + +func (a *application) info() gen.ApplicationInfo { + var info gen.ApplicationInfo + info.Name = a.spec.Name + info.Weight = a.spec.Weight + info.Description = a.spec.Description + info.Version = a.spec.Version + info.Depends = a.spec.Depends + info.Mode = a.mode + info.Uptime = time.Now().Unix() - a.started + info.Group = []gen.PID{} + a.group.Range(func(k, _ any) bool { + pid := k.(gen.PID) + info.Group = append(info.Group, pid) + return true + }) + + info.Env = make(map[gen.Env]any) + if a.node.security.ExposeEnvInfo { + for k, v := range a.spec.Env { + info.Env[k] = v + } + } + + info.State = gen.ApplicationState(atomic.LoadInt32(&a.state)) + return info +} + +func (a *application) tryUnload() bool { + return atomic.CompareAndSwapInt32(&a.state, int32(gen.ApplicationStateLoaded), 0) +} + +func (a *application) isRunning() bool { + return atomic.LoadInt32(&a.state) == int32(gen.ApplicationStateRunning) +} + +func (a *application) registerAppRoute() { + appRoute := gen.ApplicationRoute{ + Node: a.node.name, + Name: a.spec.Name, + Weight: a.spec.Weight, + Mode: a.mode, + State: gen.ApplicationState(a.state), + } + network := a.node.Network() + if network.Mode() != gen.NetworkModeEnabled { + return + } + if reg, err := network.Registrar(); err == nil { + reg.RegisterApplicationRoute(appRoute) + } +} + +func (a *application) unregisterAppRoute() { + network := a.node.Network() + if network.Mode() != gen.NetworkModeEnabled { + return + } + if reg, err := network.Registrar(); err == nil { + reg.UnregisterApplicationRoute(a.spec.Name) + } +} diff --git a/node/core.go b/node/core.go index 3d9a515c..235ea438 100644 --- a/node/core.go +++ b/node/core.go @@ -1,952 +1,1564 @@ package node import ( - "context" - "crypto/rsa" - "fmt" - "runtime" - "strings" - "sync" "sync/atomic" - "time" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" ) -var ( - startPID = uint64(1000) - startUniqID = uint64(time.Now().UnixNano()) +// gen.Core interface implementation - corePID = etf.Pid{} -) +func (n *node) RouteSendPID(from gen.PID, to gen.PID, options gen.MessageOptions, message any) error { + var queue lib.QueueMPSC -type core struct { - monitorInternal - networkInternal + if n.isRunning() == false { + return gen.ErrNodeTerminated + } - ctx context.Context - stop context.CancelFunc + if lib.Trace() { + n.log.Trace("RouteSendPID from %s to %s", from, to) + } - env map[gen.EnvKey]interface{} - mutexEnv sync.RWMutex + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.SendPID(from, to, options, message) + } - compression Compression + // local + value, found := n.processes.Load(to) + if found == false { + return gen.ErrProcessUnknown + } + p := value.(*process) - nextPID uint64 - uniqID uint64 - nodename string - creation uint32 + if alive := p.isAlive(); alive == false { + return gen.ErrProcessTerminated + } - names map[string]etf.Pid - mutexNames sync.RWMutex - aliases map[etf.Alias]*process - mutexAliases sync.RWMutex - processes map[uint64]*process - mutexProcesses sync.RWMutex + switch options.Priority { + case 
gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } - behaviors map[string]map[string]gen.RegisteredBehavior - mutexBehaviors sync.Mutex -} + qm := gen.TakeMailboxMessage() + qm.From = from + qm.Type = gen.MailboxMessageTypeRegular + qm.Target = to + qm.Message = message -type coreInternal interface { - gen.Core - CoreRouter + if ok := queue.Push(qm); ok == false { + if p.fallback.Enable == false { + return gen.ErrProcessMailboxFull + } - // core environment - ListEnv() map[gen.EnvKey]interface{} - SetEnv(name gen.EnvKey, value interface{}) - Env(name gen.EnvKey) interface{} + if p.fallback.Name == p.name { + return gen.ErrProcessMailboxFull + } - monitorInternal - networkInternal + fbm := gen.MessageFallback{ + PID: p.pid, + Tag: p.fallback.Tag, + Message: message, + } + fbto := gen.ProcessID{Name: p.fallback.Name, Node: n.name} + return n.RouteSendProcessID(from, fbto, options, fbm) + } + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil +} - spawn(name string, opts processOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) +func (n *node) RouteSendProcessID(from gen.PID, to gen.ProcessID, options gen.MessageOptions, message any) error { + var queue lib.QueueMPSC - registerName(name string, pid etf.Pid) error - unregisterName(name string) error + if n.isRunning() == false { + return gen.ErrNodeTerminated + } - newAlias(p *process) (etf.Alias, error) - deleteAlias(owner *process, alias etf.Alias) error + if lib.Trace() { + n.log.Trace("RouteSendProcessID from %s to %s", from, to) + } - coreNodeName() string - coreStop() - coreUptime() int64 - coreIsAlive() bool + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.SendProcessID(from, to, options, message) + } - coreWait() - coreWaitWithTimeout(d time.Duration) error + value, found := n.names.Load(to.Name) + if found == false { + return gen.ErrProcessUnknown + } + p := value.(*process) - monitorStats() internalMonitorStats - networkStats() internalNetworkStats - coreStats() internalCoreStats -} + if alive := p.isAlive(); alive == false { + return gen.ErrProcessTerminated + } -type internalCoreStats struct { - totalProcesses uint64 - totalReferences uint64 - processes int - aliases int - names int -} + switch options.Priority { + case gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } -type coreRouterInternal interface { - CoreRouter - MakeRef() etf.Ref + qm := gen.TakeMailboxMessage() + qm.From = from + qm.Type = gen.MailboxMessageTypeRegular + qm.Target = to.Name + qm.Message = message - ProcessByPid(pid etf.Pid) gen.Process - ProcessByName(name string) gen.Process - ProcessByAlias(alias etf.Alias) gen.Process + if ok := queue.Push(qm); ok == false { + if p.fallback.Enable == false { + return gen.ErrProcessMailboxFull + } - processByPid(pid etf.Pid) *process - getConnection(nodename string) (ConnectionInterface, error) - sendEvent(pid etf.Pid, event gen.Event, message gen.EventMessage) error -} + if p.fallback.Name == p.name { + return gen.ErrProcessMailboxFull + } -// transit proxy session -type proxyTransitSession struct { - a ConnectionInterface - b ConnectionInterface -} + fbm := gen.MessageFallback{ + PID: p.pid, + Tag: p.fallback.Tag, + Message: message, + } + fbto := gen.ProcessID{Name: p.fallback.Name, Node: 
n.name} + return n.RouteSendProcessID(from, fbto, options, fbm) + } -type proxyConnectRequest struct { - privateKey *rsa.PrivateKey - request ProxyConnectRequest - connection chan ConnectionInterface - cancel chan ProxyConnectCancel + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil } -func newCore(ctx context.Context, nodename string, cookie string, options Options) (coreInternal, error) { - if options.Compression.Level < 1 || options.Compression.Level > 9 { - options.Compression.Level = DefaultCompressionLevel +func (n *node) RouteSendAlias(from gen.PID, to gen.Alias, options gen.MessageOptions, message any) error { + var queue lib.QueueMPSC + + if n.isRunning() == false { + return gen.ErrNodeTerminated } - if options.Compression.Threshold < DefaultCompressionThreshold { - options.Compression.Threshold = DefaultCompressionThreshold + + if lib.Trace() { + n.log.Trace("RouteSendAlias from %s to %s", from, to) } - c := &core{ - env: options.Env, - nextPID: startPID, - uniqID: startUniqID, - // keep node to get the process to access to the node's methods - nodename: nodename, - compression: options.Compression, - creation: options.Creation, - names: make(map[string]etf.Pid), - aliases: make(map[etf.Alias]*process), - processes: make(map[uint64]*process), - behaviors: make(map[string]map[string]gen.RegisteredBehavior), + + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.SendAlias(from, to, options, message) } - corePID = etf.Pid{ - Node: etf.Atom(c.nodename), - ID: 1, - Creation: c.creation, + value, found := n.aliases.Load(to) + if found == false { + return gen.ErrProcessUnknown } + p := value.(*process) - corectx, corestop := context.WithCancel(ctx) - c.stop = corestop - c.ctx = context.WithValue(corectx, c, c) + if alive := p.isAlive(); alive == false { + return gen.ErrProcessTerminated + } - c.monitorInternal = newMonitor(nodename, coreRouterInternal(c)) - network, err := newNetwork(c.ctx, nodename, cookie, options, coreRouterInternal(c)) - if err != nil { - corestop() - return nil, err + qm := gen.TakeMailboxMessage() + qm.From = from + qm.Type = gen.MailboxMessageTypeRegular + qm.Target = to + qm.Message = message + + // check if this message should be delivered to the meta process + if value, found := p.metas.Load(to); found { + m := value.(*meta) + if ok := m.main.Push(qm); ok == false { + return gen.ErrMetaMailboxFull + } + atomic.AddUint64(&m.messagesIn, 1) + m.handle() + return nil } - c.networkInternal = network - c.registerEvent(corePID, EventNetwork, []gen.EventMessage{MessageEventNetwork{}}) + switch options.Priority { + case gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } - return c, nil -} + if ok := queue.Push(qm); ok == false { + if p.fallback.Enable == false { + return gen.ErrProcessMailboxFull + } -func (c *core) coreNodeName() string { - return c.nodename -} + if p.fallback.Name == p.name { + return gen.ErrProcessMailboxFull + } -func (c *core) coreStop() { - c.stop() - c.stopNetwork() + fbm := gen.MessageFallback{ + PID: p.pid, + Tag: p.fallback.Tag, + Message: message, + } + fbto := gen.ProcessID{Name: p.fallback.Name, Node: n.name} + return n.RouteSendProcessID(from, fbto, options, fbm) + } + + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil } -func (c *core) coreUptime() int64 { - return time.Now().Unix() - int64(c.creation) +func (n *node) 
RouteSendEvent(from gen.PID, token gen.Ref, options gen.MessageOptions, message gen.MessageEvent) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteSendEvent from %s with token %s", from, token) + } + + if from.Node == n.name { + // local producer. check if sender is allowed to send this event + value, found := n.events.Load(message.Event) + if found == false { + return gen.ErrEventUnknown + } + event := value.(*eventOwner) + if event.token != token { + return gen.ErrEventOwner + } + + if event.last != nil { + event.last.Push(message) + } + } + + consumers := append(n.links.consumers(message.Event), n.monitors.consumers(message.Event)...) + remote := make(map[gen.Atom]bool) + // local delivery + for _, pid := range consumers { + if pid.Node == n.name { + n.sendEventMessage(from, pid, options.Priority, message) + continue + } + if from.Node != n.name { + // event came here from the remote process. so there must be the local + // subscribers only. otherwise there is a bug + panic("unable to route event from remote to the remote") + } + remote[pid.Node] = true + } + + for k := range remote { + connection, err := n.network.GetConnection(k) + if err != nil { + continue + } + if err := connection.SendEvent(from, options, message); err != nil { + n.log.Error("unable to send event message to the remote consumer on %s: %s", k, err) + } + } + return nil } -func (c *core) coreWait() { - <-c.ctx.Done() +func (n *node) RouteSendExit(from gen.PID, to gen.PID, reason error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + if reason == nil { + return gen.ErrIncorrect + } + + if lib.Trace() { + n.log.Trace("RouteSendExit from %s to %s with reason %q", from, to, reason) + } + + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.SendExit(from, to, reason) + } + + message := gen.MessageExitPID{ + PID: from, + Reason: reason, + } + return n.sendExitMessage(from, to, message) + } -// WaitWithTimeout waits until node stopped. 
Return ErrTimeout -// if given timeout is exceeded -func (c *core) coreWaitWithTimeout(d time.Duration) error { - timer := time.NewTimer(d) - defer timer.Stop() +func (n *node) RouteSendResponse(from gen.PID, to gen.PID, options gen.MessageOptions, message any) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteSendResponse from %s to %s with ref %q", from, to, options.Ref) + } + + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.SendResponse(from, to, options, message) + } + value, loaded := n.processes.Load(to) + if loaded == false { + return gen.ErrProcessUnknown + } + p := value.(*process) select { - case <-timer.C: - return lib.ErrTimeout - case <-c.ctx.Done(): + case p.response <- response{ref: options.Ref, message: message}: + atomic.AddUint64(&p.messagesIn, 1) return nil + default: + // process doesn't wait for a response anymore + return gen.ErrResponseIgnored } } -// IsAlive returns true if node is running -func (c *core) coreIsAlive() bool { - return c.ctx.Err() == nil -} +func (n *node) RouteSendResponseError(from gen.PID, to gen.PID, options gen.MessageOptions, err error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } -func (c *core) newPID() etf.Pid { - // http://erlang.org/doc/apps/erts/erl_ext_dist.html#pid_ext - // https://stackoverflow.com/questions/243363/can-someone-explain-the-structure-of-a-pid-in-erlang - i := atomic.AddUint64(&c.nextPID, 1) - return etf.Pid{ - Node: etf.Atom(c.nodename), - ID: i, - Creation: c.creation, + if lib.Trace() { + n.log.Trace("RouteSendResponseError from %s to %s with ref %q", from, to, options.Ref) } -} + if to.Node != n.name { + // remote + connection, e := n.network.GetConnection(to.Node) + if e != nil { + return e + } + return connection.SendResponseError(from, to, options, err) + } -// MakeRef returns atomic reference etf.Ref within this node -func (c *core) MakeRef() (ref etf.Ref) { - ref.Node = etf.Atom(c.nodename) - ref.Creation = c.creation - nt := atomic.AddUint64(&c.uniqID, 1) - ref.ID[0] = uint32(uint64(nt) & ((2 << 17) - 1)) - ref.ID[1] = uint32(uint64(nt) >> 46) - return -} + value, loaded := n.processes.Load(to) + if loaded == false { + return gen.ErrProcessUnknown + } + p := value.(*process) -// IsAlias -func (c *core) IsAlias(alias etf.Alias) bool { - c.mutexAliases.RLock() - _, ok := c.aliases[alias] - c.mutexAliases.RUnlock() - return ok + select { + case p.response <- response{ref: options.Ref, err: err}: + atomic.AddUint64(&p.messagesIn, 1) + return nil + default: + // process doesn't wait for a response anymore + return gen.ErrResponseIgnored + } } -func (c *core) newAlias(p *process) (etf.Alias, error) { - var alias etf.Alias +func (n *node) RouteCallPID(from gen.PID, to gen.PID, options gen.MessageOptions, message any) error { + var queue lib.QueueMPSC - // chech if its alive - c.mutexProcesses.RLock() - _, exist := c.processes[p.self.ID] - c.mutexProcesses.RUnlock() - if !exist { - return alias, lib.ErrProcessUnknown + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + // not allowed to make a call request to itself + if from == to { + return gen.ErrNotAllowed } - alias = etf.Alias(c.MakeRef()) - lib.Log("[%s] CORE create process alias for %v: %s", c.nodename, p.self, alias) + if lib.Trace() { + n.log.Trace("RouteCallPID from %s to %s with ref %q", from, to, options.Ref) + } - c.mutexAliases.Lock() - c.aliases[alias] = p 
- c.mutexAliases.Unlock() + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.CallPID(from, to, options, message) + } - p.Lock() - p.aliases = append(p.aliases, alias) - p.Unlock() - return alias, nil -} + // local + value, found := n.processes.Load(to) + if found == false { + return gen.ErrProcessUnknown + } + p := value.(*process) -func (c *core) deleteAlias(owner *process, alias etf.Alias) error { - lib.Log("[%s] CORE delete process alias %v for %v", c.nodename, alias, owner.self) + if alive := p.isAlive(); alive == false { + return gen.ErrProcessTerminated + } - c.mutexAliases.Lock() - p, alias_exist := c.aliases[alias] - c.mutexAliases.Unlock() + switch options.Priority { + case gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } - if alias_exist == false { - return lib.ErrAliasUnknown + qm := gen.TakeMailboxMessage() + qm.Ref = options.Ref + qm.From = from + qm.Type = gen.MailboxMessageTypeRequest + qm.Message = message + + if ok := queue.Push(qm); ok == false { + return gen.ErrProcessMailboxFull } + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil +} - c.mutexProcesses.RLock() - _, process_exist := c.processes[owner.self.ID] - c.mutexProcesses.RUnlock() +func (n *node) RouteCallProcessID(from gen.PID, to gen.ProcessID, options gen.MessageOptions, message any) error { + var queue lib.QueueMPSC - if process_exist == false { - return lib.ErrProcessUnknown + if n.isRunning() == false { + return gen.ErrNodeTerminated } - if p.self != owner.self { - return lib.ErrAliasOwner + if lib.Trace() { + n.log.Trace("RouteCallProcessID from %s to %s with ref %q", from, to, options.Ref) } - p.Lock() - for i := range p.aliases { - if alias != p.aliases[i] { - continue + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err } - // remove it from the global alias list - c.mutexAliases.Lock() - delete(c.aliases, alias) - c.mutexAliases.Unlock() - // remove it from the process alias list - p.aliases[i] = p.aliases[0] - p.aliases = p.aliases[1:] - p.Unlock() - return nil + return connection.CallProcessID(from, to, options, message) } - p.Unlock() - // shouldn't reach this code. seems we got a bug - c.mutexAliases.Lock() - delete(c.aliases, alias) - c.mutexAliases.Unlock() - lib.Warning("Bug: Process lost its alias. 
Please, report this issue") + value, found := n.names.Load(to.Name) + if found == false { + return gen.ErrProcessUnknown + } + p := value.(*process) + if alive := p.isAlive(); alive == false { + return gen.ErrProcessTerminated + } + + switch options.Priority { + case gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } - return lib.ErrAliasUnknown + qm := gen.TakeMailboxMessage() + qm.Ref = options.Ref + qm.From = from + qm.Type = gen.MailboxMessageTypeRequest + qm.Target = to.Name + qm.Message = message + + if ok := queue.Push(qm); ok == false { + return gen.ErrProcessMailboxFull + } + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil } -func (c *core) newProcess(name string, behavior gen.ProcessBehavior, opts processOptions) (*process, error) { +func (n *node) RouteCallAlias(from gen.PID, to gen.Alias, options gen.MessageOptions, message any) error { + var queue lib.QueueMPSC + + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteCallAlias from %s to %s with ref %q", from, to, options.Ref) + } + + if to.Node != n.name { + // remote + connection, err := n.network.GetConnection(to.Node) + if err != nil { + return err + } + return connection.CallAlias(from, to, options, message) + } - var processContext context.Context - var kill context.CancelFunc - mailboxSize := DefaultProcessMailboxSize - if opts.MailboxSize > 0 { - mailboxSize = int(opts.MailboxSize) + value, found := n.aliases.Load(to) + if found == false { + return gen.ErrProcessUnknown } - directboxSize := DefaultProcessDirectboxSize - if opts.DirectboxSize > 0 { - directboxSize = int(opts.DirectboxSize) + p := value.(*process) + if alive := p.isAlive(); alive == false { + return gen.ErrProcessTerminated } - if opts.Context != nil { - if opts.Context.Value(c) != c { - return nil, lib.ErrProcessContext + qm := gen.TakeMailboxMessage() + qm.Ref = options.Ref + qm.From = from + qm.Type = gen.MailboxMessageTypeRequest + qm.Target = to + qm.Message = message + + // check if this request should be delivered to the meta process + if value, found := p.metas.Load(to); found { + m := value.(*meta) + if ok := m.main.Push(qm); ok == false { + return gen.ErrMetaMailboxFull } - processContext, kill = context.WithCancel(opts.Context) - } else { - processContext, kill = context.WithCancel(c.ctx) + atomic.AddUint64(&m.messagesIn, 1) + m.handle() + return nil } - pid := c.newPID() + switch options.Priority { + case gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } + if ok := queue.Push(qm); ok == false { + return gen.ErrProcessMailboxFull + } + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil +} - env := make(map[gen.EnvKey]interface{}) - // inherite the node environment - c.mutexEnv.RLock() - for k, v := range c.env { - env[k] = v +func (n *node) RouteLinkPID(pid gen.PID, target gen.PID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated } - c.mutexEnv.RUnlock() - // merge the custom ones - for k, v := range opts.Env { - env[k] = v + if lib.Trace() { + n.log.Trace("RouteLinkPID %s with %s", pid, target) } - process := &process{ - coreInternal: c, + if n.name == target.Node { + // local target + if _, exist := n.processes.Load(target); exist == false { + return gen.ErrProcessUnknown + } + n.links.registerConsumer(target, pid) + return nil + } - self: pid, - name: name, - 
behavior: behavior,
-		env: env,
-		compression: c.compression,
+	// remote target
+	connection, err := n.network.GetConnection(target.Node)
+	if err != nil {
+		return err
+	}

-		parent: opts.parent,
-		groupLeader: opts.GroupLeader,
+	if err := connection.LinkPID(pid, target); err != nil {
+		return err
+	}

-		mailBox: make(chan gen.ProcessMailboxMessage, mailboxSize),
-		gracefulExit: make(chan gen.ProcessGracefulExitRequest, mailboxSize),
-		direct: make(chan gen.ProcessDirectMessage, directboxSize),
+	n.links.registerConsumer(target, pid)
+	return nil
+}

-		context: processContext,
-		kill: kill,
+func (n *node) RouteUnlinkPID(pid gen.PID, target gen.PID) error {
+	if n.isRunning() == false {
+		return gen.ErrNodeTerminated
+	}

-		reply: make(map[etf.Ref]chan syncReplyMessage),
-		fallback: opts.Fallback,
+	if lib.Trace() {
+		n.log.Trace("RouteUnlinkPID %s with %s", pid, target)
	}

-	process.exit = func(from etf.Pid, reason string) error {
-		lib.Log("[%s] EXIT from %s to %s with reason: %s", c.nodename, from, pid, reason)
-		if processContext.Err() != nil {
-			// process is already died
-			return lib.ErrProcessUnknown
+	if n.name == target.Node {
+		// local target
+		if _, exist := n.processes.Load(target); exist == false {
+			return gen.ErrProcessUnknown
		}
+		n.links.unregisterConsumer(target, pid)
+		return nil
+	}

-		ex := gen.ProcessGracefulExitRequest{
-			From: from,
-			Reason: reason,
-		}
+	// remote target
+	connection, err := n.network.GetConnection(target.Node)
+	if err != nil {
+		return err
+	}

-		// use select just in case if this process isn't been started yet
-		// or ProcessLoop is already exited (has been set to nil)
-		// otherwise it cause infinity lock
-		select {
-		case process.gracefulExit <- ex:
-		default:
-			return lib.ErrProcessBusy
-		}
+	if err := connection.UnlinkPID(pid, target); err != nil {
+		return err
+	}
+	n.links.unregisterConsumer(target, pid)
+	return nil
+}
+
+func (n *node) RouteLinkProcessID(pid gen.PID, target gen.ProcessID) error {
+	if n.isRunning() == false {
+		return gen.ErrNodeTerminated
+	}
+
+	if lib.Trace() {
+		n.log.Trace("RouteLinkProcessID %s with %s", pid, target)
+	}

-		// let the process decide whether to stop itself, otherwise its going to be killed
-		if process.trapExit == false {
-			process.kill()
+	if n.name == target.Node {
+		// local target
+		if _, exist := n.names.Load(target.Name); exist == false {
+			return gen.ErrProcessUnknown
		}
+		n.links.registerConsumer(target, pid)
		return nil
	}

-	if name != "" {
-		lib.Log("[%s] CORE registering name (%s): %s", c.nodename, pid, name)
-		c.mutexNames.Lock()
-		if _, exist := c.names[name]; exist {
-			c.mutexNames.Unlock()
-			process.kill() // cancel context
-			return nil, lib.ErrTaken
+	// remote target
+	connection, err := n.network.GetConnection(target.Node)
+	if err != nil {
+		return err
+	}
+
+	if err := connection.LinkProcessID(pid, target); err != nil {
+		return err
+	}
+	n.links.registerConsumer(target, pid)
+	return nil
+}
+
+func (n *node) RouteUnlinkProcessID(pid gen.PID, target gen.ProcessID) error {
+	if n.isRunning() == false {
+		return gen.ErrNodeTerminated
+	}
+	if lib.Trace() {
+		n.log.Trace("RouteUnlinkProcessID %s with %s", pid, target)
+	}
+	if n.name == target.Node {
+		// local target
+		if _, exist := n.names.Load(target.Name); exist == false {
+			return gen.ErrProcessUnknown
		}
-		c.names[name] = process.self
-		c.mutexNames.Unlock()
+		n.links.unregisterConsumer(target, pid)
+		return nil
	}

-	lib.Log("[%s] CORE registering process: %s", c.nodename, pid)
-	c.mutexProcesses.Lock()
-	c.processes[process.self.ID]
= process
-	c.mutexProcesses.Unlock()
+	// remote target
+	connection, err := n.network.GetConnection(target.Node)
+	if err != nil {
+		return err
+	}

-	return process, nil
+	if err := connection.UnlinkProcessID(pid, target); err != nil {
+		return err
+	}
+	n.links.unregisterConsumer(target, pid)
+	return nil
+}

-func (c *core) deleteProcess(pid etf.Pid) {
-	c.mutexProcesses.Lock()
-	p, exist := c.processes[pid.ID]
-	if !exist {
-		c.mutexProcesses.Unlock()
-		return
+func (n *node) RouteLinkAlias(pid gen.PID, target gen.Alias) error {
+	if n.isRunning() == false {
+		return gen.ErrNodeTerminated
	}

-	lib.Log("[%s] CORE unregistering process: %s", c.nodename, p.self)
-	delete(c.processes, pid.ID)
-	c.mutexProcesses.Unlock()
-	c.mutexNames.Lock()
-	if (p.name) != "" {
-		lib.Log("[%s] CORE unregistering name (%s): %s", c.nodename, p.self, p.name)
-		delete(c.names, p.name)
+	if lib.Trace() {
+		n.log.Trace("RouteLinkAlias %s with %s", pid, target)
	}

-	// delete names registered with this pid
-	for name, pid := range c.names {
-		if p.self == pid {
-			delete(c.names, name)
+	if n.name == target.Node {
+		// local target
+		if _, exist := n.aliases.Load(target); exist == false {
+			return gen.ErrAliasUnknown
		}
+		n.links.registerConsumer(target, pid)
+		return nil
	}

-	c.mutexNames.Unlock()
-	c.mutexAliases.Lock()
-	for _, alias := range p.aliases {
-		delete(c.aliases, alias)
+	// remote target
+	connection, err := n.network.GetConnection(target.Node)
+	if err != nil {
+		return err
	}

-	c.mutexAliases.Unlock()
-	return
+	if err := connection.LinkAlias(pid, target); err != nil {
+		return err
+	}
+	n.links.registerConsumer(target, pid)
+	return nil
}

-func (c *core) spawn(name string, opts processOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) {
+func (n *node) RouteUnlinkAlias(pid gen.PID, target gen.Alias) error {
+	if n.isRunning() == false {
+		return gen.ErrNodeTerminated
+	}
+
+	if lib.Trace() {
+		n.log.Trace("RouteUnlinkAlias %s with %s", pid, target)
+	}
+
+	if n.name == target.Node {
+		// local target
+		if _, exist := n.aliases.Load(target); exist == false {
+			return gen.ErrAliasUnknown
+		}
+		n.links.unregisterConsumer(target, pid)
+		return nil
+	}

-	process, err := c.newProcess(name, behavior, opts)
+	// remote target
+	connection, err := n.network.GetConnection(target.Node)
	if err != nil {
-		return nil, err
+		return err
	}

-	lib.Log("[%s] CORE spawn a new process %s (registered name: %q)", c.nodename, process.self, name)
-
-	initProcess := func() (ps gen.ProcessState, err error) {
-		if lib.CatchPanic() {
-			defer func() {
-				if rcv := recover(); rcv != nil {
-					pc, fn, line, _ := runtime.Caller(2)
-					lib.Warning("initialization process failed %s[%q] %#v at %s[%s:%d]",
-						process.self, name, rcv, runtime.FuncForPC(pc).Name(), fn, line)
-					c.deleteProcess(process.self)
-					process.kill()
-					err = fmt.Errorf("panic")
+
+	if err := connection.UnlinkAlias(pid, target); err != nil {
+		return err
+	}
+
+	n.links.unregisterConsumer(target, pid)
+	return nil
+}
+
+func (n *node) RouteLinkEvent(pid gen.PID, target gen.Event) ([]gen.MessageEvent, error) {
+
+	if n.isRunning() == false {
+		return nil, gen.ErrNodeTerminated
+	}
+
+	if lib.Trace() {
+		n.log.Trace("RouteLinkEvent %s with %s", pid, target)
+	}
+
+	if n.name == target.Node {
+		var lastEventMessages []gen.MessageEvent
+		// local target
+		value, exist := n.events.Load(target)
+		if exist == false {
+			return nil, gen.ErrEventUnknown
+		}
+
+		event := value.(*eventOwner)
+		n.links.registerConsumer(target, pid)
+
+		if event.last
!= nil { + // load last N events + item := event.last.Item() + for { + if item == nil { + break } - }() + v := item.Value().(gen.MessageEvent) + lastEventMessages = append(lastEventMessages, v) + item = item.Next() + } } - ps, err = behavior.ProcessInit(process, args...) - return + c := atomic.AddInt32(&event.consumers, 1) + if event.notify == false || c > 1 { + return lastEventMessages, nil + } + + options := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + message := gen.MessageEventStart{ + Name: target.Name, + } + n.RouteSendPID(n.corePID, event.producer, options, message) + return lastEventMessages, nil } - processState, err := initProcess() + // remote target + connection, err := n.network.GetConnection(target.Node) if err != nil { return nil, err } - started := make(chan bool) - defer close(started) - - cleanProcess := func(reason string) { - // set gracefulExit to nil before we start termination handling - process.gracefulExit = nil - c.deleteProcess(process.self) - // invoke cancel context to prevent memory leaks - // and propagate context canelation - process.kill() - // notify all the linked process and monitors - c.handleTerminated(process.self, name, reason) - // make the rest empty - process.Lock() - process.aliases = []etf.Alias{} - - // Do not clean self and name. Sometimes its good to know what pid - // (and what name) was used by the dead process. (gen.Applications is using it) - // process.name = "" - // process.self = etf.Pid{} - - process.behavior = nil - process.parent = nil - process.groupLeader = nil - process.exit = nil - process.kill = nil - process.mailBox = nil - process.direct = nil - process.env = nil - process.reply = nil - process.Unlock() - } - - go func(ps gen.ProcessState) { - if lib.CatchPanic() { - defer func() { - if rcv := recover(); rcv != nil { - pc, fn, line, _ := runtime.Caller(2) - lib.Warning("process terminated %s[%q] %#v at %s[%s:%d]", - process.self, name, rcv, runtime.FuncForPC(pc).Name(), fn, line) - cleanProcess("panic") - } - }() + lastEventMessages, err := connection.LinkEvent(pid, target) + if err != nil { + return nil, err + } + + n.links.registerConsumer(target, pid) + return lastEventMessages, nil +} + +func (n *node) RouteUnlinkEvent(pid gen.PID, target gen.Event) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteUnlinkEvent %s with %s", pid, target) + } + + if n.name == target.Node { + // local target + value, exist := n.events.Load(target) + if exist == false { + return gen.ErrEventUnknown } + event := value.(*eventOwner) + n.links.unregisterConsumer(target, pid) - // start process loop - reason := behavior.ProcessLoop(ps, started) - // process stopped - cleanProcess(reason) + c := atomic.AddInt32(&event.consumers, -1) + if event.notify == false || c > 0 { + return nil + } - }(processState) + // notify producer + options := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + message := gen.MessageEventStop{ + Name: target.Name, + } + n.RouteSendPID(n.corePID, event.producer, options, message) + return nil + } - // wait for the starting process loop - <-started - return process, nil -} + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err + } -func (c *core) registerName(name string, pid etf.Pid) error { - lib.Log("[%s] CORE registering name %s", c.nodename, name) - c.mutexNames.Lock() - defer c.mutexNames.Unlock() - if _, ok := c.names[name]; ok { - // already registered - return 
lib.ErrTaken + if err := connection.UnlinkEvent(pid, target); err != nil { + return err } - c.names[name] = pid + n.links.unregisterConsumer(target, pid) return nil } -func (c *core) unregisterName(name string) error { - lib.Log("[%s] CORE unregistering name %s", c.nodename, name) - c.mutexNames.Lock() - defer c.mutexNames.Unlock() - if _, ok := c.names[name]; ok { - delete(c.names, name) - return nil +func (n *node) RouteMonitorPID(pid gen.PID, target gen.PID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated } - return lib.ErrNameUnknown -} -// ListEnv -func (c *core) ListEnv() map[gen.EnvKey]interface{} { - c.mutexEnv.RLock() - defer c.mutexEnv.RUnlock() - - env := make(map[gen.EnvKey]interface{}) - for key, value := range c.env { - env[key] = value + if lib.Trace() { + n.log.Trace("RouteMonitor %s to %s", pid, target) } - return env -} + if n.name == target.Node { + // local target + if v, exist := n.processes.Load(target); exist == false { + return gen.ErrProcessUnknown + } else { + p := v.(*process) + if p.State() == gen.ProcessStateTerminated { + return gen.ErrProcessTerminated + } + } + n.monitors.registerConsumer(target, pid) + return nil + } -// SetEnv -func (c *core) SetEnv(name gen.EnvKey, value interface{}) { - c.mutexEnv.Lock() - defer c.mutexEnv.Unlock() - if strings.HasPrefix(string(name), "ergo:") { - return + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err } - c.env[name] = value -} -// Env -func (c *core) Env(name gen.EnvKey) interface{} { - c.mutexEnv.RLock() - defer c.mutexEnv.RUnlock() - if value, ok := c.env[name]; ok { - return value + if err := connection.MonitorPID(pid, target); err != nil { + return err } + n.monitors.registerConsumer(target, pid) return nil } -// RegisterBehavior -func (c *core) RegisterBehavior(group, name string, behavior gen.ProcessBehavior, data interface{}) error { - lib.Log("[%s] CORE registering behavior %q in group %q ", c.nodename, name, group) - var groupBehaviors map[string]gen.RegisteredBehavior - var exist bool +func (n *node) RouteDemonitorPID(pid gen.PID, target gen.PID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } - c.mutexBehaviors.Lock() - defer c.mutexBehaviors.Unlock() + if lib.Trace() { + n.log.Trace("RouteDemonitor %s to %s", pid, target) + } - groupBehaviors, exist = c.behaviors[group] - if !exist { - groupBehaviors = make(map[string]gen.RegisteredBehavior) - c.behaviors[group] = groupBehaviors + if n.name == target.Node { + // local target + if _, exist := n.processes.Load(target); exist == false { + return gen.ErrProcessUnknown + } + n.monitors.unregisterConsumer(target, pid) + return nil } - _, exist = groupBehaviors[name] - if exist { - return lib.ErrTaken + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err } - rb := gen.RegisteredBehavior{ - Behavior: behavior, - Data: data, + if err := connection.DemonitorPID(pid, target); err != nil { + return err } - groupBehaviors[name] = rb + n.monitors.unregisterConsumer(target, pid) return nil } -// RegisteredBehavior -func (c *core) RegisteredBehavior(group, name string) (gen.RegisteredBehavior, error) { - var groupBehaviors map[string]gen.RegisteredBehavior - var rb gen.RegisteredBehavior - var exist bool +func (n *node) RouteMonitorProcessID(pid gen.PID, target gen.ProcessID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteMonitorProcessID %s to %s", 
pid, target) + } - c.mutexBehaviors.Lock() - defer c.mutexBehaviors.Unlock() + if n.name == target.Node { + // local target + if v, exist := n.names.Load(target.Name); exist == false { + return gen.ErrProcessUnknown + } else { + p := v.(*process) + if p.State() == gen.ProcessStateTerminated { + return gen.ErrProcessTerminated + } + } + n.monitors.registerConsumer(target, pid) + return nil + } - groupBehaviors, exist = c.behaviors[group] - if !exist { - return rb, lib.ErrBehaviorGroupUnknown + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err } - rb, exist = groupBehaviors[name] - if !exist { - return rb, lib.ErrBehaviorUnknown + if err := connection.MonitorProcessID(pid, target); err != nil { + return err } - return rb, nil + n.monitors.registerConsumer(target, pid) + return nil } -// RegisteredBehaviorGroup -func (c *core) RegisteredBehaviorGroup(group string) []gen.RegisteredBehavior { - var groupBehaviors map[string]gen.RegisteredBehavior - var exist bool - var listrb []gen.RegisteredBehavior +func (n *node) RouteDemonitorProcessID(pid gen.PID, target gen.ProcessID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } - c.mutexBehaviors.Lock() - defer c.mutexBehaviors.Unlock() + if lib.Trace() { + n.log.Trace("RouteDemonitorProcessID %s to %s", pid, target) + } + + if n.name == target.Node { + // local target + if _, exist := n.names.Load(target.Name); exist == false { + return gen.ErrProcessUnknown + } + n.monitors.unregisterConsumer(target, pid) + return nil + } - groupBehaviors, exist = c.behaviors[group] - if !exist { - return listrb + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err } - for _, v := range groupBehaviors { - listrb = append(listrb, v) + if err := connection.DemonitorProcessID(pid, target); err != nil { + return err } - return listrb + + n.monitors.unregisterConsumer(target, pid) + return nil } -// UnregisterBehavior -func (c *core) UnregisterBehavior(group, name string) error { - lib.Log("[%s] CORE unregistering behavior %s in group %s ", c.nodename, name, group) - var groupBehaviors map[string]gen.RegisteredBehavior - var exist bool +func (n *node) RouteMonitorAlias(pid gen.PID, target gen.Alias) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } - c.mutexBehaviors.Lock() - defer c.mutexBehaviors.Unlock() + if lib.Trace() { + n.log.Trace("RouteMonitorAlias %s to %s", pid, target) + } - groupBehaviors, exist = c.behaviors[group] - if !exist { - return lib.ErrBehaviorUnknown + if n.name == target.Node { + // local target + if _, exist := n.aliases.Load(target); exist == false { + return gen.ErrAliasUnknown + } + n.monitors.registerConsumer(target, pid) + return nil } - delete(groupBehaviors, name) - // remove group if its empty - if len(groupBehaviors) == 0 { - delete(c.behaviors, group) + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err + } + + if err := connection.MonitorAlias(pid, target); err != nil { + return err } + + n.monitors.registerConsumer(target, pid) return nil } -// ProcessInfo -func (c *core) ProcessInfo(pid etf.Pid) (gen.ProcessInfo, error) { - p := c.processByPid(pid) - if p == nil { - return gen.ProcessInfo{}, fmt.Errorf("undefined") +func (n *node) RouteDemonitorAlias(pid gen.PID, target gen.Alias) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated } - return p.Info(), nil -} + if lib.Trace() { + 
n.log.Trace("RouteDemonitorAlias %s to %s", pid, target) + } -// ProcessByPid -func (c *core) ProcessByPid(pid etf.Pid) gen.Process { - p := c.processByPid(pid) - if p == nil { + if n.name == target.Node { + // local target + if _, exist := n.aliases.Load(target); exist == false { + return gen.ErrAliasUnknown + } + n.monitors.unregisterConsumer(target, pid) return nil } - return p -} -// ProcessByAlias -func (c *core) ProcessByAlias(alias etf.Alias) gen.Process { - c.mutexAliases.RLock() - defer c.mutexAliases.RUnlock() - if p, ok := c.aliases[alias]; ok && p.IsAlive() { - return p + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err } - // unknown process + + if err := connection.DemonitorAlias(pid, target); err != nil { + return err + } + + n.monitors.unregisterConsumer(target, pid) return nil } -// ProcessByName -func (c *core) ProcessByName(name string) gen.Process { - var pid etf.Pid - if name != "" { - // requesting Process by name - c.mutexNames.RLock() +func (n *node) RouteMonitorEvent(pid gen.PID, target gen.Event) ([]gen.MessageEvent, error) { - if p, ok := c.names[name]; ok { - pid = p - } else { - c.mutexNames.RUnlock() - return nil + if n.isRunning() == false { + return nil, gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteMonitorEvent %s to %s", pid, target) + } + + if n.name == target.Node { + var lastEventMessages []gen.MessageEvent + // local target + value, exist := n.events.Load(target) + if exist == false { + return nil, gen.ErrEventUnknown + } + event := value.(*eventOwner) + n.monitors.registerConsumer(target, pid) + + if event.last != nil { + // load last N events + item := event.last.Item() + for { + if item == nil { + break + } + v := item.Value().(gen.MessageEvent) + lastEventMessages = append(lastEventMessages, v) + item = item.Next() + } + } + + c := atomic.AddInt32(&event.consumers, 1) + if event.notify == false || c > 1 { + return lastEventMessages, nil } - c.mutexNames.RUnlock() + + options := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + message := gen.MessageEventStart{ + Name: target.Name, + } + n.RouteSendPID(n.corePID, event.producer, options, message) + return lastEventMessages, nil } - return c.ProcessByPid(pid) -} + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return nil, err + } -// ProcessList -func (c *core) ProcessList() []gen.Process { - list := []gen.Process{} - c.mutexProcesses.RLock() - for _, p := range c.processes { - list = append(list, p) + lastEventMessages, err := connection.MonitorEvent(pid, target) + if err != nil { + return nil, err } - c.mutexProcesses.RUnlock() - return list + + n.monitors.registerConsumer(target, pid) + return lastEventMessages, nil } -// -// implementation of CoreRouter interface: -// RouteSend -// RouteSendReg -// RouteSendAlias -// +func (n *node) RouteDemonitorEvent(pid gen.PID, target gen.Event) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteDemonitorEvent %s to %s", pid, target) + } -// RouteSend implements RouteSend method of Router interface -func (c *core) RouteSend(from etf.Pid, to etf.Pid, message etf.Term) error { - if string(to.Node) == c.nodename { - if to.Creation != c.creation { - // message is addressed to the previous incarnation of this PID - lib.Warning("message from %s is addressed to the previous incarnation of this PID %s", from, to) - return lib.ErrProcessIncarnation + if n.name == 
target.Node { + // local target + value, exist := n.events.Load(target) + if exist == false { + return gen.ErrEventUnknown } - // local route - c.mutexProcesses.RLock() - p, exist := c.processes[to.ID] - c.mutexProcesses.RUnlock() - if !exist { - lib.Log("[%s] CORE route message by pid (local) %s failed. Unknown process", c.nodename, to) - return lib.ErrProcessUnknown + n.monitors.unregisterConsumer(target, pid) + + // notify producer + event := value.(*eventOwner) + c := atomic.AddInt32(&event.consumers, -1) + if event.notify == false || c > 0 { + return nil } - lib.Log("[%s] CORE route message by pid (local) %s", c.nodename, to) - select { - case p.mailBox <- gen.ProcessMailboxMessage{From: from, Message: message}: - default: - c.mutexNames.RLock() - pid, found := c.names[p.fallback.Name] - c.mutexNames.RUnlock() - if found == false { - lib.Warning("mailbox of %s[%q] is full. dropped message from %s", p.self, p.name, from) - return lib.ErrProcessMailboxFull - } - fbm := gen.MessageFallback{ - Process: p.self, - Tag: p.fallback.Tag, - Message: message, - } - return c.RouteSend(from, pid, fbm) + + options := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + message := gen.MessageEventStop{ + Name: target.Name, } + n.RouteSendPID(n.corePID, event.producer, options, message) return nil } - // do not allow to send from the alien node. - if string(from.Node) != c.nodename { - return lib.ErrSenderUnknown + // remote target + connection, err := n.network.GetConnection(target.Node) + if err != nil { + return err } - // sending to remote node - c.mutexProcesses.RLock() - p_from, exist := c.processes[from.ID] - c.mutexProcesses.RUnlock() - if !exist { - lib.Log("[%s] CORE route message by pid (remote) %s failed. Unknown sender", c.nodename, to) - return lib.ErrSenderUnknown - } - connection, err := c.getConnection(string(to.Node)) - if err != nil { + if err := connection.DemonitorEvent(pid, target); err != nil { return err } - lib.Log("[%s] CORE route message by pid (remote) %s", c.nodename, to) - return connection.Send(p_from, to, message) + n.monitors.unregisterConsumer(target, pid) + return nil } -// RouteSendReg implements RouteSendReg method of Router interface -func (c *core) RouteSendReg(from etf.Pid, to gen.ProcessID, message etf.Term) error { - if to.Node == c.nodename { - // local route - c.mutexNames.RLock() - pid, ok := c.names[to.Name] - c.mutexNames.RUnlock() - if !ok { - lib.Log("[%s] CORE route message by gen.ProcessID (local) %s failed. Unknown process", c.nodename, to) - return lib.ErrProcessUnknown - } - lib.Log("[%s] CORE route message by gen.ProcessID (local) %s", c.nodename, to) - return c.RouteSend(from, pid, message) +func (n *node) RouteTerminatePID(target gen.PID, reason error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated } - // do not allow to send from the alien node. - if string(from.Node) != c.nodename { - return lib.ErrSenderUnknown + if lib.Trace() { + n.log.Trace("RouteTerminatePID %s with reason %q", target, reason) } - // send to remote node - c.mutexProcesses.RLock() - p_from, exist := c.processes[from.ID] - c.mutexProcesses.RUnlock() - if !exist { - lib.Log("[%s] CORE route message by gen.ProcessID (remote) %s failed. 
Unknown sender", c.nodename, to) - return lib.ErrSenderUnknown + remote := make(map[gen.Atom]bool) + messageExit := gen.MessageExitPID{ + PID: target, + Reason: reason, } - connection, err := c.getConnection(string(to.Node)) - if err != nil { - return err + for _, pid := range n.links.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true + } + n.sendExitMessage(target, pid, messageExit) } - lib.Log("[%s] CORE route message by gen.ProcessID (remote) %s", c.nodename, to) - return connection.SendReg(p_from, to, message) + messageDown := gen.MessageDownPID{ + PID: target, + Reason: reason, + } + messageOptions := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + for _, pid := range n.monitors.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true + } + n.RouteSendPID(target, pid, messageOptions, messageDown) + } + + if target.Node != n.name && len(remote) > 0 { + panic("bug") + } + + for name := range remote { + if connection, err := n.network.GetConnection(name); err == nil { + connection.SendTerminatePID(target, reason) + } + } + return nil } -// RouteSendAlias implements RouteSendAlias method of Router interface -func (c *core) RouteSendAlias(from etf.Pid, to etf.Alias, message etf.Term) error { +func (n *node) RouteTerminateProcessID(target gen.ProcessID, reason error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteTerminateProcessID %s with reason %q", target, reason) + } - if string(to.Node) == c.nodename { - // local route by alias - c.mutexAliases.RLock() - process, ok := c.aliases[to] - c.mutexAliases.RUnlock() - if !ok { - lib.Log("[%s] CORE route message by alias (local) %s failed. Unknown process", c.nodename, to) - return lib.ErrProcessUnknown + remote := make(map[gen.Atom]bool) + messageExit := gen.MessageExitProcessID{ + ProcessID: target, + Reason: reason, + } + for _, pid := range n.links.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true } - lib.Log("[%s] CORE route message by alias (local) %s", c.nodename, to) - return c.RouteSend(from, process.self, message) + n.sendExitMessage(n.corePID, pid, messageExit) } - // do not allow to send from the alien node. Proxy request must be used. - if string(from.Node) != c.nodename { - return lib.ErrSenderUnknown + messageDown := gen.MessageDownProcessID{ + ProcessID: target, + Reason: reason, + } + messageOptions := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + for _, pid := range n.monitors.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true + } + n.RouteSendPID(n.corePID, pid, messageOptions, messageDown) } - // send to remote node - c.mutexProcesses.RLock() - p_from, exist := c.processes[from.ID] - c.mutexProcesses.RUnlock() - if !exist { - lib.Log("[%s] CORE route message by alias (remote) %s failed. 
Unknown sender", c.nodename, to) - return lib.ErrSenderUnknown + if target.Node != n.name && len(remote) > 0 { + panic("bug") } - connection, err := c.getConnection(string(to.Node)) - if err != nil { - return err + + for name := range remote { + if connection, err := n.network.GetConnection(name); err == nil { + connection.SendTerminateProcessID(target, reason) + } + } + return nil +} + +func (n *node) RouteTerminateEvent(target gen.Event, reason error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteTerminateEvent %s with reason %q", target, reason) + } + + remote := make(map[gen.Atom]bool) + messageExit := gen.MessageExitEvent{ + Event: target, + Reason: reason, + } + for _, pid := range n.links.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true + } + n.sendExitMessage(n.corePID, pid, messageExit) } - lib.Log("[%s] CORE route message by alias (remote) %s", c.nodename, to) - return connection.SendAlias(p_from, to, message) + messageDown := gen.MessageDownEvent{ + Event: target, + Reason: reason, + } + messageOptions := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + for _, pid := range n.monitors.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true + } + n.RouteSendPID(n.corePID, pid, messageOptions, messageDown) + } + + if target.Node != n.name && len(remote) > 0 { + panic("bug") + } + + for name := range remote { + if connection, err := n.network.GetConnection(name); err == nil { + connection.SendTerminateEvent(target, reason) + } + } + return nil } -// RouteSpawnRequest -func (c *core) RouteSpawnRequest(node string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error { - if node == c.nodename { - // get connection for reply - connection, err := c.getConnection(string(request.From.Node)) - if err != nil { - return err +func (n *node) RouteTerminateAlias(target gen.Alias, reason error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteTerminateAlias %s with reason %q", target, reason) + } + + remote := make(map[gen.Atom]bool) + messageExit := gen.MessageExitAlias{ + Alias: target, + Reason: reason, + } + for _, pid := range n.links.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true } + n.sendExitMessage(n.corePID, pid, messageExit) + } - // check if we have registered behavior with given name - b, err := c.RegisteredBehavior(remoteBehaviorGroup, behaviorName) - if err != nil { - return connection.SpawnReplyError(request.From, request.Ref, err) + messageDown := gen.MessageDownAlias{ + Alias: target, + Reason: reason, + } + messageOptions := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + for _, pid := range n.monitors.unregister(target) { + if pid.Node != n.name { + remote[pid.Node] = true + } + n.RouteSendPID(n.corePID, pid, messageOptions, messageDown) + } + + if target.Node != n.name && len(remote) > 0 { + panic("bug") + } + + for name := range remote { + if connection, err := n.network.GetConnection(name); err == nil { + connection.SendTerminateAlias(target, reason) } + } + return nil +} - // spawn new process - process_opts := processOptions{} - process_opts.Env = map[gen.EnvKey]interface{}{EnvKeyRemoteSpawn: request.Options} - process, err_spawn := c.spawn(request.Options.Name, process_opts, b.Behavior, args...) 
+func (n *node) RouteSpawn(node gen.Atom, name gen.Atom, options gen.ProcessOptionsExtra, source gen.Atom) (gen.PID, error) { + var empty gen.PID + + if n.isRunning() == false { + return empty, gen.ErrNodeTerminated + } - // reply - if err_spawn != nil { - return connection.SpawnReplyError(request.From, request.Ref, err_spawn) + if lib.Trace() { + n.log.Trace("RouteSpawn %s from %s to %s", name, options.ParentPID, node) + } + + if node != n.name { + // remote + connection, err := n.network.GetConnection(node) + if err != nil { + return empty, err } - return connection.SpawnReply(request.From, request.Ref, process.Self()) + return connection.RemoteSpawn(name, options) } - connection, err := c.getConnection(node) + factory, err := n.network.getEnabledSpawn(name, source) if err != nil { + return empty, err + } + + return n.spawn(factory, options) +} + +func (n *node) RouteApplicationStart(name gen.Atom, mode gen.ApplicationMode, options gen.ApplicationOptionsExtra, source gen.Atom) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if lib.Trace() { + n.log.Trace("RouteApplicationStart %s with mode %s requested by %s", name, mode, source) + } + + if err := n.network.isEnabledApplicationStart(name, source); err != nil { return err } - return connection.SpawnRequest(node, behaviorName, request, args...) + + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown + } + app := v.(*application) + return app.start(mode, options) } -// RouteSpawnReply -func (c *core) RouteSpawnReply(to etf.Pid, ref etf.Ref, result etf.Term) error { - process := c.processByPid(to) - if process == nil { - // seems process terminated - return lib.ErrProcessTerminated +func (n *node) RouteNodeDown(name gen.Atom, reason error) { + // handle links + for _, target := range n.links.targetsNodeDown(name) { + var message any + switch t := target.(type) { + case gen.PID: + message = gen.MessageExitPID{ + PID: t, + Reason: gen.ErrNoConnection, + } + + case gen.ProcessID: + message = gen.MessageExitProcessID{ + ProcessID: t, + Reason: gen.ErrNoConnection, + } + + case gen.Alias: + message = gen.MessageExitAlias{ + Alias: t, + Reason: gen.ErrNoConnection, + } + + case gen.Event: + message = gen.MessageExitEvent{ + Event: t, + Reason: gen.ErrNoConnection, + } + + case gen.Atom: + message = gen.MessageExitNode{ + Name: name, + } + + default: + // bug + continue + } + for _, pid := range n.links.unregister(target) { + n.sendExitMessage(n.corePID, pid, message) + } + } + + // handle monitors + for _, target := range n.monitors.targetsNodeDown(name) { + var message any + switch t := target.(type) { + case gen.PID: + message = gen.MessageDownPID{ + PID: t, + Reason: gen.ErrNoConnection, + } + + case gen.ProcessID: + message = gen.MessageDownProcessID{ + ProcessID: t, + Reason: gen.ErrNoConnection, + } + + case gen.Alias: + message = gen.MessageDownAlias{ + Alias: t, + Reason: gen.ErrNoConnection, + } + + case gen.Event: + message = gen.MessageDownEvent{ + Event: t, + Reason: gen.ErrNoConnection, + } + + case gen.Atom: + message = gen.MessageDownNode{ + Name: name, + } + + default: + // bug + continue + } + messageOptions := gen.MessageOptions{ + Priority: gen.MessagePriorityHigh, + } + for _, pid := range n.monitors.unregister(target) { + n.RouteSendPID(n.corePID, pid, messageOptions, message) + } } - process.PutSyncReply(ref, result, nil) - return nil } -func (c *core) processByPid(pid etf.Pid) *process { - c.mutexProcesses.RLock() - defer c.mutexProcesses.RUnlock() - if 
p, ok := c.processes[pid.ID]; ok && p.IsAlive() { - return p +func (n *node) MakeRef() gen.Ref { + var ref gen.Ref + ref.Node = n.name + ref.Creation = n.creation + id := atomic.AddUint64(&n.uniqID, 1) + ref.ID[0] = id & ((2 << 17) - 1) + ref.ID[1] = id >> 46 + return ref +} + +func (n *node) PID() gen.PID { + return n.corePID +} + +func (n *node) LogLevel() gen.LogLevel { + return n.log.Level() +} + +func (n *node) Creation() int64 { + return n.creation +} + +func (n *node) sendExitMessage(from gen.PID, to gen.PID, message any) error { + value, loaded := n.processes.Load(to) + if loaded == false { + return gen.ErrProcessUnknown + } + p := value.(*process) + + if lib.Trace() { + n.log.Trace("...sendExitMessage from %s to %s ", from, to) } - // unknown process + + // graceful shutdown via messaging + qm := gen.TakeMailboxMessage() + qm.From = from + qm.Type = gen.MailboxMessageTypeExit + qm.Message = message + + if ok := p.mailbox.Urgent.Push(qm); ok == false { + return gen.ErrProcessMailboxFull + } + + atomic.AddUint64(&p.messagesIn, 1) + p.run() return nil } -func (c *core) coreStats() internalCoreStats { - stats := internalCoreStats{} - stats.totalProcesses = atomic.LoadUint64(&c.nextPID) - startPID - stats.totalReferences = atomic.LoadUint64(&c.uniqID) - startUniqID +func (n *node) sendEventMessage(from gen.PID, to gen.PID, priority gen.MessagePriority, message gen.MessageEvent) error { + var queue lib.QueueMPSC - c.mutexProcesses.RLock() - stats.processes = len(c.processes) - c.mutexProcesses.RUnlock() + value, loaded := n.processes.Load(to) + if loaded == false { + return gen.ErrProcessUnknown + } + p := value.(*process) + + switch priority { + case gen.MessagePriorityHigh: + queue = p.mailbox.System + case gen.MessagePriorityMax: + queue = p.mailbox.Urgent + default: + queue = p.mailbox.Main + } + + if lib.Trace() { + n.log.Trace("...sendEventMessage from %s to %s ", from, to) + } - c.mutexAliases.RLock() - stats.aliases = len(c.aliases) - c.mutexAliases.RUnlock() + qm := gen.TakeMailboxMessage() + qm.From = from + qm.Type = gen.MailboxMessageTypeEvent + qm.Message = message - c.mutexNames.RLock() - stats.names = len(c.names) - c.mutexNames.RUnlock() - return stats + if ok := queue.Push(qm); ok == false { + return gen.ErrProcessMailboxFull + } + + atomic.AddUint64(&p.messagesIn, 1) + p.run() + return nil } diff --git a/node/log.go b/node/log.go new file mode 100644 index 00000000..f90fd0e8 --- /dev/null +++ b/node/log.go @@ -0,0 +1,95 @@ +package node + +import ( + "time" + + "ergo.services/ergo/gen" +) + +// gen.Log interface implementation + +func createLog(level gen.LogLevel, dolog func(gen.MessageLog, string)) *log { + return &log{ + level: level, + dolog: dolog, + } +} + +type log struct { + level gen.LogLevel + logger string + source any + dolog func(gen.MessageLog, string) +} + +func (l *log) Level() gen.LogLevel { + return l.level +} + +func (l *log) SetLevel(level gen.LogLevel) error { + if level < gen.LogLevelDebug { + return gen.ErrIncorrect + } + if level > gen.LogLevelDisabled { + return gen.ErrIncorrect + } + l.level = level + return nil +} + +func (l *log) Logger() string { + return l.logger +} + +func (l *log) SetLogger(name string) { + l.logger = name +} + +func (l *log) Trace(format string, args ...any) { + l.write(gen.LogLevelTrace, format, args) +} + +func (l *log) Debug(format string, args ...any) { + l.write(gen.LogLevelDebug, format, args) +} + +func (l *log) Info(format string, args ...any) { + l.write(gen.LogLevelInfo, format, args) +} + +func (l *log) 
Warning(format string, args ...any) { + l.write(gen.LogLevelWarning, format, args) +} + +func (l *log) Error(format string, args ...any) { + l.write(gen.LogLevelError, format, args) +} + +func (l *log) Panic(format string, args ...any) { + l.write(gen.LogLevelPanic, format, args) +} + +func (l *log) setSource(source any) { + switch source.(type) { + case gen.MessageLogProcess, gen.MessageLogMeta, gen.MessageLogNode, gen.MessageLogNetwork: + default: + panic("unknown source type for log interface") + } + l.source = source +} + +func (l *log) write(level gen.LogLevel, format string, args []any) { + if l.level > level { + return + } + + m := gen.MessageLog{ + Time: time.Now(), + Level: level, + Source: l.source, + Format: format, + Args: args, + } + + l.dolog(m, l.logger) +} diff --git a/node/logger.go b/node/logger.go new file mode 100644 index 00000000..50ac4794 --- /dev/null +++ b/node/logger.go @@ -0,0 +1,30 @@ +package node + +import ( + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +// +// logger based on a process +// + +func createProcessLogger(queue lib.QueueMPSC, run func()) gen.LoggerBehavior { + return &process_logger{ + queue: queue, + run: run, + } +} + +type process_logger struct { + queue lib.QueueMPSC + level gen.LogLevel + run func() +} + +func (p *process_logger) Log(message gen.MessageLog) { + p.queue.Push(message) + p.run() +} + +func (p *process_logger) Terminate() {} diff --git a/node/meta.go b/node/meta.go new file mode 100644 index 00000000..4db1fa51 --- /dev/null +++ b/node/meta.go @@ -0,0 +1,269 @@ +package node + +import ( + "runtime" + "sync/atomic" + "time" + + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +type meta struct { + // fields were reordered to have a small memory footprint + behavior gen.MetaBehavior + + main lib.QueueMPSC + system lib.QueueMPSC + + p *process + log *log + + sbehavior string + id gen.Alias + + messagesIn uint64 + messagesOut uint64 + + priority gen.MessagePriority + + creation int64 // used for the meta process Uptime method only + state int32 +} + +func (m *meta) ID() gen.Alias { + return m.id +} + +func (m *meta) Parent() gen.PID { + return m.p.pid +} + +func (m *meta) Send(to any, message any) error { + if err := m.p.Send(to, message); err != nil { + return err + } + atomic.AddUint64(&m.messagesOut, 1) + return nil +} + +func (m *meta) SendImportant(to any, message any) error { + if err := m.p.SendImportant(to, message); err != nil { + return err + } + atomic.AddUint64(&m.messagesOut, 1) + return nil +} + +func (m *meta) SendWithPriority(to any, message any, priority gen.MessagePriority) error { + if err := m.p.SendWithPriority(to, message, priority); err != nil { + return err + } + atomic.AddUint64(&m.messagesOut, 1) + return nil +} + +func (m *meta) Spawn(behavior gen.MetaBehavior, options gen.MetaOptions) (gen.Alias, error) { + return m.p.SpawnMeta(behavior, options) +} + +func (m *meta) Env(name gen.Env) (any, bool) { + return m.p.Env(name) +} + +func (m *meta) EnvList() map[gen.Env]any { + return m.p.EnvList() +} + +func (m *meta) Log() gen.Log { + return m.log +} + +func (m *meta) init() (r error) { + if lib.Recover() { + defer func() { + if rcv := recover(); rcv != nil { + pc, fn, line, _ := runtime.Caller(2) + m.log.Panic("init meta %s failed - %#v at %s[%s:%d]", m.id, + rcv, runtime.FuncForPC(pc).Name(), fn, line) + r = gen.TerminateReasonPanic + } + }() + } + return m.behavior.Init(m) +} + +func (m *meta) start() { + defer m.p.metas.Delete(m.id) + + if lib.Recover() { + defer func() { + if rcv := 
recover(); rcv != nil { + pc, fn, line, _ := runtime.Caller(2) + m.log.Panic("meta process %s terminated - %#v at %s[%s:%d]", m.id, + rcv, runtime.FuncForPC(pc).Name(), fn, line) + old := atomic.SwapInt32(&m.state, int32(gen.MetaStateTerminated)) + if old != int32(gen.MetaStateTerminated) { + m.p.node.aliases.Delete(m.id) + atomic.StoreInt32(&m.state, int32(gen.MetaStateTerminated)) + reason := gen.TerminateReasonPanic + m.p.node.RouteTerminateAlias(m.id, reason) + m.behavior.Terminate(reason) + } + } + }() + } + + // start meta process + m.creation = time.Now().Unix() + reason := m.behavior.Start() + // meta process terminated + old := atomic.SwapInt32(&m.state, int32(gen.MetaStateTerminated)) + if old != int32(gen.MetaStateTerminated) { + m.p.node.aliases.Delete(m.id) + if reason == nil { + reason = gen.TerminateReasonNormal + } + m.p.node.RouteTerminateAlias(m.id, reason) + m.behavior.Terminate(reason) + } +} + +func (m *meta) handle() { + var reason error + var result any + + if atomic.CompareAndSwapInt32(&m.state, int32(gen.MetaStateSleep), int32(gen.MetaStateRunning)) == false { + // running or terminated + return + } + + go func() { + var message *gen.MailboxMessage + + if lib.Recover() { + defer func() { + if rcv := recover(); rcv != nil { + pc, fn, line, _ := runtime.Caller(2) + m.log.Panic("meta process %s terminated - %#v at %s[%s:%d]", m.id, + rcv, runtime.FuncForPC(pc).Name(), fn, line) + + old := atomic.SwapInt32(&m.state, int32(gen.MetaStateTerminated)) + if old != int32(gen.MetaStateTerminated) { + m.p.node.aliases.Delete(m.id) + reason = gen.TerminateReasonPanic + m.p.node.RouteTerminateAlias(m.id, reason) + m.behavior.Terminate(reason) + } + } + }() + } + + next: + for { + reason = nil + result = nil + + if gen.MetaState(atomic.LoadInt32(&m.state)) != gen.MetaStateRunning { + // terminated + break + } + msg, ok := m.system.Pop() + if ok == false { + msg, ok = m.main.Pop() + if ok == false { + // no messages + break + } + } + + if message != nil { + gen.ReleaseMailboxMessage(message) + message = nil + } + + if message, ok = msg.(*gen.MailboxMessage); ok == false { + m.log.Error("got unknown mailbox message. ignored") + continue + } + + switch message.Type { + case gen.MailboxMessageTypeRegular: + reason = m.behavior.HandleMessage(message.From, message.Message) + if reason == nil { + continue + } + + case gen.MailboxMessageTypeRequest: + result, reason = m.behavior.HandleCall(message.From, message.Ref, message.Message) + options := gen.MessageOptions{ + Ref: message.Ref, + Priority: m.p.priority, + Compression: m.p.compression, + KeepNetworkOrder: m.p.keeporder, + } + if reason == nil { + if result != nil { + m.p.node.RouteSendResponse(m.p.pid, message.From, options, result) + } + continue + } + if reason == gen.TerminateReasonNormal && result != nil { + m.p.node.RouteSendResponse(m.p.pid, message.From, options, result) + } + case gen.MailboxMessageTypeInspect: + result := m.behavior.HandleInspect(message.From, message.Message.([]string)...) + options := gen.MessageOptions{ + Ref: message.Ref, + Priority: m.p.priority, + Compression: m.p.compression, + KeepNetworkOrder: m.p.keeporder, + } + m.p.node.RouteSendResponse(m.p.pid, message.From, options, result) + atomic.AddUint64(&m.messagesOut, 1) + continue + + case gen.MailboxMessageTypeExit: + if err, ok := message.Message.(error); ok { + reason = err + break + } + m.p.log.Error("got incorrect exit-message from %s. ignored", message.From) + continue + default: + + m.p.log.Error("got unknown mailbox message type %#v. 
ignored", message.Type) + continue + } + + // terminated + old := atomic.SwapInt32(&m.state, int32(gen.MetaStateTerminated)) + if old != int32(gen.MetaStateTerminated) { + m.p.node.aliases.Delete(m.id) + m.p.node.RouteTerminateAlias(m.id, reason) + m.behavior.Terminate(reason) + } + return + } + + if atomic.CompareAndSwapInt32(&m.state, int32(gen.MetaStateRunning), int32(gen.MetaStateSleep)) == false { + // terminated. seems the main loop is stopped. do nothing. + return + } + + // check if we got a new message + if m.system.Item() == nil { + if m.main.Item() == nil { + // no messages + return + } + } + + // got some... try to use this goroutine + if atomic.CompareAndSwapInt32(&m.state, int32(gen.MetaStateSleep), int32(gen.MetaStateRunning)) == false { + // another goroutine is already running + return + } + goto next + }() +} diff --git a/node/monitor.go b/node/monitor.go deleted file mode 100644 index 63843fb7..00000000 --- a/node/monitor.go +++ /dev/null @@ -1,1111 +0,0 @@ -package node - -// http://erlang.org/doc/reference_manual/processes.html - -import ( - "fmt" - "reflect" - "sync" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" -) - -type monitorItem struct { - pid etf.Pid // by - ref etf.Ref -} - -type eventItem struct { - owner etf.Pid - messageTypes map[string]bool - monitors []etf.Pid -} - -type monitorInternal interface { - // RouteLink - RouteLink(pidA etf.Pid, pidB etf.Pid) error - // RouteUnlink - RouteUnlink(pidA etf.Pid, pidB etf.Pid) error - // RouteExit - RouteExit(to etf.Pid, terminated etf.Pid, reason string) error - // RouteMonitorReg - RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error - // RouteMonitor - RouteMonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error - // RouteDemonitor - RouteDemonitor(by etf.Pid, ref etf.Ref) error - // RouteMonitorExitReg - RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error - // RouteMonitorExit - RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error - // RouteNodeDown - RouteNodeDown(name string, disconnect *ProxyDisconnect) - - // IsMonitor - IsMonitor(ref etf.Ref) bool - - monitorNode(by etf.Pid, node string, ref etf.Ref) - demonitorNode(ref etf.Ref) bool - - registerEvent(by etf.Pid, event gen.Event, messages []gen.EventMessage) error - unregisterEvent(by etf.Pid, event gen.Event) error - monitorEvent(by etf.Pid, event gen.Event) error - demonitorEvent(by etf.Pid, event gen.Event) error - sendEvent(by etf.Pid, event gen.Event, message gen.EventMessage) error - - handleTerminated(terminated etf.Pid, name, reason string) - - processLinks(process etf.Pid) []etf.Pid - processMonitors(process etf.Pid) []etf.Pid - processMonitorsByName(process etf.Pid) []gen.ProcessID - processMonitoredBy(process etf.Pid) []etf.Pid - - monitorStats() internalMonitorStats -} - -type internalMonitorStats struct { - monitorsByPid int - monitorsByName int - monitorsNodes int - links int -} - -type monitor struct { - // monitors by pid - processes map[etf.Pid][]monitorItem - ref2pid map[etf.Ref]etf.Pid - mutexProcesses sync.RWMutex - // monitors by name - names map[gen.ProcessID][]monitorItem - ref2name map[etf.Ref]gen.ProcessID - mutexNames sync.RWMutex - - // links - links map[etf.Pid][]etf.Pid - mutexLinks sync.RWMutex - - // monitors of nodes - nodes map[string][]monitorItem - ref2node map[etf.Ref]string - mutexNodes sync.RWMutex - - // monitors of events - events map[gen.Event]eventItem - pid2events 
map[etf.Pid][]gen.Event - mutexEvents sync.RWMutex - - nodename string - router coreRouterInternal -} - -func newMonitor(nodename string, router coreRouterInternal) monitorInternal { - return &monitor{ - processes: make(map[etf.Pid][]monitorItem), - names: make(map[gen.ProcessID][]monitorItem), - links: make(map[etf.Pid][]etf.Pid), - nodes: make(map[string][]monitorItem), - - ref2pid: make(map[etf.Ref]etf.Pid), - ref2name: make(map[etf.Ref]gen.ProcessID), - ref2node: make(map[etf.Ref]string), - - events: make(map[gen.Event]eventItem), - pid2events: make(map[etf.Pid][]gen.Event), - - nodename: nodename, - router: router, - } -} - -func (m *monitor) monitorNode(by etf.Pid, node string, ref etf.Ref) { - lib.Log("[%s] MONITOR NODE : %v => %s", m.nodename, by, node) - - m.mutexNodes.Lock() - - l := m.nodes[node] - item := monitorItem{ - pid: by, - ref: ref, - } - m.nodes[node] = append(l, item) - m.ref2node[ref] = node - m.mutexNodes.Unlock() - - _, err := m.router.getConnection(node) - if err != nil { - m.RouteNodeDown(node, nil) - } -} - -func (m *monitor) demonitorNode(ref etf.Ref) bool { - var name string - var ok bool - - m.mutexNodes.Lock() - defer m.mutexNodes.Unlock() - - if name, ok = m.ref2node[ref]; !ok { - return false - } - - l := m.nodes[name] - - // remove PID from monitoring processes list - for i := range l { - if l[i].ref != ref { - continue - } - - l[i] = l[0] - l = l[1:] - break - } - delete(m.ref2node, ref) - - if len(l) == 0 { - delete(m.nodes, name) - } else { - m.nodes[name] = l - } - - return true -} - -func (m *monitor) RouteNodeDown(name string, disconnect *ProxyDisconnect) { - lib.Log("[%s] MONITOR NODE down: %v", m.nodename, name) - - // notify node monitors - m.mutexNodes.RLock() - if pids, ok := m.nodes[name]; ok { - for i := range pids { - lib.Log("[%s] MONITOR node down: %v. 
send notify to: %s", m.nodename, name, pids[i].pid) - if disconnect == nil { - message := gen.MessageNodeDown{Ref: pids[i].ref, Name: name} - m.router.RouteSend(etf.Pid{}, pids[i].pid, message) - continue - } - message := gen.MessageProxyDown{ - Ref: pids[i].ref, - Node: disconnect.Node, - Proxy: disconnect.Proxy, - Reason: disconnect.Reason, - } - m.router.RouteSend(etf.Pid{}, pids[i].pid, message) - - } - delete(m.nodes, name) - } - m.mutexNodes.RUnlock() - - // notify processes created monitors by pid - m.mutexProcesses.Lock() - for pid, ps := range m.processes { - if string(pid.Node) != name { - continue - } - for i := range ps { - // args: (to, terminated, reason, ref) - delete(m.ref2pid, ps[i].ref) - if disconnect == nil || disconnect.Node == name { - m.sendMonitorExit(ps[i].pid, pid, "noconnection", ps[i].ref) - continue - } - m.sendMonitorExit(ps[i].pid, pid, "noproxy", ps[i].ref) - } - delete(m.processes, pid) - } - m.mutexProcesses.Unlock() - - // notify processes created monitors by name - m.mutexNames.Lock() - for processID, ps := range m.names { - if processID.Node != name { - continue - } - for i := range ps { - // args: (to, terminated, reason, ref) - delete(m.ref2name, ps[i].ref) - if disconnect == nil || disconnect.Node == name { - m.sendMonitorExitReg(ps[i].pid, processID, "noconnection", ps[i].ref) - continue - } - m.sendMonitorExitReg(ps[i].pid, processID, "noproxy", ps[i].ref) - } - delete(m.names, processID) - } - m.mutexNames.Unlock() - - // notify linked processes - m.mutexLinks.Lock() - for link, pids := range m.links { - if link.Node != etf.Atom(name) { - continue - } - - for i := range pids { - if disconnect == nil || disconnect.Node == name { - m.sendExit(pids[i], link, "noconnection") - } else { - m.sendExit(pids[i], link, "noproxy") - } - p, ok := m.links[pids[i]] - - if !ok { - continue - } - - for k := range p { - if p[k].Node != etf.Atom(name) { - continue - } - - p[k] = p[0] - p = p[1:] - - } - - if len(p) > 0 { - m.links[pids[i]] = p - continue - } - - delete(m.links, pids[i]) - } - - delete(m.links, link) - } - m.mutexLinks.Unlock() -} - -func (m *monitor) handleTerminated(terminated etf.Pid, name string, reason string) { - lib.Log("[%s] MONITOR process terminated: %v", m.nodename, terminated) - - // if terminated process had a name we should make shure to clean up them all - m.mutexNames.Lock() - if name != "" { - terminatedProcessID := gen.ProcessID{Name: name, Node: m.nodename} - if items, ok := m.names[terminatedProcessID]; ok { - for i := range items { - lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminatedProcessID, items[i].pid) - m.sendMonitorExitReg(items[i].pid, terminatedProcessID, reason, items[i].ref) - delete(m.ref2name, items[i].ref) - } - delete(m.names, terminatedProcessID) - } - } - m.mutexNames.Unlock() - - // check whether we have monitorItem on this process by Pid (terminated) - m.mutexProcesses.Lock() - if items, ok := m.processes[terminated]; ok { - - for i := range items { - lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid) - m.sendMonitorExit(items[i].pid, terminated, reason, items[i].ref) - delete(m.ref2pid, items[i].ref) - } - delete(m.processes, terminated) - } - m.mutexProcesses.Unlock() - - m.mutexLinks.Lock() - if pidLinks, ok := m.links[terminated]; ok { - for i := range pidLinks { - lib.Log("[%s] LINK process exited: %s. 
send notify to: %s", m.nodename, terminated, pidLinks[i]) - m.sendExit(pidLinks[i], terminated, reason) - - // remove A link - pids, ok := m.links[pidLinks[i]] - if !ok { - continue - } - for k := range pids { - if pids[k] != terminated { - continue - } - pids[k] = pids[0] - pids = pids[1:] - break - } - - if len(pids) > 0 { - m.links[pidLinks[i]] = pids - } else { - delete(m.links, pidLinks[i]) - } - } - // remove link - delete(m.links, terminated) - } - m.mutexLinks.Unlock() - - // check for event owning and monitoring - m.mutexEvents.Lock() - events, exist := m.pid2events[terminated] - if exist == false { - // this process hasn't been involved in any events - m.mutexEvents.Unlock() - return - } - - for _, e := range events { - item := m.events[e] - if item.owner == terminated { - message := gen.MessageEventDown{ - Event: e, - Reason: reason, - } - for _, pid := range item.monitors { - pidevents := m.pid2events[pid] - removed := 0 - for i := range pidevents { - if pidevents[i] != e { - continue - } - m.router.RouteSend(etf.Pid{}, pid, message) - pidevents[i] = pidevents[removed] - removed++ - } - pidevents = pidevents[removed:] - if len(pidevents) == 0 { - delete(m.pid2events, pid) - } else { - m.pid2events[pid] = pidevents - } - } - delete(m.events, e) - continue - } - - removed := 0 - for i := range item.monitors { - if item.monitors[i] != terminated { - continue - } - item.monitors[i] = item.monitors[removed] - removed++ - } - item.monitors = item.monitors[removed:] - m.events[e] = item - } - - delete(m.pid2events, terminated) - m.mutexEvents.Unlock() -} - -func (m *monitor) processLinks(process etf.Pid) []etf.Pid { - m.mutexLinks.RLock() - defer m.mutexLinks.RUnlock() - - if l, ok := m.links[process]; ok { - return l - } - return nil -} - -func (m *monitor) processMonitors(process etf.Pid) []etf.Pid { - monitors := []etf.Pid{} - m.mutexProcesses.RLock() - defer m.mutexProcesses.RUnlock() - - for p, by := range m.processes { - for b := range by { - if by[b].pid == process { - monitors = append(monitors, p) - } - } - } - return monitors -} - -func (m *monitor) processMonitorsByName(process etf.Pid) []gen.ProcessID { - monitors := []gen.ProcessID{} - m.mutexProcesses.RLock() - defer m.mutexProcesses.RUnlock() - - for processID, by := range m.names { - for b := range by { - if by[b].pid == process { - monitors = append(monitors, processID) - } - } - } - return monitors -} - -func (m *monitor) processMonitoredBy(process etf.Pid) []etf.Pid { - monitors := []etf.Pid{} - m.mutexProcesses.RLock() - defer m.mutexProcesses.RUnlock() - if m, ok := m.processes[process]; ok { - for i := range m { - monitors = append(monitors, m[i].pid) - } - - } - return monitors -} - -func (m *monitor) IsMonitor(ref etf.Ref) bool { - m.mutexProcesses.RLock() - defer m.mutexProcesses.RUnlock() - if _, ok := m.ref2pid[ref]; ok { - return true - } - if _, ok := m.ref2name[ref]; ok { - return true - } - return false -} - -// -// implementation of CoreRouter interface: -// -// RouteLink -// RouteUnlink -// RouteExit -// RouteMonitor -// RouteMonitorReg -// RouteDemonitor -// RouteMonitorExit -// RouteMonitorExitReg -// - -func (m *monitor) RouteLink(pidA etf.Pid, pidB etf.Pid) error { - lib.Log("[%s] LINK process: %v => %v", m.nodename, pidA, pidB) - - // http://erlang.org/doc/reference_manual/processes.html#links - // Links are bidirectional and there can only be one link between - // two processes. Repeated calls to link(Pid) have no effect. 
- - // Returns error if link is already exist or a process attempts to create - // a link to itself - - if pidA == pidB { - return fmt.Errorf("Can not link to itself") - } - - m.mutexLinks.RLock() - linksA := m.links[pidA] - if pidA.Node == etf.Atom(m.nodename) { - // check if these processes are linked already (source) - for i := range linksA { - if linksA[i] == pidB { - m.mutexLinks.RUnlock() - return fmt.Errorf("Already linked") - } - } - - } - m.mutexLinks.RUnlock() - - // check if these processes are linked already (destination) - m.mutexLinks.RLock() - linksB := m.links[pidB] - - for i := range linksB { - if linksB[i] == pidA { - m.mutexLinks.RUnlock() - return fmt.Errorf("Already linked") - } - } - m.mutexLinks.RUnlock() - - if pidB.Node == etf.Atom(m.nodename) { - // for the local process we should make sure if its alive - // otherwise send 'EXIT' message with 'noproc' as a reason - if p := m.router.processByPid(pidB); p == nil { - m.sendExit(pidA, pidB, "noproc") - return lib.ErrProcessUnknown - } - m.mutexLinks.Lock() - m.links[pidA] = append(linksA, pidB) - m.links[pidB] = append(linksB, pidA) - m.mutexLinks.Unlock() - return nil - } - - // linking with remote process - connection, err := m.router.getConnection(string(pidB.Node)) - if err != nil { - m.sendExit(pidA, pidB, "noconnection") - return nil - } - - if err := connection.Link(pidA, pidB); err != nil { - m.sendExit(pidA, pidB, err.Error()) - return nil - } - - m.mutexLinks.Lock() - m.links[pidA] = append(linksA, pidB) - m.links[pidB] = append(linksB, pidA) - m.mutexLinks.Unlock() - return nil -} - -func (m *monitor) RouteUnlink(pidA etf.Pid, pidB etf.Pid) error { - m.mutexLinks.Lock() - defer m.mutexLinks.Unlock() - - if pidA.Node == etf.Atom(m.nodename) { - linksA := m.links[pidA] - for i := range linksA { - if linksA[i] != pidB { - continue - } - - linksA[i] = linksA[0] - linksA = linksA[1:] - if len(linksA) > 0 { - m.links[pidA] = linksA - } else { - delete(m.links, pidA) - } - break - } - } - - linksB := m.links[pidB] - for i := range linksB { - if linksB[i] != pidA { - continue - } - linksB[i] = linksB[0] - linksB = linksB[1:] - if len(linksB) > 0 { - m.links[pidB] = linksB - } else { - delete(m.links, pidB) - } - break - - } - - if pidB.Node != etf.Atom(m.nodename) { - connection, err := m.router.getConnection(string(pidB.Node)) - if err != nil { - m.sendExit(pidA, pidB, "noconnection") - return err - } - if err := connection.Unlink(pidA, pidB); err != nil { - m.sendExit(pidA, pidB, err.Error()) - return err - } - } - return nil -} - -func (m *monitor) RouteExit(to etf.Pid, terminated etf.Pid, reason string) error { - m.mutexLinks.Lock() - defer m.mutexLinks.Unlock() - - pidLinks, ok := m.links[terminated] - if !ok { - return nil - } - for i := range pidLinks { - lib.Log("[%s] LINK process exited: %s. send notify to: %s", m.nodename, terminated, pidLinks[i]) - m.sendExit(pidLinks[i], terminated, reason) - - // remove A link - pids, ok := m.links[pidLinks[i]] - if !ok { - continue - } - for k := range pids { - if pids[k] != terminated { - continue - } - pids[k] = pids[0] - pids = pids[1:] - break - } - - if len(pids) > 0 { - m.links[pidLinks[i]] = pids - } else { - delete(m.links, pidLinks[i]) - } - } - // remove link - delete(m.links, terminated) - return nil - -} - -func (m *monitor) RouteMonitor(by etf.Pid, pid etf.Pid, ref etf.Ref) error { - lib.Log("[%s] MONITOR process: %s => %s", m.nodename, by, pid) - - // If 'process' belongs to this node we should make sure if its alive. 
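// (Editor's note: the same down-message contract covers remote targets in
// the code below: when the connection attempt or the connection.Monitor call
// fails, the requester still receives an immediate down message, with the
// reason mapped to "noconnection", "unsupported" or "incarnation" depending
// on the error returned.)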
- // http://erlang.org/doc/reference_manual/processes.html#monitors - // If Pid does not exist a gen.MessageDown must be - // send immediately with Reason set to noproc. - if p := m.router.processByPid(pid); string(pid.Node) == m.nodename && p == nil { - return m.sendMonitorExit(by, pid, "noproc", ref) - } - - if string(pid.Node) != m.nodename { - connection, err := m.router.getConnection(string(pid.Node)) - if err != nil { - m.sendMonitorExit(by, pid, "noconnection", ref) - return err - } - - if err := connection.Monitor(by, pid, ref); err != nil { - switch err { - case lib.ErrPeerUnsupported: - m.sendMonitorExit(by, pid, "unsupported", ref) - case lib.ErrProcessIncarnation: - m.sendMonitorExit(by, pid, "incarnation", ref) - default: - m.sendMonitorExit(by, pid, "noconnection", ref) - } - return err - } - } - - m.mutexProcesses.Lock() - l := m.processes[pid] - item := monitorItem{ - pid: by, - ref: ref, - } - m.processes[pid] = append(l, item) - m.ref2pid[ref] = pid - m.mutexProcesses.Unlock() - - return nil -} - -func (m *monitor) RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error { - // If 'process' belongs to this node and does not exist a gen.MessageDown must be - // send immediately with Reason set to noproc. - if p := m.router.ProcessByName(process.Name); process.Node == m.nodename && p == nil { - return m.sendMonitorExitReg(by, process, "noproc", ref) - } - if process.Node != m.nodename { - connection, err := m.router.getConnection(process.Node) - if err != nil { - m.sendMonitorExitReg(by, process, "noconnection", ref) - return err - } - - if err := connection.MonitorReg(by, process, ref); err != nil { - if err == lib.ErrPeerUnsupported { - m.sendMonitorExitReg(by, process, "unsupported", ref) - } else { - m.sendMonitorExitReg(by, process, "noconnection", ref) - } - return err - } - } - - m.mutexNames.Lock() - l := m.names[process] - item := monitorItem{ - pid: by, - ref: ref, - } - m.names[process] = append(l, item) - m.ref2name[ref] = process - m.mutexNames.Unlock() - - return nil -} - -func (m *monitor) RouteDemonitor(by etf.Pid, ref etf.Ref) error { - m.mutexProcesses.RLock() - pid, knownRefByPid := m.ref2pid[ref] - m.mutexProcesses.RUnlock() - - if knownRefByPid == false { - // monitor was created by process name - m.mutexNames.Lock() - defer m.mutexNames.Unlock() - processID, knownRefByName := m.ref2name[ref] - if knownRefByName == false { - // unknown monitor reference - return lib.ErrMonitorUnknown - } - items := m.names[processID] - - for i := range items { - if items[i].pid != by { - continue - } - if items[i].ref != ref { - continue - } - - items[i] = items[0] - items = items[1:] - - if len(items) == 0 { - delete(m.names, processID) - } else { - m.names[processID] = items - } - delete(m.ref2name, ref) - - if processID.Node != m.nodename { - connection, err := m.router.getConnection(processID.Node) - if err != nil { - return err - } - return connection.DemonitorReg(by, processID, ref) - } - return nil - } - return nil - } - - // monitor was created by pid - - // cheching for monitorItem list - m.mutexProcesses.Lock() - defer m.mutexProcesses.Unlock() - items := m.processes[pid] - - // remove PID from monitoring processes list - for i := range items { - if items[i].pid != by { - continue - } - if items[i].ref != ref { - continue - } - - items[i] = items[0] - items = items[1:] - - if len(items) == 0 { - delete(m.processes, pid) - } else { - m.processes[pid] = items - } - delete(m.ref2pid, ref) - - if string(pid.Node) != m.nodename { - connection, err := 
m.router.getConnection(string(pid.Node)) - if err != nil { - return err - } - return connection.Demonitor(by, pid, ref) - } - - return nil - } - return nil -} - -func (m *monitor) RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error { - m.mutexProcesses.Lock() - defer m.mutexProcesses.Unlock() - - items, ok := m.processes[terminated] - if !ok { - return nil - } - - for i := range items { - lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid) - if items[i].ref != ref { - continue - } - - delete(m.ref2pid, items[i].ref) - m.sendMonitorExit(items[i].pid, terminated, reason, items[i].ref) - - items[i] = items[0] - items = items[1:] - if len(items) == 0 { - delete(m.processes, terminated) - return nil - } - m.processes[terminated] = items - return nil - } - - return nil -} - -func (m *monitor) RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error { - m.mutexNames.Lock() - defer m.mutexNames.Unlock() - - items, ok := m.names[terminated] - if !ok { - return nil - } - - for i := range items { - lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid) - if items[i].ref != ref { - continue - } - - delete(m.ref2name, items[i].ref) - m.sendMonitorExitReg(items[i].pid, terminated, reason, items[i].ref) - - items[i] = items[0] - items = items[1:] - if len(items) == 0 { - delete(m.names, terminated) - return nil - } - m.names[terminated] = items - return nil - } - - return nil -} - -func (m *monitor) sendMonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error { - if string(to.Node) != m.nodename { - // remote - if reason == "noconnection" { - // do nothing. it was a monitor created by the remote node we lost connection to. 
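// (Editor's note: dropping the notification here is safe because the peer
// detects the broken connection itself and runs its own RouteNodeDown
// handling, which produces the corresponding down messages locally on that
// node.)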
- return nil - } - - connection, err := m.router.getConnection(string(to.Node)) - if err != nil { - return err - } - - return connection.MonitorExit(to, terminated, reason, ref) - } - - // local - down := gen.MessageDown{ - Ref: ref, - Pid: terminated, - Reason: reason, - } - from := to - return m.router.RouteSend(from, to, down) -} - -func (m *monitor) sendMonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error { - if string(to.Node) != m.nodename { - // remote - if reason == "noconnection" { - // do nothing - return nil - } - - connection, err := m.router.getConnection(string(to.Node)) - if err != nil { - return err - } - - return connection.MonitorExitReg(to, terminated, reason, ref) - } - - // local - down := gen.MessageDown{ - Ref: ref, - ProcessID: terminated, - Reason: reason, - } - from := to - return m.router.RouteSend(from, to, down) -} - -func (m *monitor) sendExit(to etf.Pid, terminated etf.Pid, reason string) error { - // for remote: {3, FromPid, ToPid, Reason} - if to.Node != etf.Atom(m.nodename) { - if reason == "noconnection" { - return nil - } - connection, err := m.router.getConnection(string(to.Node)) - if err != nil { - return err - } - return connection.LinkExit(to, terminated, reason) - } - - // check if 'to' process is still alive - if p := m.router.processByPid(to); p != nil { - p.exit(terminated, reason) - return nil - } - return lib.ErrProcessUnknown -} - -func (m *monitor) registerEvent(by etf.Pid, event gen.Event, messages []gen.EventMessage) error { - m.mutexEvents.Lock() - defer m.mutexEvents.Unlock() - if _, taken := m.events[event]; taken { - return lib.ErrTaken - } - events, _ := m.pid2events[by] - events = append(events, event) - m.pid2events[by] = events - - mt := make(map[string]bool) - for _, m := range messages { - t := reflect.TypeOf(m) - st := t.PkgPath() + "/" + t.Name() - mt[st] = true - } - item := eventItem{ - owner: by, - messageTypes: mt, - } - m.events[event] = item - return nil -} - -func (m *monitor) unregisterEvent(by etf.Pid, event gen.Event) error { - m.mutexEvents.Lock() - defer m.mutexEvents.Unlock() - - item, exist := m.events[event] - if exist == false { - return lib.ErrEventUnknown - } - if item.owner != by { - return lib.ErrEventOwner - } - message := gen.MessageEventDown{ - Event: event, - Reason: "unregistered", - } - - monitors := append(item.monitors, by) - for _, pid := range monitors { - events, _ := m.pid2events[pid] - removed := 0 - for i := range events { - if events[i] != event { - continue - } - if pid != by { - m.router.RouteSend(etf.Pid{}, pid, message) - } - events[i] = events[removed] - removed++ - } - events = events[removed:] - - if len(events) == 0 { - delete(m.pid2events, pid) - } else { - m.pid2events[pid] = events - } - - } - - delete(m.events, event) - return nil -} - -func (m *monitor) monitorEvent(by etf.Pid, event gen.Event) error { - m.mutexEvents.Lock() - defer m.mutexEvents.Unlock() - - item, exist := m.events[event] - if exist == false { - return lib.ErrEventUnknown - } - if item.owner == by { - return lib.ErrEventSelf - } - item.monitors = append(item.monitors, by) - m.events[event] = item - - events, exist := m.pid2events[by] - events = append(events, event) - m.pid2events[by] = events - return nil -} - -func (m *monitor) demonitorEvent(by etf.Pid, event gen.Event) error { - m.mutexEvents.Lock() - defer m.mutexEvents.Unlock() - - item, exist := m.events[event] - if exist == false { - return lib.ErrEventUnknown - } - removed := 0 - for i := range item.monitors { - if 
item.monitors[i] != by { - continue - } - - item.monitors[i] = item.monitors[removed] - removed++ - } - item.monitors = item.monitors[removed:] - m.events[event] = item - - events, _ := m.pid2events[by] - - removed = 0 - for i := range events { - if events[i] != event { - continue - } - events[i] = events[removed] - } - events = events[removed:] - - if len(events) == 0 { - delete(m.pid2events, by) - } else { - m.pid2events[by] = events - } - - return nil -} - -func (m *monitor) sendEvent(by etf.Pid, event gen.Event, message gen.EventMessage) error { - m.mutexEvents.RLock() - defer m.mutexEvents.RUnlock() - - item, exist := m.events[event] - if exist == false { - return lib.ErrEventUnknown - } - if item.owner != by { - return lib.ErrEventOwner - } - - t := reflect.TypeOf(message) - st := t.PkgPath() + "/" + t.Name() - if _, exist := item.messageTypes[st]; exist == false { - return lib.ErrEventMismatch - } - - // TODO clean up terminated subscribers - for _, pid := range item.monitors { - m.router.RouteSend(etf.Pid{}, pid, message) - } - - return nil -} - -func (m *monitor) monitorStats() internalMonitorStats { - stats := internalMonitorStats{} - m.mutexProcesses.RLock() - stats.monitorsByPid = len(m.processes) - m.mutexProcesses.RUnlock() - - m.mutexNames.RLock() - stats.monitorsByName = len(m.names) - m.mutexNames.RUnlock() - - m.mutexNodes.RLock() - stats.monitorsNodes = len(m.nodes) - m.mutexNodes.RUnlock() - - m.mutexLinks.RLock() - stats.links = len(m.links) - m.mutexLinks.RUnlock() - return stats -} diff --git a/node/network.go b/node/network.go index 6a40692b..6b7597f5 100644 --- a/node/network.go +++ b/node/network.go @@ -1,1589 +1,1258 @@ package node import ( - "bytes" "context" - "encoding/binary" + "crypto/tls" + "fmt" "io" + "net" + "reflect" + "strconv" + "strings" "sync" + "sync/atomic" "time" - "crypto/aes" - "crypto/md5" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "fmt" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/net/handshake" + "ergo.services/ergo/net/proto" + "ergo.services/ergo/net/registrar" +) - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" +func createNetwork(node *node) *network { + n := &network{ + node: node, + staticRoutes: &staticRoutes{}, + staticProxies: &staticProxies{}, + defaultHandshake: handshake.Create(handshake.Options{}), + defaultProto: proto.Create(), + } + // register standard handshake and proto + n.RegisterHandshake(n.defaultHandshake) + n.RegisterProto(n.defaultProto) + return n +} - "net" +type network struct { + running atomic.Bool - "strconv" - "strings" -) + mode gen.NetworkMode + flags gen.NetworkFlags + skipverify bool -type networkInternal interface { - // add/remove static route - AddStaticRoute(node string, host string, port uint16, options RouteOptions) error - AddStaticRoutePort(node string, port uint16, options RouteOptions) error - AddStaticRouteOptions(node string, options RouteOptions) error - RemoveStaticRoute(node string) bool - StaticRoutes() []Route - StaticRoute(name string) (Route, bool) - - // add/remove proxy route - AddProxyRoute(route ProxyRoute) error - RemoveProxyRoute(name string) bool - ProxyRoutes() []ProxyRoute - ProxyRoute(name string) (ProxyRoute, bool) - - Registrar() Registrar - Resolve(peername string) (Route, error) - ResolveProxy(peername string) (ProxyRoute, error) - - Connect(peername string) error - Disconnect(peername string) error - Nodes() []string - NodesIndirect() 
[]string - - // stats - NetworkStats(name string) (NetworkStats, error) - - // core router methods - RouteProxyConnectRequest(from ConnectionInterface, request ProxyConnectRequest) error - RouteProxyConnectReply(from ConnectionInterface, reply ProxyConnectReply) error - RouteProxyConnectCancel(from ConnectionInterface, cancel ProxyConnectCancel) error - RouteProxyDisconnect(from ConnectionInterface, disconnect ProxyDisconnect) error - RouteProxy(from ConnectionInterface, sessionID string, packet *lib.Buffer) error - - getConnection(peername string) (ConnectionInterface, error) - stopNetwork() - - networkStats() internalNetworkStats + node *node + registrar gen.Registrar + + acceptors []*acceptor + + defaultHandshake gen.NetworkHandshake + defaultProto gen.NetworkProto + + handshakes sync.Map // .Version().String() -> handshake + protos sync.Map // .Version().String() -> proto + + cookie string + maxmessagesize int + + staticRoutes *staticRoutes + staticProxies *staticProxies + + enableSpawn sync.Map + enableAppStart sync.Map + + connections sync.Map // gen.Atom (peer name) => gen.Connection } -type internalNetworkStats struct { - transitConnections int - proxyConnections int - connections int +func (n *network) Registrar() (gen.Registrar, error) { + if n.running.Load() == false { + return nil, gen.ErrNetworkStopped + } + return n.registrar, nil } -type connectionInternal struct { - // conn. has nil value for the proxy connection - conn net.Conn - // connection interface of the network connection - connection ConnectionInterface - // - proxySessionID string - // - proxyTransitTo map[string]bool +func (n *network) Cookie() string { + return n.cookie +} +func (n *network) SetCookie(cookie string) error { + n.cookie = cookie + if lib.Trace() { + n.node.Log().Trace("updated cookie") + } + return nil } -type network struct { - nodename string - cookie string - ctx context.Context - listeners []net.Listener - - registrar Registrar - staticOnly bool - staticRoutes map[string]Route - staticRoutesMutex sync.RWMutex - - proxyRoutes map[string]ProxyRoute - proxyRoutesMutex sync.RWMutex - - connections map[string]connectionInternal - connectionsProxy map[ConnectionInterface][]string // peers via proxy - connectionsTransit map[ConnectionInterface][]string // transit session IDs - connectionsMutex sync.RWMutex - - proxyTransitSessions map[string]proxyTransitSession - proxyTransitSessionsMutex sync.RWMutex - - proxyConnectRequest map[etf.Ref]proxyConnectRequest - proxyConnectRequestMutex sync.RWMutex - - tls *tls.Config - proxy Proxy - version Version - creation uint32 - flags Flags - - router coreRouterInternal - handshake HandshakeInterface - proto ProtoInterface - - remoteSpawn map[string]gen.ProcessBehavior - remoteSpawnMutex sync.Mutex +func (n *network) NetworkFlags() gen.NetworkFlags { + return n.flags } -func newNetwork(ctx context.Context, nodename string, cookie string, options Options, router coreRouterInternal) (networkInternal, error) { - n := &network{ - nodename: nodename, - cookie: cookie, - ctx: ctx, - tls: options.TLS, - staticOnly: options.StaticRoutesOnly, - staticRoutes: make(map[string]Route), - proxyRoutes: make(map[string]ProxyRoute), - connections: make(map[string]connectionInternal), - connectionsProxy: make(map[ConnectionInterface][]string), - connectionsTransit: make(map[ConnectionInterface][]string), - proxyTransitSessions: make(map[string]proxyTransitSession), - proxyConnectRequest: make(map[etf.Ref]proxyConnectRequest), - remoteSpawn: make(map[string]gen.ProcessBehavior), - 
flags: options.Flags, - proxy: options.Proxy, - registrar: options.Registrar, - handshake: options.Handshake, - proto: options.Proto, - router: router, - creation: options.Creation, - } - - splitNodeHost := strings.Split(nodename, "@") - if len(splitNodeHost) != 2 || splitNodeHost[0] == "" || splitNodeHost[1] == "" { - return nil, fmt.Errorf("FQDN for node name is required (example: node@hostname)") - } - - if n.proxy.Flags.Enable == false { - n.proxy.Flags = DefaultProxyFlags() - } - - n.version, _ = options.Env[EnvKeyVersion].(Version) - - if len(options.Listeners) == 0 { - return nil, fmt.Errorf("no listeners defined") - } - for i, lo := range options.Listeners { - if lo.TLS == nil { - lo.TLS = options.TLS - } - if lo.Handshake == nil { - lo.Handshake = options.Handshake - } - if lo.Proto == nil { - lo.Proto = options.Proto - } - if lo.Flags.Enable == false { - lo.Flags = options.Flags - } - if lo.Cookie == "" { - lo.Cookie = cookie - } +func (n *network) SetNetworkFlags(flags gen.NetworkFlags) { + if flags.Enable == false { + flags = gen.DefaultNetworkFlags + } + n.flags = flags +} - if err := lo.Handshake.Init(n.nodename, n.creation, lo.Flags); err != nil { - return nil, err - } +func (n *network) MaxMessageSize() int { + return n.maxmessagesize +} - if lo.Listen > 0 { - lo.ListenBegin = lo.Listen - lo.ListenEnd = lo.Listen - lib.Log("Node listener[%d] port: %d", i, lo.Listen) - } else { - if lo.ListenBegin == 0 { - lo.ListenBegin = defaultListenBegin - } - if lo.ListenEnd == 0 { - lo.ListenEnd = defaultListenEnd - } - lib.Log("Node listener[%d] port range: %d...%d", i, lo.ListenBegin, lo.ListenEnd) - } - register := i == 0 - listener, err := n.listen(ctx, splitNodeHost[1], lo, register) - if err != nil { - // close all listening sockets - n.stopNetwork() - return nil, err - } - n.listeners = append(n.listeners, listener) +func (n *network) SetMaxMessageSize(size int) { + if size < 0 { + size = 0 } - - return n, nil + n.maxmessagesize = size } -func (n *network) stopNetwork() { - for _, l := range n.listeners { - l.Close() +func (n *network) Acceptors() ([]gen.Acceptor, error) { + var acceptors []gen.Acceptor + if n.running.Load() == false { + return nil, gen.ErrNetworkStopped } - n.connectionsMutex.RLock() - defer n.connectionsMutex.RUnlock() - for _, ci := range n.connections { - if ci.conn == nil { - continue - } - ci.conn.Close() + for _, acceptor := range n.acceptors { + acceptors = append(acceptors, acceptor) } + return acceptors, nil } -// AddStaticRouteOptions adds static options for the given node. 
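// (Editor's note: compare with the v3 flow added below, where
// GetNodeWithRoute consults route.Resolver.Resolve(name) and falls back to
// the default handshake/proto versions when the route leaves them empty;
// the deleted AddStaticRoute* family instead kept routes in a
// node-name-keyed map guarded by staticRoutesMutex.)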
-func (n *network) AddStaticRouteOptions(node string, options RouteOptions) error { - if n.staticOnly { - return fmt.Errorf("can't be used if enabled StaticRoutesOnly") +func (n *network) Node(name gen.Atom) (gen.RemoteNode, error) { + c, err := n.Connection(name) + if err != nil { + return nil, err } - return n.AddStaticRoute(node, "", 0, options) + return c.Node(), nil } -// AddStaticRoutePort adds a static route to the node with the given name -func (n *network) AddStaticRoutePort(node string, port uint16, options RouteOptions) error { - ns := strings.Split(node, "@") - if port < 1 { - return fmt.Errorf("port must be greater 0") - } - if len(ns) != 2 { - return fmt.Errorf("wrong FQDN") +func (n *network) GetNode(name gen.Atom) (gen.RemoteNode, error) { + c, err := n.GetConnection(name) + if err != nil { + return nil, err } - return n.AddStaticRoute(node, ns[1], port, options) - + return c.Node(), nil } -// AddStaticRoute adds a static route to the node with the given name -func (n *network) AddStaticRoute(node string, host string, port uint16, options RouteOptions) error { - if len(strings.Split(node, "@")) != 2 { - return fmt.Errorf("wrong FQDN") - } +func (n *network) GetNodeWithRoute(name gen.Atom, route gen.NetworkRoute) (gen.RemoteNode, error) { + var emptyVersion gen.Version - if port > 0 { - if _, err := net.LookupHost(host); err != nil { - return err + route.InsecureSkipVerify = n.skipverify + + if route.Resolver != nil { + resolved, err := route.Resolver.Resolve(name) + if err != nil { + return nil, err + } + route.Route.Port = resolved[0].Port + route.Route.TLS = resolved[0].TLS + if route.Route.HandshakeVersion == emptyVersion { + route.Route.HandshakeVersion = resolved[0].HandshakeVersion + } + if route.Route.ProtoVersion == emptyVersion { + route.Route.ProtoVersion = resolved[0].ProtoVersion + } + if route.Route.Host == "" { + route.Route.Host = resolved[0].Host } } - route := Route{ - Node: node, - Host: host, - Port: port, - Options: options, + if route.Route.Port == 0 { + return nil, gen.ErrNoRoute } - n.staticRoutesMutex.Lock() - defer n.staticRoutesMutex.Unlock() + if route.Route.HandshakeVersion == emptyVersion { + route.Route.HandshakeVersion = n.defaultHandshake.Version() + } - _, exist := n.staticRoutes[node] - if exist { - return lib.ErrTaken + if route.Route.ProtoVersion == emptyVersion { + route.Route.ProtoVersion = n.defaultProto.Version() } - if options.Handshake != nil { - if err := options.Handshake.Init(n.nodename, n.creation, n.flags); err != nil { - return err - } + c, err := n.connect(name, route) + if err != nil { + return nil, err } - n.staticRoutes[node] = route + return c.Node(), nil +} +func (n *network) AddRoute(match string, route gen.NetworkRoute, weight int) error { + var emptyVersion gen.Version + if route.Route.HandshakeVersion == emptyVersion { + route.Route.HandshakeVersion = n.defaultHandshake.Version() + } + if route.Route.ProtoVersion == emptyVersion { + route.Route.ProtoVersion = n.defaultProto.Version() + } + if err := n.staticRoutes.add(match, route, weight); err != nil { + return err + } + if lib.Trace() { + n.node.Log().Trace("added static route %s with weight %d", match, weight) + } return nil } -// RemoveStaticRoute removes static route record. Returns false if it doesn't exist. 
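// ---------------------------------------------------------------------------
// Editor's note: AddRoute above fills in the default handshake/proto versions
// before storing the route. A sketch of registering a static route for a peer
// the registrar cannot resolve (host, port and cookie are made-up values; the
// match argument is matched against the peer node name):
//
//	func addStaticRoute(node gen.Node) error {
//		route := gen.NetworkRoute{
//			Route: gen.Route{
//				Host: "10.0.0.5",
//				Port: 11144,
//				TLS:  true,
//			},
//			Cookie: "per-route-cookie", // overrides the node cookie for this peer
//		}
//		// weight orders the routes when several patterns match
//		return node.Network().AddRoute("peer@10.0.0.5", route, 100)
//	}
// ---------------------------------------------------------------------------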
-func (n *network) RemoveStaticRoute(node string) bool { - n.staticRoutesMutex.Lock() - defer n.staticRoutesMutex.Unlock() - _, exist := n.staticRoutes[node] - if exist { - delete(n.staticRoutes, node) - return true +func (n *network) RemoveRoute(match string) error { + if err := n.staticRoutes.remove(match); err != nil { + return err } - return false + if lib.Trace() { + n.node.Log().Trace("removed static route %s", match) + } + return nil } -// StaticRoutes returns list of static routes added with AddStaticRoute -func (n *network) StaticRoutes() []Route { - var routes []Route +func (n *network) Route(name gen.Atom) ([]gen.NetworkRoute, error) { + if routes, found := n.staticRoutes.lookup(string(name)); found { + return routes, nil + } + return nil, gen.ErrNoRoute +} - n.staticRoutesMutex.RLock() - defer n.staticRoutesMutex.RUnlock() - for _, v := range n.staticRoutes { - routes = append(routes, v) +func (n *network) AddProxyRoute(match string, route gen.NetworkProxyRoute, weight int) error { + if err := n.staticProxies.add(match, route, weight); err != nil { + return err } - return routes + if lib.Trace() { + n.node.Log().Trace("added static proxy route %s with weight %d", match, weight) + } + return nil } -func (n *network) StaticRoute(name string) (Route, bool) { - n.staticRoutesMutex.RLock() - defer n.staticRoutesMutex.RUnlock() - route, exist := n.staticRoutes[name] - return route, exist +func (n *network) RemoveProxyRoute(match string) error { + if err := n.staticProxies.remove(match); err != nil { + return err + } + if lib.Trace() { + n.node.Log().Trace("removed static proxy route %s", match) + } + return nil } -func (n *network) getConnectionDirect(peername string, connect bool) (ConnectionInterface, error) { - n.connectionsMutex.RLock() - ci, ok := n.connections[peername] - n.connectionsMutex.RUnlock() - if ok { - return ci.connection, nil +func (n *network) ProxyRoute(name gen.Atom) ([]gen.NetworkProxyRoute, error) { + + if routes, found := n.staticProxies.lookup(string(name)); found { + return routes, nil } + return nil, gen.ErrNoRoute +} + +type enableSpawn struct { + sync.RWMutex + factory gen.ProcessFactory + behavior string + nodes map[gen.Atom]bool +} + +func (n *network) EnableSpawn(name gen.Atom, factory gen.ProcessFactory, nodes ...gen.Atom) error { - if connect == false { - return nil, lib.ErrNoRoute + if factory == nil { + return gen.ErrIncorrect } - connection, err := n.connect(peername) - if err != nil { - lib.Log("[%s] CORE no route to node %q: %s", n.nodename, peername, err) - return nil, lib.ErrNoRoute + enable := &enableSpawn{ + factory: factory, + nodes: make(map[gen.Atom]bool), + behavior: strings.TrimPrefix(reflect.TypeOf(factory()).String(), "*"), + } + + v, exist := n.enableSpawn.LoadOrStore(name, enable) + if exist { + enable = v.(*enableSpawn) + if reflect.TypeOf(enable.factory()) != reflect.TypeOf(factory()) { + return fmt.Errorf("%s associated with another process factory", name) + } + } + enable.Lock() + if len(nodes) == 0 { + // allow any node to spawn this process (make nodes map empty) + enable.nodes = make(map[gen.Atom]bool) + } else { + for _, nn := range nodes { + enable.nodes[nn] = true + } } - return connection, nil + enable.Unlock() + return nil } -// getConnection -func (n *network) getConnection(peername string) (ConnectionInterface, error) { - if peername == n.nodename { - // can't connect to itself - return nil, lib.ErrNoRoute +func (n *network) getEnabledSpawn(name gen.Atom, source gen.Atom) (gen.ProcessFactory, error) { + v, found := 
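// ---------------------------------------------------------------------------
// Editor's note: a sketch of allowing selected peers to spawn a process on
// this node by name, matching the EnableSpawn signature above. act.Actor and
// the factory pattern come from act/actor.go in this patch; the peer names
// are examples. With an empty node list any connected peer would be allowed.
//
//	type worker struct {
//		act.Actor
//	}
//
//	func factoryWorker() gen.ProcessBehavior {
//		return &worker{}
//	}
//
//	func enableRemoteSpawn(node gen.Node) error {
//		return node.Network().EnableSpawn("worker", factoryWorker,
//			"node2@localhost", "node3@localhost")
//	}
// ---------------------------------------------------------------------------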
n.enableSpawn.Load(name) + if found == false { + return nil, gen.ErrNameUnknown } - n.connectionsMutex.RLock() - ci, ok := n.connections[peername] - n.connectionsMutex.RUnlock() - if ok { - lib.Log("[%s] NETWORK found active connection with %s", n.nodename, peername) - return ci.connection, nil + enable := v.(*enableSpawn) + allowed := true + enable.RLock() + if len(enable.nodes) > 0 { + allowed = enable.nodes[source] } - - // try to connect via proxy if there ProxyRoute was presented for this peer - request := ProxyConnectRequest{ - ID: n.router.MakeRef(), - To: peername, - Creation: n.creation, + enable.RUnlock() + if allowed == false { + return nil, gen.ErrNotAllowed } + return enable.factory, nil +} - if err := n.RouteProxyConnectRequest(nil, request); err != nil { - if err != lib.ErrProxyNoRoute { - return nil, err +func (n *network) listEnabledSpawn() []gen.NetworkSpawnInfo { + info := []gen.NetworkSpawnInfo{} + + n.enableSpawn.Range(func(k, v any) bool { + enable := v.(*enableSpawn) + nsi := gen.NetworkSpawnInfo{ + Name: k.(gen.Atom), + Behavior: enable.behavior, } + enable.RLock() + for peer, en := range enable.nodes { + if en == false { + continue + } + nsi.Nodes = append(nsi.Nodes, peer) + } + enable.RUnlock() + info = append(info, nsi) + return true + }) + return info +} - // there wasn't proxy presented. try to connect directly. - connection, err := n.getConnectionDirect(peername, true) - return connection, err +func (n *network) DisableSpawn(name gen.Atom, nodes ...gen.Atom) error { + if len(nodes) == 0 { + if _, exist := n.enableSpawn.LoadAndDelete(name); exist == false { + return gen.ErrUnknown + } + return nil } - - connection, err := n.waitProxyConnection(request.ID, 5) - if err != nil { - return nil, err + v, exist := n.enableSpawn.Load(name) + if exist == false { + return gen.ErrUnknown + } + enable := v.(*enableSpawn) + enable.Lock() + for _, nn := range nodes { + enable.nodes[nn] = false } + enable.Unlock() + return nil +} - return connection, nil +type enableAppStart struct { + sync.RWMutex + nodes map[gen.Atom]bool } -// Resolve -func (n *network) Resolve(node string) (Route, error) { - n.staticRoutesMutex.RLock() - defer n.staticRoutesMutex.RUnlock() - - if r, ok := n.staticRoutes[node]; ok { - if r.Port == 0 { - // use static option for this route - route, err := n.registrar.Resolve(node) - route.Options = r.Options - return route, err - } - return r, nil +func (n *network) EnableApplicationStart(name gen.Atom, nodes ...gen.Atom) error { + enable := &enableAppStart{ + nodes: make(map[gen.Atom]bool), } - if n.staticOnly { - return Route{}, lib.ErrNoRoute + v, exist := n.enableAppStart.LoadOrStore(name, enable) + if exist { + enable = v.(*enableAppStart) } + enable.Lock() + if len(nodes) == 0 { + // allow any node to start this app (make nodes map empty) + enable.nodes = make(map[gen.Atom]bool) + } else { + for _, nn := range nodes { + enable.nodes[nn] = true + } + } + enable.Unlock() - return n.registrar.Resolve(node) + return nil } -// ResolveProxy -func (n *network) ResolveProxy(name string) (ProxyRoute, error) { - n.proxyRoutesMutex.RLock() - defer n.proxyRoutesMutex.RUnlock() - route, found := n.proxyRoutes[name] +func (n *network) isEnabledApplicationStart(name gen.Atom, source gen.Atom) error { + v, found := n.enableAppStart.Load(name) if found == false { - sn := strings.Split(name, "@") - if len(sn) != 2 { - return route, lib.ErrUnknown - } - domain := "@" + sn[1] - route, found = n.proxyRoutes[domain] + return gen.ErrNameUnknown } - if found == false { - 
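// ---------------------------------------------------------------------------
// Editor's note: EnableApplicationStart above is the application counterpart
// of EnableSpawn. A sketch (application name and peer are examples; with no
// node list, any connected peer may start the application):
//
//	func enableRemoteAppStart(node gen.Node) error {
//		return node.Network().EnableApplicationStart("observer_app",
//			"node2@localhost")
//	}
// ---------------------------------------------------------------------------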
return n.registrar.ResolveProxy(name) + enable := v.(*enableAppStart) + allowed := true + enable.RLock() + if len(enable.nodes) > 0 { + allowed = enable.nodes[source] } - if route.Proxy == "" { - r, err := n.registrar.ResolveProxy(name) - if err != nil { - return route, err - } - route.Proxy = r.Proxy + enable.RUnlock() + if allowed == false { + return gen.ErrNotAllowed } - return route, nil + return nil } -// Registrar -func (n *network) Registrar() Registrar { - return n.registrar -} +func (n *network) listEnabledApplicationStart() []gen.NetworkApplicationStartInfo { + info := []gen.NetworkApplicationStartInfo{} -// Connect -func (n *network) Connect(node string) error { - _, err := n.getConnection(node) - return err + n.enableAppStart.Range(func(k, v any) bool { + nas := gen.NetworkApplicationStartInfo{ + Name: k.(gen.Atom), + } + enable := v.(*enableAppStart) + enable.RLock() + for peer, en := range enable.nodes { + if en == false { + continue + } + nas.Nodes = append(nas.Nodes, peer) + } + enable.RUnlock() + info = append(info, nas) + return true + }) + return info } -// Disconnect -func (n *network) Disconnect(node string) error { - n.connectionsMutex.RLock() - ci, ok := n.connections[node] - n.connectionsMutex.RUnlock() - if !ok { - return lib.ErrNoRoute - } - - if ci.conn == nil { - // this is proxy connection - disconnect := ProxyDisconnect{ - Node: n.nodename, - Proxy: n.nodename, - SessionID: ci.proxySessionID, - Reason: "normal", +func (n *network) DisableApplicationStart(name gen.Atom, nodes ...gen.Atom) error { + if len(nodes) == 0 { + if _, exist := n.enableAppStart.LoadAndDelete(name); exist == false { + return gen.ErrUnknown } - n.unregisterConnection(node, &disconnect) - return ci.connection.ProxyDisconnect(disconnect) + return nil } - - ci.conn.Close() + v, exist := n.enableAppStart.Load(name) + if exist == false { + return gen.ErrUnknown + } + enable := v.(*enableAppStart) + enable.Lock() + for _, nn := range nodes { + delete(enable.nodes, nn) + } + enable.Unlock() return nil } -// Nodes -func (n *network) Nodes() []string { - list := []string{} - n.connectionsMutex.RLock() - defer n.connectionsMutex.RUnlock() +func (n *network) RegisterHandshake(handshake gen.NetworkHandshake) { + if handshake == nil { + n.node.Log().Error("unable to register nil value as a handshake") + return + } + _, exist := n.handshakes.LoadOrStore(handshake.Version().Str(), handshake) + if exist == false { + if lib.Trace() { + n.node.Log().Trace("registered handshake %s", handshake.Version()) + } + } +} - for node := range n.connections { - list = append(list, node) +func (n *network) RegisterProto(proto gen.NetworkProto) { + if proto == nil { + n.node.Log().Error("unable to register nil value as a proto ") + return + } + _, exist := n.protos.LoadOrStore(proto.Version().Str(), proto) + if exist == false { + if lib.Trace() { + n.node.Log().Trace("registered proto %s", proto.Version()) + } } - return list } -func (n *network) NodesIndirect() []string { - list := []string{} - n.connectionsMutex.RLock() - defer n.connectionsMutex.RUnlock() +func (n *network) Nodes() []gen.Atom { + var nodes []gen.Atom - for node, ci := range n.connections { - if ci.conn == nil { - list = append(list, node) - } + n.connections.Range(func(k, _ any) bool { + node := k.(gen.Atom) + nodes = append(nodes, node) + return true + }) + + return nodes +} + +func (n *network) Info() (gen.NetworkInfo, error) { + var info gen.NetworkInfo + + if n.running.Load() == false { + return info, gen.ErrNetworkStopped + } + + info.Mode = 
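// ---------------------------------------------------------------------------
// Editor's note: Nodes and Info above expose the connection table; a sketch
// of inspecting the network state ("fmt" import assumed):
//
//	func printNetworkState(node gen.Node) error {
//		info, err := node.Network().Info()
//		if err != nil {
//			return err // gen.ErrNetworkStopped if the stack is down
//		}
//		fmt.Println("mode:", info.Mode)
//		fmt.Println("handshake:", info.HandshakeVersion)
//		fmt.Println("proto:", info.ProtoVersion)
//		fmt.Println("connected peers:", node.Network().Nodes())
//		return nil
//	}
// ---------------------------------------------------------------------------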
n.mode
+	info.Registrar = n.registrar.Info()
+
+	for _, acceptor := range n.acceptors {
+		info.Acceptors = append(info.Acceptors, acceptor.Info())
 	}
-	return list
+	info.MaxMessageSize = n.maxmessagesize
+	info.HandshakeVersion = n.defaultHandshake.Version()
+	info.ProtoVersion = n.defaultProto.Version()
+
+	n.connections.Range(func(k, _ any) bool {
+		node := k.(gen.Atom)
+		info.Nodes = append(info.Nodes, node)
+		return true
+	})
+
+	info.Routes = n.staticRoutes.info()
+	info.ProxyRoutes = n.staticProxies.info()
+
+	info.Flags = n.flags
+
+	info.EnabledSpawn = n.listEnabledSpawn()
+	info.EnabledApplicationStart = n.listEnabledApplicationStart()
+
+	return info, nil
 }
 
-func (n *network) NetworkStats(name string) (NetworkStats, error) {
-	var stats NetworkStats
-	n.connectionsMutex.RLock()
-	ci, found := n.connections[name]
-	n.connectionsMutex.RUnlock()
+func (n *network) Mode() gen.NetworkMode {
+	return n.mode
+}
 
+//
+// internals
+//
+
+// Connection and GetConnection aren't exposed via gen.Network
+func (n *network) Connection(name gen.Atom) (gen.Connection, error) {
+	v, found := n.connections.Load(name)
 	if found == false {
-		return stats, lib.ErrUnknown
+		return nil, gen.ErrNoConnection
 	}
-
-	stats = ci.connection.Stats()
-	return stats, nil
+	return v.(gen.Connection), nil
 }
 
-// RouteProxyConnectRequest
-func (n *network) RouteProxyConnectRequest(from ConnectionInterface, request ProxyConnectRequest) error {
-	if request.To != n.nodename {
-		var err error
-		var connection ConnectionInterface
-		var proxyTransitTo map[string]bool
-		//
-		// outgoing proxy request
-		//
-
-		// check if we already have
-		n.connectionsMutex.RLock()
-		if ci, exist := n.connections[request.To]; exist {
-			connection = ci.connection
-			proxyTransitTo = ci.proxyTransitTo
+func (n *network) GetConnection(name gen.Atom) (gen.Connection, error) {
+	v, found := n.connections.Load(name)
+	if found {
+		return v.(gen.Connection), nil
+	}
+
+	if lib.Trace() {
+		n.node.Log().Trace("trying to make connection with %s", name)
+	}
+	// check the static routes
+	if sroutes, found := n.staticRoutes.lookup(string(name)); found {
+		if lib.Trace() {
+			n.node.Log().Trace("found %d static route[s] for %s", len(sroutes), name)
 		}
-		n.connectionsMutex.RUnlock()
-
-		if from != nil {
-			//
-			// transit request
-			//
-			if from == connection {
-				lib.Log("[%s] NETWORK proxy. Error: proxy route points to the connection this request came from", n.nodename)
-				return lib.ErrProxyLoopDetected
+		for i, sroute := range sroutes {
+			sroute.InsecureSkipVerify = n.skipverify
+			if sroute.Resolver == nil {
+				if lib.Trace() {
+					n.node.Log().Trace("use static route to %s (%d)", name, i+1)
+				}
+				if c, err := n.connect(name, sroute); err == nil {
+					return c, nil
+				} else {
+					if lib.Trace() {
+						n.node.Log().Trace("unable to connect to %s using static route: %s", name, err)
+					}
+				}
+				continue
 			}
-			lib.Log("[%s] NETWORK transit proxy connection to %q", n.nodename, request.To)
-			// proxy feature must be enabled explicitly for the transitional requests
-			if n.proxy.Transit == false {
-				lib.Log("[%s] NETWORK proxy. 
Proxy feature is disabled on this node", n.nodename) - return lib.ErrProxyTransitDisabled + if lib.Trace() { + n.node.Log().Trace("use static route to %s with resolver (%d)", name, i+1) + } + nr, err := sroute.Resolver.Resolve(name) + if err != nil { + if lib.Trace() { + n.node.Log().Trace("failed to resolve %s: %s", name, err) + } + continue } - if proxyTransitTo != nil { - if proxyTransitTo[request.To] == false { - nodeHost := strings.Split(request.To, "@") - if len(nodeHost) != 2 || proxyTransitTo[nodeHost[1]] == false { - lib.Log("[%s] NETWORK proxy. Proxy connection is restricted (to: %s)", n.nodename, request.To) - return lib.ErrProxyTransitRestricted + for _, route := range nr { + nroute := gen.NetworkRoute{ + Route: route, + InsecureSkipVerify: n.skipverify, + } + if nroute.Route.TLS && nroute.Cert == nil { + nroute.Cert = n.node.certmanager + } + if nroute.Cookie == "" { + nroute.Cookie = n.cookie + } + if c, err := n.connect(name, nroute); err == nil { + return c, nil + } else { + if lib.Trace() { + n.node.Log().Trace("unable to connect to %s using static route (with resolver): %s", name, err) } } } + } + return nil, gen.ErrNoRoute + } - if request.Hop < 1 { - lib.Log("[%s] NETWORK proxy. Error: exceeded hop limit", n.nodename) - return lib.ErrProxyHopExceeded + // check the static proxy routes + if proutes, found := n.staticProxies.lookup(string(name)); found { + if lib.Trace() { + n.node.Log().Trace("found %d static proxy route[s] for %s", len(proutes), name) + } + for i, proute := range proutes { + if proute.Resolver == nil { + if lib.Trace() { + n.node.Log().Trace("use static proxy route to %s (%d)", name, i+1) + } + if c, err := n.connectProxy(name, proute); err == nil { + return c, nil + } + continue } - request.Hop-- - if len(request.Path) > defaultProxyPathLimit { - return lib.ErrProxyPathTooLong + if lib.Trace() { + n.node.Log().Trace("use static proxy route to %s with resolver (%d)", name, i+1) } - - for i := range request.Path { - if n.nodename != request.Path[i] { - continue + pr, err := proute.Resolver.ResolveProxy(name) + if err != nil { + if lib.Trace() { + n.node.Log().Trace("failed to resolve proxy for %s: %s", name, err) } - lib.Log("[%s] NETWORK proxy. Error: loop detected in proxy path %#v", n.nodename, request.Path) - return lib.ErrProxyLoopDetected + continue } - if connection == nil { - // check if we have proxy route - route, err_route := n.ResolveProxy(request.To) - if err_route == nil && route.Proxy != n.nodename { - // proxy request goes to the next hop - connection, err = n.getConnectionDirect(route.Proxy, true) - } else { - connection, err = n.getConnectionDirect(request.To, true) + for _, route := range pr { + nproute := gen.NetworkProxyRoute{ + Route: route, } - - if err != nil { - return err + if c, err := n.connectProxy(name, nproute); err == nil { + return c, nil + } else { + if lib.Trace() { + n.node.Log().Trace("unable to connect to %s using proxy route: %s", name, err) + } } } + } + return nil, gen.ErrNoRoute + } - request.Path = append([]string{n.nodename}, request.Path...) 
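// ---------------------------------------------------------------------------
// Editor's note: the lookup order implemented by GetConnection above (known
// connection, then static routes, then static proxy routes, then registrar)
// is what GetNode relies on. A sketch of the difference between Node and
// GetNode ("fmt" import assumed; peer name is an example):
//
//	func reachPeer(node gen.Node) {
//		// Node returns a peer only if a connection already exists
//		if _, err := node.Network().Node("peer@host"); err != nil {
//			// GetNode dials the peer using routes/registrar if needed
//			if remote, err := node.Network().GetNode("peer@host"); err == nil {
//				fmt.Println("connected to", remote.Name())
//			}
//		}
//	}
// ---------------------------------------------------------------------------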
- err = connection.ProxyConnectRequest(request) - return err + // resolve it + if nr, err := n.registrar.Resolver().Resolve(name); err == nil { + if lib.Trace() { + n.node.Log().Trace("resolved %d route[s] for %s", len(nr), name) } - if connection == nil { - route, err_route := n.ResolveProxy(request.To) - if err_route != nil { - // if it was invoked from getConnection ('from' == nil) there will - // be attempt to make direct connection using getConnectionDirect - return lib.ErrProxyNoRoute + for _, route := range nr { + nroute := gen.NetworkRoute{ + Route: route, + InsecureSkipVerify: n.skipverify, + Cookie: n.cookie, } - // initiating proxy connection - lib.Log("[%s] NETWORK initiate proxy connection to %q via %q", n.nodename, request.To, route.Proxy) - connection, err = n.getConnectionDirect(route.Proxy, true) - if err != nil { - return err + if route.TLS { + nroute.Cert = n.node.certmanager } - } - - cookie := n.proxy.Cookie - flags := n.proxy.Flags - if route, err_route := n.ResolveProxy(request.To); err_route == nil { - cookie = route.Cookie - if request.Flags.Enable == false { - flags = route.Flags + if c, err := n.connect(name, nroute); err == nil { + return c, nil + } else { + if lib.Trace() { + n.node.Log().Trace("unable to connect to %s: %s", name, err) + } } } - privKey, _ := rsa.GenerateKey(rand.Reader, 2048) - pubKey := x509.MarshalPKCS1PublicKey(&privKey.PublicKey) - request.PublicKey = pubKey - request.Flags = flags - - // create digest using creation, cookie and pubKey. - // we can't use neither n.nodename or request.To, or request.ID - - // - anything that contains nodename or peername, because of etf.AtomMapping. - request.Digest = generateProxyDigest(n.creation, cookie, pubKey) - - if request.Hop < 1 { - request.Hop = DefaultProxyMaxHop - } - request.Creation = n.creation - connectRequest := proxyConnectRequest{ - privateKey: privKey, - request: request, - connection: make(chan ConnectionInterface), - cancel: make(chan ProxyConnectCancel), - } - request.Path = []string{n.nodename} - if err := connection.ProxyConnectRequest(request); err != nil { - return err + if lib.Trace() { + n.node.Log().Trace("unable to connect to %s directly, looking up proxies...", name) } - n.putProxyConnectRequest(connectRequest) - return nil - } - - // - // handle proxy connect request - // - - // check digest - // use the last item in the request.Path as a peername - if len(request.Path) < 2 { - // reply error. there must be atleast 2 nodes - initiating and transit nodes - lib.Log("[%s] NETWORK proxy. Proxy connect request has wrong path (too short)", n.nodename) - return lib.ErrProxyConnect - } - peername := request.Path[len(request.Path)-1] - - if n.proxy.Accept == false { - lib.Warning("[%s] Got proxy connect request from %q. Not allowed.", n.nodename, peername) - return lib.ErrProxyConnect } - cookie := n.proxy.Cookie - flags := n.proxy.Flags - if route, err_route := n.ResolveProxy(peername); err_route == nil { - cookie = route.Cookie - if request.Flags.Enable == false { - flags = route.Flags + // resolve proxy + if pr, err := n.registrar.Resolver().ResolveProxy(name); err == nil { + if lib.Trace() { + n.node.Log().Trace("resolved %d proxy routes for %s", len(pr), name) } - } - checkDigest := generateProxyDigest(request.Creation, cookie, request.PublicKey) - if bytes.Equal(request.Digest, checkDigest) == false { - // reply error. digest mismatch - lib.Log("[%s] NETWORK proxy. 
Proxy connect request has wrong digest", n.nodename) - return lib.ErrProxyConnect - } - - // do some encryption magic - pk, err := x509.ParsePKCS1PublicKey(request.PublicKey) - if err != nil { - lib.Log("[%s] NETWORK proxy. Proxy connect request has wrong public key", n.nodename) - return lib.ErrProxyConnect - } - hash := sha256.New() - key := make([]byte, 32) - rand.Read(key) - cipherkey, err := rsa.EncryptOAEP(hash, rand.Reader, pk, key, nil) - if err != nil { - lib.Log("[%s] NETWORK proxy. Proxy connect request. Can't encrypt: %s ", n.nodename, err) - return lib.ErrProxyConnect - } - block, err := aes.NewCipher(key) - if err != nil { - return err - } - sessionID := lib.RandomString(32) - digest := generateProxyDigest(n.creation, n.proxy.Cookie, key) - if flags.Enable == false { - flags = DefaultProxyFlags() + // check if we already have connection with the proxy, so use it + // for the proxy connection + for _, route := range pr { + // check if we have connection to the proxy node + if _, err := n.Connection(route.Proxy); err != nil { + continue + } + // try to use the existing connection to the proxy node + nproute := gen.NetworkProxyRoute{ + Route: route, + } + if c, err := n.connectProxy(name, nproute); err == nil { + return c, nil + } else { + if lib.Trace() { + n.node.Log().Trace("unable to connect to %s using resolve proxy: %s", name, err) + } + } + } } - // if one of the nodes want to use encryption then it must be used by both nodes - if request.Flags.EnableEncryption || flags.EnableEncryption { - request.Flags.EnableEncryption = true - flags.EnableEncryption = true - } + return nil, gen.ErrNoRoute +} - cInternal := connectionInternal{ - connection: from, - proxySessionID: sessionID, - } - if _, err := n.registerConnection(peername, cInternal); err != nil { - return lib.ErrProxySessionDuplicate - } +func (n *network) connect(name gen.Atom, route gen.NetworkRoute) (gen.Connection, error) { + var dial func(network, addr string) (net.Conn, error) - reply := ProxyConnectReply{ - ID: request.ID, - To: peername, - Digest: digest, - Cipher: cipherkey, - Flags: flags, - Creation: n.creation, - SessionID: sessionID, - Path: request.Path[1:], + if n.running.Load() == false { + return nil, gen.ErrNetworkStopped } - if err := from.ProxyConnectReply(reply); err != nil { - // can't send reply. ignore this connection request - lib.Log("[%s] NETWORK proxy. Proxy connect request. 
Can't send reply: %s ", n.nodename, err) - n.unregisterConnection(peername, nil) - return lib.ErrProxyConnect + vhandshake, found := n.handshakes.Load(route.Route.HandshakeVersion.Str()) + if found == false { + return nil, fmt.Errorf("no handshake handler for %s", route.Route.HandshakeVersion) } - - session := ProxySession{ - ID: sessionID, - NodeFlags: reply.Flags, - PeerFlags: request.Flags, - PeerName: peername, - Creation: request.Creation, - Block: block, + vproto, found := n.protos.Load(route.Route.ProtoVersion.Str()) + if found == false { + return nil, fmt.Errorf("no proto handler for %s", route.Route.ProtoVersion) } - // register proxy session - from.ProxyRegisterSession(session) - return nil -} - -func (n *network) RouteProxyConnectReply(from ConnectionInterface, reply ProxyConnectReply) error { - - n.proxyTransitSessionsMutex.RLock() - _, duplicate := n.proxyTransitSessions[reply.SessionID] - n.proxyTransitSessionsMutex.RUnlock() + handshake := vhandshake.(gen.NetworkHandshake) + proto := vproto.(gen.NetworkProto) - if duplicate { - return lib.ErrProxySessionDuplicate + if lib.Trace() { + n.node.Log().Trace("trying to connect to %s (%s:%d, tls:%v)", + name, route.Route.Host, route.Route.Port, route.Route.TLS) } - if from == nil { - // from value can't be nil - return lib.ErrProxyUnknownRequest + dialer := &net.Dialer{ + KeepAlive: gen.DefaultKeepAlivePeriod, + Timeout: 3 * time.Second, // timeout to establish TCP-connection } - if reply.To != n.nodename { - // send this reply further and register this session - if n.proxy.Transit == false { - return lib.ErrProxyTransitDisabled - } - - if len(reply.Path) == 0 { - return lib.ErrProxyUnknownRequest - } - if len(reply.Path) > defaultProxyPathLimit { - return lib.ErrProxyPathTooLong - } - - next := reply.Path[0] - connection, err := n.getConnectionDirect(next, false) - if err != nil { - return err - } - if connection == from { - return lib.ErrProxyLoopDetected - } - - reply.Path = reply.Path[1:] - // check for the looping - for i := range reply.Path { - if reply.Path[i] == next { - return lib.ErrProxyLoopDetected - } - } - - if err := connection.ProxyConnectReply(reply); err != nil { - return err + if route.Route.TLS { + tlsconfig := &tls.Config{ + InsecureSkipVerify: route.InsecureSkipVerify, } - - // register transit proxy session - n.proxyTransitSessionsMutex.Lock() - session := proxyTransitSession{ - a: from, - b: connection, + tlsdialer := tls.Dialer{ + NetDialer: dialer, + Config: tlsconfig, } - n.proxyTransitSessions[reply.SessionID] = session - n.proxyTransitSessionsMutex.Unlock() - - // keep session id for both connections in order - // to handle connection closing (we should - // send ProxyDisconnect if one of the connection - // was closed) - n.connectionsMutex.Lock() - sessions, _ := n.connectionsTransit[session.a] - sessions = append(sessions, reply.SessionID) - n.connectionsTransit[session.a] = sessions - sessions, _ = n.connectionsTransit[session.b] - sessions = append(sessions, reply.SessionID) - n.connectionsTransit[session.b] = sessions - n.connectionsMutex.Unlock() - return nil + dial = tlsdialer.Dial + } else { + dial = dialer.Dial } - - // look up for the request we made earlier - r, found := n.getProxyConnectRequest(reply.ID) - if found == false { - return lib.ErrProxyUnknownRequest + dsn := net.JoinHostPort(route.Route.Host, strconv.Itoa(int(route.Route.Port))) + conn, err := dial("tcp", dsn) + if err != nil { + return nil, err } - // decrypt cipher key using private key - hash := sha256.New() - key, err := 
rsa.DecryptOAEP(hash, rand.Reader, r.privateKey, reply.Cipher, nil) - if err != nil { - lib.Log("[%s] CORE route proxy. Proxy connect reply has invalid cipher", n.nodename) - return lib.ErrProxyConnect + hopts := gen.HandshakeOptions{ + Cookie: route.Cookie, + Flags: route.Flags, + MaxMessageSize: n.maxmessagesize, } - cookie := n.proxy.Cookie - // check if we should use proxy route cookie - n.proxyRoutesMutex.RLock() - route, has_route := n.proxyRoutes[r.request.To] - n.proxyRoutesMutex.RUnlock() - if has_route { - cookie = route.Cookie + if hopts.Cookie == "" { + hopts.Cookie = n.cookie } - // check digest - checkDigest := generateProxyDigest(reply.Creation, cookie, key) - if bytes.Equal(checkDigest, reply.Digest) == false { - lib.Log("[%s] CORE route proxy. Proxy connect reply has wrong digest", n.nodename) - return lib.ErrProxyConnect + if hopts.Flags.Enable == false { + hopts.Flags = n.flags } - block, err := aes.NewCipher(key) + result, err := handshake.Start(n.node, conn, hopts) if err != nil { - return err + conn.Close() + return nil, err } - cInternal := connectionInternal{ - connection: from, - proxySessionID: reply.SessionID, + + if result.Peer != name { + conn.Close() + return nil, fmt.Errorf("remote node %s introduced itself as %s", name, result.Peer) } - if registered, err := n.registerConnection(r.request.To, cInternal); err != nil { - select { - case r.connection <- registered: - } - return lib.ErrProxySessionDuplicate + + mapping := make(map[gen.Atom]gen.Atom) + for k, v := range route.AtomMapping { + mapping[k] = v } - // if one of the nodes want to use encryption then it must be used by both nodes - if r.request.Flags.EnableEncryption || reply.Flags.EnableEncryption { - r.request.Flags.EnableEncryption = true - reply.Flags.EnableEncryption = true + for k, v := range result.AtomMapping { + mapping[k] = v } + result.AtomMapping = mapping - session := ProxySession{ - ID: reply.SessionID, - NodeFlags: r.request.Flags, - PeerFlags: reply.Flags, - PeerName: r.request.To, - Creation: reply.Creation, - Block: block, + if route.LogLevel == gen.LogLevelDefault { + route.LogLevel = n.node.Log().Level() } - - // register proxy session - from.ProxyRegisterSession(session) - - select { - case r.connection <- from: + log := createLog(route.LogLevel, n.node.dolog) + logSource := gen.MessageLogNetwork{ + Node: n.node.name, + Peer: result.Peer, + Creation: result.PeerCreation, } + log.setSource(logSource) - return nil -} - -func (n *network) RouteProxyConnectCancel(from ConnectionInterface, cancel ProxyConnectCancel) error { - if from == nil { - // from value can not be nil - return lib.ErrProxyConnect - } - if len(cancel.Path) == 0 { - n.cancelProxyConnectRequest(cancel) - return nil + pconn, err := proto.NewConnection(n.node, result, log) + if err != nil { + conn.Close() + return nil, err } - next := cancel.Path[0] - if next != n.nodename { - if len(cancel.Path) > defaultProxyPathLimit { - return lib.ErrProxyPathTooLong - } - connection, err := n.getConnectionDirect(next, false) + redial := func(dsn, id string) (net.Conn, []byte, error) { + c, err := dial("tcp", dsn) if err != nil { - return err - } - - if connection == from { - return lib.ErrProxyLoopDetected - } - - cancel.Path = cancel.Path[1:] - // check for the looping - for i := range cancel.Path { - if cancel.Path[i] == next { - return lib.ErrProxyLoopDetected - } + return nil, nil, err } - - if err := connection.ProxyConnectCancel(cancel); err != nil { - return err + tail, err := handshake.Join(n.node, c, id, hopts) + if err != 
nil { + return nil, nil, err } - return nil + return c, tail, nil } - return lib.ErrProxyUnknownRequest -} - -func (n *network) RouteProxyDisconnect(from ConnectionInterface, disconnect ProxyDisconnect) error { - - n.proxyTransitSessionsMutex.RLock() - session, isTransitSession := n.proxyTransitSessions[disconnect.SessionID] - n.proxyTransitSessionsMutex.RUnlock() - if isTransitSession == false { - // check for the proxy connection endpoint - var peername string - var found bool - var ci connectionInternal - - // get peername by session id - n.connectionsMutex.RLock() - for p, c := range n.connections { - if c.proxySessionID != disconnect.SessionID { - continue - } - found = true - peername = p - ci = c - break + if c, err := n.registerConnection(result.Peer, pconn); err != nil { + if err == gen.ErrTaken { + return c, nil } - if found == false { - n.connectionsMutex.RUnlock() - return lib.ErrProxySessionUnknown - } - n.connectionsMutex.RUnlock() + pconn.Terminate(err) + conn.Close() + return nil, err + } - if ci.proxySessionID != disconnect.SessionID || ci.connection != from { - return lib.ErrProxySessionUnknown - } + pconn.Join(conn, result.ConnectionID, redial, result.Tail) + go n.serve(proto, pconn, redial) - n.unregisterConnection(peername, &disconnect) - return nil - } + return pconn, nil +} - n.proxyTransitSessionsMutex.Lock() - delete(n.proxyTransitSessions, disconnect.SessionID) - n.proxyTransitSessionsMutex.Unlock() - - // remove this session from the connections - n.connectionsMutex.Lock() - sessions, ok := n.connectionsTransit[session.a] - if ok { - for i := range sessions { - if sessions[i] == disconnect.SessionID { - sessions[i] = sessions[0] - sessions = sessions[1:] - n.connectionsTransit[session.a] = sessions - break +func (n *network) serve(proto gen.NetworkProto, conn gen.Connection, redial gen.NetworkDial) { + name := conn.Node().Name() + if lib.Recover() { + defer func() { + if r := recover(); r != nil { + n.node.log.Panic("connection with %s (%s) terminated abnormally: %v", name, name.CRC32(), r) + n.unregisterConnection(name, gen.TerminateReasonPanic) + conn.Terminate(gen.TerminateReasonPanic) } - } - } - sessions, ok = n.connectionsTransit[session.b] - if ok { - for i := range sessions { - if sessions[i] == disconnect.SessionID { - sessions[i] = sessions[0] - sessions = sessions[1:] - n.connectionsTransit[session.b] = sessions - break - } - } + }() } - n.connectionsMutex.Unlock() - // send this message further - switch from { - case session.b: - return session.a.ProxyDisconnect(disconnect) - case session.a: - return session.b.ProxyDisconnect(disconnect) - default: - // shouldn't happen - panic("internal error") - } + err := proto.Serve(conn, redial) + n.unregisterConnection(name, err) + conn.Terminate(err) } -func (n *network) RouteProxy(from ConnectionInterface, sessionID string, packet *lib.Buffer) error { - // check if this session is present on this node - n.proxyTransitSessionsMutex.RLock() - session, ok := n.proxyTransitSessions[sessionID] - n.proxyTransitSessionsMutex.RUnlock() - - if !ok { - return lib.ErrProxySessionUnknown +func (n *network) connectProxy(name gen.Atom, route gen.NetworkProxyRoute) (gen.Connection, error) { + if lib.Trace() { + n.node.Log().Trace("trying to connect to %s (via proxy %s)", name, route.Route.Proxy) } + // TODO will be implemented later + n.node.log.Warning("proxy feature is not implemented yet") - switch from { - case session.b: - return session.a.ProxyPacket(packet) - case session.a: - return session.b.ProxyPacket(packet) - 
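// ---------------------------------------------------------------------------
// Editor's note: lib.Recover() and lib.Trace(), used in serve() above, read
// like compile-time switches selected via build tags rather than runtime
// flags. A sketch of how such a toggle is commonly wired (an assumption, not
// taken from this patch):
//
//	//go:build !norecover
//
//	package lib
//
//	func Recover() bool { return true }
//
// Building with "-tags norecover" would then compile a variant returning
// false, letting a panic in a connection handler crash the node for debugging.
// ---------------------------------------------------------------------------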
default:
-		// shouldn't happen
-		panic("internal error")
-	}
+	return nil, gen.ErrUnsupported
 }
 
-func (n *network) AddProxyRoute(route ProxyRoute) error {
-	n.proxyRoutesMutex.Lock()
-	defer n.proxyRoutesMutex.Unlock()
-	if route.MaxHop > defaultProxyPathLimit {
-		return lib.ErrProxyPathTooLong
-	}
-	if route.MaxHop < 1 {
-		route.MaxHop = DefaultProxyMaxHop
+func (n *network) stop() error {
+	if swapped := n.running.CompareAndSwap(true, false); swapped == false {
+		return fmt.Errorf("network stack is already stopped")
 	}
+	n.registrar.Terminate()
+	n.registrar = nil
 
-	if route.Flags.Enable == false {
-		route.Flags = n.proxy.Flags
-	}
+	// stop acceptors
+	for _, a := range n.acceptors {
+		a.l.Close()
 	}
 
-	if s := strings.Split(route.Name, "@"); len(s) == 2 {
-		if s[0] == "" {
-			// must be domain name
-			if strings.HasPrefix(route.Name, "@") == false {
-				return lib.ErrRouteName
-			}
-		}
-	} else {
-		return lib.ErrRouteName
+	n.connections.Range(func(_, v any) bool {
+		c := v.(gen.Connection)
+		c.Terminate(gen.TerminateReasonNormal)
+		return true
+	})
 
-	if _, exist := n.proxyRoutes[route.Name]; exist {
-		return lib.ErrTaken
-	}
 
-	n.proxyRoutes[route.Name] = route
 	return nil
 }
 
-func (n *network) RemoveProxyRoute(name string) bool {
-	n.proxyRoutesMutex.Lock()
-	defer n.proxyRoutesMutex.Unlock()
-	if _, exist := n.proxyRoutes[name]; exist == false {
-		return false
+func (n *network) start(options gen.NetworkOptions) error {
+	if swapped := n.running.CompareAndSwap(false, true); swapped == false {
+		return fmt.Errorf("network stack is already running")
 	}
-	delete(n.proxyRoutes, name)
-	return true
-}
 
-func (n *network) ProxyRoutes() []ProxyRoute {
-	var routes []ProxyRoute
-	n.proxyRoutesMutex.RLock()
-	defer n.proxyRoutesMutex.RUnlock()
-	for _, v := range n.proxyRoutes {
-		routes = append(routes, v)
+	n.mode = options.Mode
+	if options.Mode == gen.NetworkModeDisabled {
+		n.running.Store(false)
+		n.node.log.Info("network is disabled")
+		return nil
 	}
-	return routes
-}
-
-func (n *network) ProxyRoute(name string) (ProxyRoute, bool) {
-	n.proxyRoutesMutex.RLock()
-	defer n.proxyRoutesMutex.RUnlock()
-	route, exist := n.proxyRoutes[name]
-	return route, exist
-}
-func (n *network) listen(ctx context.Context, hostname string, options Listener, register bool) (net.Listener, error) {
+	if lib.Trace() {
+		n.node.log.Trace("starting network...")
+	}
 
-	lc := net.ListenConfig{
-		KeepAlive: defaultKeepAlivePeriod * time.Second,
+	n.skipverify = options.InsecureSkipVerify
+	n.registrar = options.Registrar
+	if n.registrar == nil {
+		n.registrar = registrar.Create(registrar.Options{})
 	}
-	tlsEnabled := false
-	if options.TLS != nil {
-		if options.TLS.Certificates != nil || options.TLS.GetCertificate != nil {
-			tlsEnabled = true
-		}
+
+	n.node.validateLicenses(n.registrar.Version())
+
+	if options.Cookie == "" {
+		n.node.log.Warning("cookie is empty (gen.NetworkOptions), using a randomized value")
+		options.Cookie = lib.RandomString(16)
 	}
+	n.cookie = options.Cookie
+	n.maxmessagesize = options.MaxMessageSize
 
-	for port := options.ListenBegin; port <= options.ListenEnd; port++ {
-		if options.Hostname != "" {
-			hostname = options.Hostname
-		}
+	if options.Flags.Enable == false {
+		options.Flags = gen.DefaultNetworkFlags
+	}
+	n.flags = options.Flags
 
-		hostPort := net.JoinHostPort(hostname, strconv.Itoa(int(port)))
-		lib.Log("[%s] NETWORK trying to start listener on %q", n.nodename, hostPort)
-		listener, err := lc.Listen(ctx, "tcp", hostPort)
+	if options.Mode == gen.NetworkModeHidden {
+		static, err := 
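// ---------------------------------------------------------------------------
// Editor's note: start() above consumes gen.NetworkOptions. A sketch of a
// node configuration touching the fields handled here (mode, cookie, limits,
// acceptors); all values are illustrative and gen.NetworkModeEnabled is an
// assumed name for the default mode:
//
//	var options gen.NodeOptions
//	options.Network.Mode = gen.NetworkModeEnabled
//	options.Network.Cookie = "secret-cluster-cookie"
//	options.Network.MaxMessageSize = 1024 * 1024
//	options.Network.Acceptors = []gen.AcceptorOptions{
//		{
//			Host:      "0.0.0.0",
//			Port:      11144,
//			PortRange: 11154, // upper bound: ports 11144..11154 are tried (see startAcceptor below)
//		},
//	}
//	node, err := ergo.StartNode("demo@localhost", options)
//
// gen.NetworkModeHidden registers the node without announcing acceptor
// routes; gen.NetworkModeDisabled skips the network stack entirely.
// ---------------------------------------------------------------------------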
n.registrar.Register(n.node, gen.RegisterRoutes{}) if err != nil { - continue + return err } - if register && n.registrar != nil { - registerOptions := RegisterOptions{ - Port: port, - Creation: n.creation, - NodeVersion: n.version, - HandshakeVersion: options.Handshake.Version(), - EnableTLS: tlsEnabled, - EnableProxy: options.Flags.EnableProxy, - EnableCompression: options.Flags.EnableCompression, + // add static routes + for match, route := range static.Routes { + if err := n.AddRoute(match, route, 0); err != nil { + n.node.log.Error("unable to add static route %q from the registrar, ignored", match) } - - if err := n.registrar.Register(n.ctx, n.nodename, registerOptions); err != nil { - listener.Close() - return nil, fmt.Errorf("can not register this node: %s", err) + } + // add static proxy routes + for match, route := range static.Proxies { + if err := n.AddProxyRoute(match, route, 0); err != nil { + n.node.log.Error("unable to add static proxy route %q from the registrar, ignored", match) } } - if tlsEnabled { - listener = tls.NewListener(listener, options.TLS) + if lib.Trace() { + n.node.log.Trace("network started (hidden) with registrar %s", n.registrar.Version()) } + return nil + } - go func() { - for { - c, err := listener.Accept() - if err != nil { - if err == io.EOF { - return - } - if ctx.Err() == nil { - continue - } - lib.Log(err.Error()) - return - } - lib.Log("[%s] NETWORK accepted new connection from %s", n.nodename, c.RemoteAddr().String()) + nodehost := strings.Split(string(n.node.name), "@") - details, err := options.Handshake.Accept(c.RemoteAddr(), c, tlsEnabled, options.Cookie) - if err != nil { - if err != io.EOF { - lib.Warning("[%s] Can't handshake with %s: %s", n.nodename, c.RemoteAddr().String(), err) - } - c.Close() - continue - } - if details.Name == "" { - err := fmt.Errorf("remote node introduced itself as %q", details.Name) - lib.Warning("Handshake error: %s", err) - c.Close() - continue - } - connection, err := options.Proto.Init(n.ctx, c, n.nodename, details) - if err != nil { - lib.Warning("Proto error: %s", err) - c.Close() - continue - } + if len(options.Acceptors) == 0 { + a := gen.AcceptorOptions{ + Host: nodehost[1], + Port: gen.DefaultPort, + CertManager: n.node.CertManager(), + Cookie: options.Cookie, + MaxMessageSize: options.MaxMessageSize, + Flags: options.Flags, + } + options.Acceptors = append(options.Acceptors, a) + } - cInternal := connectionInternal{ - conn: c, - connection: connection, - } + if options.Handshake != nil { + n.defaultHandshake = options.Handshake + } + if options.Proto != nil { + n.defaultProto = options.Proto + } - if len(details.ProxyTransit.AllowTo) > 0 { - cInternal.proxyTransitTo = make(map[string]bool) - for _, to := range details.ProxyTransit.AllowTo { - cInternal.proxyTransitTo[to] = true - } - } + appRoutes := []gen.ApplicationRoute{} + for _, app := range n.node.Applications() { + info, err := n.node.ApplicationInfo(app) + if err != nil { + continue + } + r := gen.ApplicationRoute{ + Node: n.node.Name(), + Name: info.Name, + Weight: info.Weight, + Mode: info.Mode, + } + appRoutes = append(appRoutes, r) + } + routes := []gen.Route{} - if _, err := n.registerConnection(details.Name, cInternal); err != nil { - // Race condition: - // There must be another goroutine which already created and registered - // connection to this node. 
-			// Close this connection and use the already registered connection
-			c.Close()
-			continue
-		}
 
-			// run serving connection
-			go func(ctx context.Context, ci connectionInternal) {
-				options.Proto.Serve(ci.connection, n.router)
-				n.unregisterConnection(details.Name, nil)
-				options.Proto.Terminate(ci.connection)
-				ci.conn.Close()
-			}(ctx, cInternal)
 
-		}
-	}()
 
-	return listener, nil
-}
 
+	for _, a := range options.Acceptors {
+		if a.Handshake == nil {
+			a.Handshake = n.defaultHandshake
+		}
 
-	// all ports within a given range are taken
-	return nil, fmt.Errorf("can not start listener (port range is taken)")
-}
 
+		if a.Proto == nil {
+			a.Proto = n.defaultProto
+		}
 
-func (n *network) connect(node string) (ConnectionInterface, error) {
-	var c net.Conn
-	lib.Log("[%s] NETWORK trying to connect to %#v", n.nodename, node)
 
+		if a.MaxMessageSize == 0 {
+			a.MaxMessageSize = options.MaxMessageSize
+		}
 
-	// resolve the route
-	route, err := n.Resolve(node)
-	if err != nil {
-		return nil, err
-	}
-	customHandshake := route.Options.Handshake != nil
-	lib.Log("[%s] NETWORK resolved %#v to %s:%d (custom handshake: %t)", n.nodename, node, route.Host, route.Port, customHandshake)
 
+		if a.Flags.Enable == false {
+			a.Flags = a.Handshake.NetworkFlags()
+			if a.Flags.Enable == false {
+				a.Flags = options.Flags
+			}
+		}
 
-	HostPort := net.JoinHostPort(route.Host, strconv.Itoa(int(route.Port)))
-	dialer := net.Dialer{
-		KeepAlive: defaultKeepAlivePeriod * time.Second,
-		Timeout:   3 * time.Second, // timeout to establish TCP-connection
-	}
 
+		switch a.TCP {
+		case "tcp":
+		case "tcp6":
+		default:
+			a.TCP = "tcp4"
+		}
 
-	tlsEnabled := route.Options.TLS != nil
 
+		if a.Host == "" {
+			a.Host = nodehost[1]
+		}
 
-	if route.Options.IsErgo == true {
-		// use the route TLS settings if they were defined
-		if tlsEnabled {
-			if n.tls != nil {
-				route.Options.TLS.InsecureSkipVerify = n.tls.InsecureSkipVerify
-			}
-			// use the local TLS settings
-			tlsdialer := tls.Dialer{
-				NetDialer: &dialer,
-				Config:    route.Options.TLS,
+		acceptor, err := n.startAcceptor(a)
+		if err != nil {
+			// stop acceptors
+			for i := range n.acceptors {
+				n.acceptors[i].l.Close()
 			}
+			return err
+		}
 
-			c, err = tlsdialer.DialContext(n.ctx, "tcp", HostPort)
-		} else {
-			// TLS disabled on a remote node
-			c, err = dialer.DialContext(n.ctx, "tcp", HostPort)
-		}
 
+		n.acceptors = append(n.acceptors, acceptor)
+		r := gen.Route{
+			Port:             acceptor.port,
+			TLS:              acceptor.cert_manager != nil,
+			HandshakeVersion: acceptor.handshake.Version(),
+			ProtoVersion:     acceptor.proto.Version(),
+		}
+		n.node.validateLicenses(r.HandshakeVersion, r.ProtoVersion)
+		if a.Registrar == nil {
+			acceptor.registrar_info = n.registrar.Info
+			routes = append(routes, r)
+			continue
+		}
 
+		acceptor.registrar_info = a.Registrar.Info
+		// custom registrar for this acceptor
+		registerRoutes := gen.RegisterRoutes{
+			Routes:            []gen.Route{r},
+			ApplicationRoutes: appRoutes,
+		}
+		registrarInfo := a.Registrar.Info()
 
+		// TODO: it returns static routes; they need to be handled
+		_, err = a.Registrar.Register(n.node, registerRoutes)
+		if err != nil {
+			// stop acceptors
+			for i := range n.acceptors {
+				n.acceptors[i].l.Close()
 			}
+			return fmt.Errorf("unable to register node on %s (%s): %s", registrarInfo.Server, registrarInfo.Version, err)
 		}
-	} else {
-		// this is an Erlang/Elixir node. 
use the local TLS settings - tlsEnabled = n.tls != nil - if tlsEnabled { - tlsdialer := tls.Dialer{ - NetDialer: &dialer, - Config: n.tls, - } - c, err = tlsdialer.DialContext(n.ctx, "tcp", HostPort) + acceptor.registrar_custom = true + } - } else { - c, err = dialer.DialContext(n.ctx, "tcp", HostPort) - } + registerRoutes := gen.RegisterRoutes{ + Routes: routes, + ApplicationRoutes: appRoutes, } - // check if we couldn't establish a connection with the node + static, err := n.registrar.Register(n.node, registerRoutes) if err != nil { - lib.Warning("Could not connect to %q (%s): %s", node, HostPort, err) - return nil, err + return fmt.Errorf("unable to register node: %s", err) } - // handshake - handshake := route.Options.Handshake - if handshake == nil { - // use default handshake - handshake = n.handshake + // add static routes + for match, route := range static.Routes { + if err := n.AddRoute(match, route, 0); err != nil { + n.node.log.Error("unable to add static route %q from the registrar, ignored", match) + } } - - cookie := n.cookie - if route.Options.Cookie != "" { - cookie = route.Options.Cookie + // add static proxy routes + for match, route := range static.Proxies { + if err := n.AddProxyRoute(match, route, 0); err != nil { + n.node.log.Error("unable to add static proxy route %q from the registrar, ignored", match) + } } - details, err := handshake.Start(c.RemoteAddr(), c, tlsEnabled, cookie) - if err != nil { - lib.Warning("Handshake error: %s", err) - c.Close() - return nil, err - } - if details.Name != node { - err := fmt.Errorf("Handshake error: node %q introduced itself as %q", node, details.Name) - lib.Warning("%s", err) - return nil, err + if lib.Trace() { + n.node.log.Trace("network started with registrar %s", n.registrar.Version()) } + return nil +} - // proto - proto := route.Options.Proto - if proto == nil { - // use default proto - proto = n.proto +func (n *network) startAcceptor(a gen.AcceptorOptions) (*acceptor, error) { + lc := net.ListenConfig{ + KeepAlive: gen.DefaultKeepAlivePeriod, } - connection, err := proto.Init(n.ctx, c, n.nodename, details) - if err != nil { - c.Close() - lib.Warning("Proto error: %s", err) - return nil, err + cert_manager := a.CertManager + if cert_manager == nil { + cert_manager = n.node.CertManager() } - cInternal := connectionInternal{ - conn: c, - connection: connection, + bs := a.BufferSize + if bs < 1 { + bs = gen.DefaultTCPBufferSize } - if registered, err := n.registerConnection(details.Name, cInternal); err != nil { - // Race condition: - // There must be another goroutine which already created and registered - // connection to this node. 
- // Close this connection and use the already registered one - c.Close() - if err == lib.ErrTaken { - return registered, nil - } - return nil, err + pstart := a.Port + if pstart == 0 { + pstart = gen.DefaultPort } - - // enable keep alive on this connection - if tcp, ok := c.(*net.TCPConn); ok { - tcp.SetKeepAlive(true) - tcp.SetKeepAlivePeriod(5 * time.Second) - tcp.SetNoDelay(true) + pend := a.PortRange + if pend == 0 { + pend = 50000 } - - // run serving connection - go func(ctx context.Context, ci connectionInternal) { - proto.Serve(ci.connection, n.router) - n.unregisterConnection(details.Name, nil) - proto.Terminate(ci.connection) - ci.conn.Close() - }(n.ctx, cInternal) - - return connection, nil -} - -func (n *network) registerConnection(peername string, ci connectionInternal) (ConnectionInterface, error) { - lib.Log("[%s] NETWORK registering peer %#v", n.nodename, peername) - n.connectionsMutex.Lock() - defer n.connectionsMutex.Unlock() - - if registered, exist := n.connections[peername]; exist { - // already registered - return registered.connection, lib.ErrTaken + if pend < pstart { + pend = pstart } - n.connections[peername] = ci - event := MessageEventNetwork{ - PeerName: peername, - Online: true, + acceptor := &acceptor{ + bs: bs, + proto: a.Proto, + handshake: a.Handshake, + cert_manager: cert_manager, + max_message_size: a.MaxMessageSize, + atom_mapping: make(map[gen.Atom]gen.Atom), } - if ci.conn == nil { - // this is proxy connection - p, _ := n.connectionsProxy[ci.connection] - p = append(p, peername) - n.connectionsProxy[ci.connection] = p - event.Proxy = true + if a.Cookie == "" { + acceptor.cookie = n.cookie } - n.router.sendEvent(corePID, EventNetwork, event) - return ci.connection, nil -} - -func (n *network) unregisterConnection(peername string, disconnect *ProxyDisconnect) { - lib.Log("[%s] NETWORK unregistering peer %v", n.nodename, peername) - - n.connectionsMutex.Lock() - ci, exist := n.connections[peername] - if exist == false { - n.connectionsMutex.Unlock() - return + for k, v := range a.AtomMapping { + acceptor.atom_mapping[k] = v } - delete(n.connections, peername) - n.connectionsMutex.Unlock() - n.router.RouteNodeDown(peername, disconnect) - event := MessageEventNetwork{ - PeerName: peername, - Online: false, - } + for i := pstart; i < pend+1; i++ { + hp := net.JoinHostPort(a.Host, strconv.Itoa(int(i))) + lcl, err := lc.Listen(context.Background(), a.TCP, hp) + if err != nil { + if e, ok := err.(*net.OpError); ok { + if _, ok := e.Err.(*net.DNSError); ok { + return nil, err + } + } + continue + } - if ci.conn == nil { - // it was proxy connection - ci.connection.ProxyUnregisterSession(ci.proxySessionID) - event.Proxy = true - n.router.sendEvent(corePID, EventNetwork, event) - return + acceptor.port = i + acceptor.l = lcl + break } - n.router.sendEvent(corePID, EventNetwork, event) - n.connectionsMutex.Lock() - cp, _ := n.connectionsProxy[ci.connection] - for _, p := range cp { - lib.Log("[%s] NETWORK unregistering peer (via proxy) %v", n.nodename, p) - delete(n.connections, p) - event.PeerName = p - event.Proxy = true - n.router.sendEvent(corePID, EventNetwork, event) + if acceptor.l == nil { + return acceptor, fmt.Errorf("unable to assign requested address %s: no available ports in range %d..%d", + a.Host, pstart, pend) } - ct, _ := n.connectionsTransit[ci.connection] - delete(n.connectionsTransit, ci.connection) - n.connectionsMutex.Unlock() - - // send disconnect for the proxy sessions - for _, p := range cp { - disconnect := ProxyDisconnect{ - Node: 
peername, - Proxy: n.nodename, - Reason: "noconnection", + if acceptor.cert_manager != nil { + config := &tls.Config{ + GetCertificate: acceptor.cert_manager.GetCertificateFunc(), + InsecureSkipVerify: a.InsecureSkipVerify, } - n.router.RouteNodeDown(p, &disconnect) + acceptor.l = tls.NewListener(acceptor.l, config) } - // disconnect for the transit proxy sessions - for i := range ct { - disconnect := ProxyDisconnect{ - Node: peername, - Proxy: n.nodename, - SessionID: ct[i], - Reason: "noconnection", - } - n.RouteProxyDisconnect(ci.connection, disconnect) + acceptor.flags = a.Flags + if acceptor.flags.Enable == false { + acceptor.flags = gen.DefaultNetworkFlags } -} + go n.accept(acceptor) -// Connection interface default callbacks -func (c *Connection) Send(from gen.Process, to etf.Pid, message etf.Term) error { - return lib.ErrUnsupported -} -func (c *Connection) SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error { - return lib.ErrUnsupported -} -func (c *Connection) SendAlias(from gen.Process, to etf.Alias, message etf.Term) error { - return lib.ErrUnsupported -} -func (c *Connection) Link(local gen.Process, remote etf.Pid) error { - return lib.ErrUnsupported -} -func (c *Connection) Unlink(local gen.Process, remote etf.Pid) error { - return lib.ErrUnsupported -} -func (c *Connection) LinkExit(local etf.Pid, remote etf.Pid, reason string) error { - return lib.ErrUnsupported -} -func (c *Connection) Monitor(local gen.Process, remote etf.Pid, ref etf.Ref) error { - return lib.ErrUnsupported -} -func (c *Connection) MonitorReg(local gen.Process, remote gen.ProcessID, ref etf.Ref) error { - return lib.ErrUnsupported -} -func (c *Connection) Demonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error { - return lib.ErrUnsupported -} -func (c *Connection) DemonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error { - return lib.ErrUnsupported -} -func (c *Connection) MonitorExitReg(process gen.Process, reason string, ref etf.Ref) error { - return lib.ErrUnsupported -} -func (c *Connection) MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error { - return lib.ErrUnsupported -} -func (c *Connection) SpawnRequest(nodeName string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error { - return lib.ErrUnsupported -} -func (c *Connection) SpawnReply(to etf.Pid, ref etf.Ref, pid etf.Pid) error { - return lib.ErrUnsupported -} -func (c *Connection) SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error { - return lib.ErrUnsupported -} -func (c *Connection) ProxyConnectRequest(connect ProxyConnectRequest) error { - return lib.ErrUnsupported -} -func (c *Connection) ProxyConnectReply(reply ProxyConnectReply) error { - return lib.ErrUnsupported -} -func (c *Connection) ProxyDisconnect(disconnect ProxyDisconnect) error { - return lib.ErrUnsupported -} -func (c *Connection) ProxyRegisterSession(session ProxySession) error { - return lib.ErrUnsupported -} -func (c *Connection) ProxyUnregisterSession(id string) error { - return lib.ErrUnsupported -} -func (c *Connection) ProxyPacket(packet *lib.Buffer) error { - return lib.ErrUnsupported -} -func (c *Connection) Stats() NetworkStats { - return NetworkStats{} -} - -// Handshake interface default callbacks -func (h *Handshake) Start(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (HandshakeDetails, error) { - return HandshakeDetails{}, lib.ErrUnsupported -} -func (h *Handshake) Accept(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (HandshakeDetails, 
error) {
-	return HandshakeDetails{}, lib.ErrUnsupported
-}
-func (h *Handshake) Version() HandshakeVersion {
-	var v HandshakeVersion
-	return v
-}
+	if lib.Trace() {
+		n.node.Log().Trace("started acceptor on %s with handshake %s and proto %s (TLS: %t)",
+			acceptor.l.Addr(),
+			acceptor.handshake.Version(),
+			acceptor.proto.Version(), acceptor.cert_manager != nil,
+		)
+	}
 
-// internals
+	n.RegisterHandshake(acceptor.handshake)
+	n.RegisterProto(acceptor.proto)
 
-func (n *network) putProxyConnectRequest(r proxyConnectRequest) {
-	n.proxyConnectRequestMutex.Lock()
-	defer n.proxyConnectRequestMutex.Unlock()
-	n.proxyConnectRequest[r.request.ID] = r
+	return acceptor, nil
 }
 
-func (n *network) cancelProxyConnectRequest(cancel ProxyConnectCancel) {
-	n.proxyConnectRequestMutex.Lock()
-	defer n.proxyConnectRequestMutex.Unlock()
-
-	r, found := n.proxyConnectRequest[cancel.ID]
-	if found == false {
-		return
+func (n *network) accept(a *acceptor) {
+	hopts := gen.HandshakeOptions{
+		Cookie:         a.cookie,
+		Flags:          a.flags,
+		MaxMessageSize: a.max_message_size,
+		CertManager:    a.cert_manager,
 	}
+	for {
+		c, err := a.l.Accept()
+		if err != nil {
+			if err == io.EOF {
+				return
+			}
+			n.node.Log().Info("acceptor %s terminated (handshake: %s, proto: %s)",
+				a.l.Addr(), a.handshake.Version(), a.proto.Version())
+			return
+		}
+		if lib.Trace() {
+			n.node.Log().Trace("accepted new TCP-connection from %s", c.RemoteAddr().String())
+		}
 
-	delete(n.proxyConnectRequest, cancel.ID)
-	select {
-	case r.cancel <- cancel:
-	default:
-	}
-	return
-}
+		if hopts.Cookie == "" {
+			hopts.Cookie = n.cookie
+		}
 
-func (n *network) waitProxyConnection(id etf.Ref, timeout int) (ConnectionInterface, error) {
-	n.proxyConnectRequestMutex.RLock()
-	r, found := n.proxyConnectRequest[id]
-	n.proxyConnectRequestMutex.RUnlock()
+		result, err := a.handshake.Accept(n.node, c, hopts)
+		if err != nil {
+			if err != io.EOF {
+				n.node.Log().Warning("unable to handshake with %s: %s", c.RemoteAddr().String(), err)
+			}
+			c.Close()
+			continue
+		}
 
-	if found == false {
-		return nil, lib.ErrProxyUnknownRequest
-	}
+		if result.Peer == "" {
+			n.node.Log().Warning("%s did not introduce itself, closing connection", c.RemoteAddr().String())
+			c.Close()
+			continue
+		}
 
-	defer func(id etf.Ref) {
-		n.proxyConnectRequestMutex.Lock()
-		delete(n.proxyConnectRequest, id)
-		n.proxyConnectRequestMutex.Unlock()
-	}(id)
+		// update atom mapping: a.atom_mapping + result.AtomMapping
+		mapping := make(map[gen.Atom]gen.Atom)
+		for k, v := range a.atom_mapping {
+			mapping[k] = v
+		}
+		for k, v := range result.AtomMapping {
+			mapping[k] = v
+		}
+		result.AtomMapping = mapping
+
+		// check if we already have connection with this node
+		if v, exist := n.connections.Load(result.Peer); exist {
+			conn := v.(gen.Connection)
+			if err := conn.Join(c, result.ConnectionID, nil, result.Tail); err != nil {
+				if err == gen.ErrUnsupported {
+					n.node.Log().Warning("unable to accept connection with %s (join is not supported)",
+						result.Peer)
+				} else {
+					n.node.Log().Trace("unable to join %s to the existing connection with %s: %s",
+						c.RemoteAddr(), result.Peer, err)
+				}
+				c.Close()
+			}
+			continue
+		}
 
-	timer := lib.TakeTimer()
-	defer lib.ReleaseTimer(timer)
-	timer.Reset(time.Second * time.Duration(timeout))
+		log := createLog(n.node.Log().Level(), n.node.dolog)
+		logSource := gen.MessageLogNetwork{
+			Node:     n.node.name,
+			Peer:     result.Peer,
+			Creation: result.PeerCreation,
+		}
+		log.setSource(logSource)
+		conn, err := a.proto.NewConnection(n.node, result, log)
+		if err != nil {
+			
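// ---------------------------------------------------------------------------
// Editor's note: the atom-mapping merge above lets a node present different
// atom names to different peers. A sketch of configuring a mapping on a
// static route (names are examples, direction of the mapping is per the
// encoder and stated here as an assumption); per-acceptor mappings can be
// set the same way via gen.AcceptorOptions.AtomMapping:
//
//	route := gen.NetworkRoute{
//		Route: gen.Route{Host: "10.0.0.5", Port: 11144},
//		AtomMapping: map[gen.Atom]gen.Atom{
//			"example": "sample", // hypothetical mapping applied on this connection
//		},
//	}
//
// Mappings coming from the handshake (result.AtomMapping) take precedence,
// since they are merged last.
// ---------------------------------------------------------------------------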
n.node.Log().Warning("unable to create new connection: %s", err) + c.Close() + continue + } - for { - select { - case connection := <-r.connection: - return connection, nil - case err := <-r.cancel: - return nil, fmt.Errorf("[%s] %s", err.From, err.Reason) - case <-timer.C: - return nil, lib.ErrTimeout - case <-n.ctx.Done(): - // node is on the way to terminate, it means connection is closed - // so it doesn't matter what kind of error will be returned - return nil, lib.ErrProxyUnknownRequest + if _, err := n.registerConnection(result.Peer, conn); err != nil { + n.node.Log().Warning("unable to register new connection with %s: %s", result.Peer, err) + c.Close() + continue } + conn.Join(c, result.ConnectionID, nil, result.Tail) + go n.serve(a.proto, conn, nil) } } -func (n *network) getProxyConnectRequest(id etf.Ref) (proxyConnectRequest, bool) { - n.proxyConnectRequestMutex.RLock() - defer n.proxyConnectRequestMutex.RUnlock() - r, found := n.proxyConnectRequest[id] - return r, found -} - -func (n *network) networkStats() internalNetworkStats { - stats := internalNetworkStats{} - n.proxyTransitSessionsMutex.RLock() - stats.transitConnections = len(n.proxyTransitSessions) - n.proxyTransitSessionsMutex.RUnlock() - - n.connectionsMutex.RLock() - stats.proxyConnections = len(n.connectionsProxy) - stats.connections = len(n.connections) - n.connectionsMutex.RUnlock() - return stats +func (n *network) registerConnection(name gen.Atom, conn gen.Connection) (gen.Connection, error) { + if v, exist := n.connections.LoadOrStore(name, conn); exist { + return v.(gen.Connection), gen.ErrTaken + } + n.node.log.Info("new connection with %s (%s)", name, name.CRC32()) + // TODO create event gen.MessageNetworkEvent + return conn, nil } -// -// internals -// - -func generateProxyDigest(creation uint32, cookie string, pubkey []byte) []byte { - // md5(md5(md5(md5(node)+cookie)+peer)+pubkey) - c := [4]byte{} - binary.BigEndian.PutUint32(c[:], creation) - digest1 := md5.Sum([]byte(c[:])) - digest2 := md5.Sum(append(digest1[:], []byte(cookie)...)) - digest3 := md5.Sum(append(digest2[:], pubkey...)) - return digest3[:] +func (n *network) unregisterConnection(name gen.Atom, reason error) { + n.connections.Delete(name) + if reason != nil { + n.node.log.Info("connection with %s (%s) terminated with reason: %s", name, name.CRC32(), reason) + } else { + n.node.log.Info("connection with %s (%s) terminated", name, name.CRC32()) + } + n.node.RouteNodeDown(name, reason) + // TODO create event gen.MessageNetworkEvent } diff --git a/node/node.go b/node/node.go index 8ff7fda8..34293c41 100644 --- a/node/node.go +++ b/node/node.go @@ -1,454 +1,1753 @@ package node import ( - "context" + "errors" "fmt" + "os" + "os/signal" + "reflect" "runtime" "strings" + "sync" + "sync/atomic" + "syscall" "time" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" + "ergo.services/ergo/app/system" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" + "ergo.services/ergo/lib/osdep" + "ergo.services/ergo/net/edf" ) -const ( - appBehaviorGroup = "ergo:applications" - remoteBehaviorGroup = "ergo:remote" +var ( + startID = uint64(1000) + startUniqID = uint64(time.Now().UnixNano()) ) -// node instance of created node using CreateNode type node struct { - coreInternal + name gen.Atom + version gen.Version + framework gen.Version - name string - creation uint32 - context context.Context - stop context.CancelFunc - version Version + creation int64 + + env sync.Map // env name gen.Env -> any + 
+ security gen.SecurityOptions + certmanager gen.CertManager + + corePID gen.PID + nextID uint64 + uniqID uint64 + + processes sync.Map // process pid gen.PID -> *process + names sync.Map // process name gen.Atom -> *process + aliases sync.Map // process alias gen.Alias -> *process + events sync.Map // process event gen.Event -> *eventOwner + + applications sync.Map // application name -> *application + + // consumer lists (subscribers) + monitors *target + links *target + + network *network + + loggers map[gen.LogLevel]*sync.Map // level -> name -> gen.LoggerBehavior + log *log + + waitprocesses sync.WaitGroup + wait chan struct{} + + licenses sync.Map + + coreEventsToken gen.Ref + + ctrlc chan os.Signal } -// StartWithContext create new node with specified context, name and cookie string -func StartWithContext(ctx context.Context, name string, cookie string, opts Options) (Node, error) { +type eventOwner struct { + name gen.Atom + producer gen.PID + token gen.Ref + notify bool + consumers int32 - lib.Log("Start node with name %q and cookie %q", name, cookie) + last lib.QueueMPSC +} - if len(strings.Split(name, "@")) != 2 { - return nil, fmt.Errorf("incorrect FQDN node name (example: node@localhost)") - } - if opts.Creation == 0 { - opts.Creation = uint32(time.Now().Unix()) +func Start(name gen.Atom, options gen.NodeOptions, frameworkVersion gen.Version) (gen.Node, error) { + if len(name) > 255 { + return nil, gen.ErrAtomTooLong } - if opts.Flags.Enable == false { - opts.Flags = DefaultFlags() + if s := strings.Split(string(name), "@"); len(s) != 2 { + return nil, fmt.Errorf("incorrect FQDN node name (example: node@localhost)") + } else { + if len(s[0]) < 1 { + return nil, fmt.Errorf("too short node name") + } + if len(s[1]) < 1 { + return nil, fmt.Errorf("too short host name") + } } - if opts.Handshake == nil { - return nil, fmt.Errorf("Handshake must be defined") + creation := time.Now().Unix() + + node := &node{ + name: name, + version: options.Version, + framework: frameworkVersion, + creation: creation, + + corePID: gen.PID{Node: name, ID: 1, Creation: creation}, + nextID: startID, + uniqID: startUniqID, + + certmanager: options.CertManager, + security: options.Security, + + monitors: createTarget(), + links: createTarget(), + + loggers: make(map[gen.LogLevel]*sync.Map), + + wait: make(chan struct{}), } - if opts.Proto == nil { - return nil, fmt.Errorf("Proto must be defined") + + node.log = createLog(options.Log.Level, node.dolog) + node.log.setSource(gen.MessageLogNode{Node: name, Creation: creation}) + + if options.Log.Level == gen.LogLevelDefault { + node.log.SetLevel(gen.LogLevelInfo) } - if opts.StaticRoutesOnly == false && opts.Registrar == nil { - return nil, fmt.Errorf("Registrar must be defined if StaticRoutesOnly == false") + + node.loggers[gen.LogLevelSystem] = &sync.Map{} + node.loggers[gen.LogLevelTrace] = &sync.Map{} + node.loggers[gen.LogLevelDebug] = &sync.Map{} + node.loggers[gen.LogLevelInfo] = &sync.Map{} + node.loggers[gen.LogLevelWarning] = &sync.Map{} + node.loggers[gen.LogLevelError] = &sync.Map{} + node.loggers[gen.LogLevelPanic] = &sync.Map{} + + for k, v := range options.Env { + node.SetEnv(k, v) } - nodectx, nodestop := context.WithCancel(ctx) - node := &node{ - name: name, - context: nodectx, - stop: nodestop, - creation: opts.Creation, + if options.Log.DefaultLogger.Disable == false { + // add default logger + logger := gen.CreateDefaultLogger(options.Log.DefaultLogger) + node.LoggerAdd("default", logger, options.Log.DefaultLogger.Filter...) 
} - // create a copy of envs - copyEnv := make(map[gen.EnvKey]interface{}) - for k, v := range opts.Env { - copyEnv[k] = v + for _, lo := range options.Log.Loggers { + if len(lo.Name) == 0 { + return nil, errors.New("logger name can not be empty") + } + if lo.Logger == nil { + return nil, errors.New("logger can not be nil") + } + node.LoggerAdd(lo.Name, lo.Logger, lo.Filter...) } - // set global variable 'ergo:Node' - copyEnv[EnvKeyNode] = Node(node) - opts.Env = copyEnv + node.coreEventsToken, _ = node.RegisterEvent(gen.CoreEvent, gen.EventOptions{}) - core, err := newCore(nodectx, name, cookie, opts) - if err != nil { + node.validateLicenses(node.version) + node.network = createNetwork(node) + + if err := node.NetworkStart(options.Network); err != nil { return nil, err } - node.coreInternal = core - for _, app := range opts.Applications { - // load applications - name, err := node.ApplicationLoad(app) - if err != nil { - nodestop() - return nil, err - } - // start applications - _, err = node.ApplicationStart(name) - if err != nil { - nodestop() - return nil, err + if len(options.Applications) > 0 { + node.log.Trace("starting application(s)...") + for _, app := range options.Applications { + // load applications + name, err := node.ApplicationLoad(app) + if err != nil { + node.log.Error("unable to load application %s: %s", name, err) + node.StopForce() + return nil, err + } + // start applications + if err := node.ApplicationStart(name, gen.ApplicationOptions{}); err != nil { + node.log.Error("unable to start application %s: %s", name, err) + node.StopForce() + return nil, err + } } } + edf.RegisterAtom(name) + node.log.Info("node %s built with %q successfully started", node.name, node.framework) return node, nil } -// Version returns version of the node -func (n *node) Version() Version { +// +// gen.Node interface implementation +// + +func (n *node) Name() gen.Atom { + return n.name +} + +func (n *node) IsAlive() bool { + return n.isRunning() +} + +func (n *node) Uptime() int64 { + if n.isRunning() == false { + return 0 + } + return time.Now().Unix() - atomic.LoadInt64(&n.creation) +} + +func (n *node) Version() gen.Version { return n.version } -// Spawn -func (n *node) Spawn(name string, opts gen.ProcessOptions, object gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) { - // process started by node has no parent - options := processOptions{ - ProcessOptions: opts, +func (n *node) FrameworkVersion() gen.Version { + return n.framework +} + +func (n *node) Commercial() []gen.Version { + var commercial []gen.Version + n.licenses.Range(func(k, _ any) bool { + commercial = append(commercial, k.(gen.Version)) + return true + }) + return commercial +} + +func (n *node) EnvList() map[gen.Env]any { + if n.isRunning() == false { + return nil } - return n.spawn(name, options, object, args...) 
+ env := make(map[gen.Env]any) + n.env.Range(func(k, v any) bool { + env[gen.Env(k.(string))] = v + return true + }) + return env } -// RegisterName -func (n *node) RegisterName(name string, pid etf.Pid) error { - return n.registerName(name, pid) +func (n *node) SetEnv(name gen.Env, value any) { + if n.isRunning() == false { + return + } + if value == nil { + n.env.Delete(name.String()) + return + } + n.env.Store(name.String(), value) } -// UnregisterName -func (n *node) UnregisterName(name string) error { - return n.unregisterName(name) +func (n *node) Env(name gen.Env) (any, bool) { + if n.isRunning() == false { + return nil, false + } + + return n.env.Load(name.String()) } -// Stop -func (n *node) Stop() { - n.coreStop() +func (n *node) CertManager() gen.CertManager { + return n.certmanager } -// Name -func (n *node) Name() string { - return n.name +func (n *node) Security() gen.SecurityOptions { + return n.security } -// IsAlive -func (n *node) IsAlive() bool { - return n.coreIsAlive() +func (n *node) Spawn(factory gen.ProcessFactory, options gen.ProcessOptions, args ...any) (gen.PID, error) { + if n.isRunning() == false { + return gen.PID{}, gen.ErrNodeTerminated + } + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + Args: args, + ParentPID: n.corePID, + ParentLeader: n.corePID, + ParentLogLevel: n.log.level, + ParentEnv: n.EnvList(), + } + + return n.spawn(factory, opts) } -// Uptime -func (n *node) Uptime() int64 { - return n.coreUptime() +func (n *node) SpawnRegister(register gen.Atom, factory gen.ProcessFactory, options gen.ProcessOptions, args ...any) (gen.PID, error) { + if n.isRunning() == false { + return gen.PID{}, gen.ErrNodeTerminated + } + if len(register) > 255 { + return gen.PID{}, gen.ErrAtomTooLong + } + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + Register: register, + Args: args, + ParentPID: n.corePID, + ParentLeader: n.corePID, + ParentLogLevel: n.log.level, + ParentEnv: n.EnvList(), + } + return n.spawn(factory, opts) + } -// Wait -func (n *node) Wait() { - n.coreWait() +func (n *node) RegisterName(name gen.Atom, pid gen.PID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + if len(name) > 255 { + return gen.ErrAtomTooLong + } + + n.log.Trace("RegisterName %s to %s", name, pid) + + value, ok := n.processes.Load(pid) + if ok == false { + return gen.ErrProcessUnknown + } + p := value.(*process) + + if p.isAlive() == false { + return gen.ErrProcessTerminated + } + + if p.registered.CompareAndSwap(false, true) == false { + return gen.ErrTaken + } + + if _, exist := n.names.LoadOrStore(name, p); exist { + p.registered.Store(false) + return gen.ErrTaken + } + + p.name = name + + return nil +} + +func (n *node) UnregisterName(name gen.Atom) (gen.PID, error) { + if n.isRunning() == false { + return gen.PID{}, gen.ErrNodeTerminated + } + + value, exist := n.names.LoadAndDelete(name) + if exist == false { + return gen.PID{}, gen.ErrNameUnknown + } + p := value.(*process) + p.name = "" + p.registered.Store(false) + + n.log.Trace("UnregisterName %s belonged to %s", name, p.pid) + + pname := gen.ProcessID{Name: name, Node: n.name} + n.RouteTerminateProcessID(pname, gen.ErrUnregistered) + return p.pid, nil +} + +func (n *node) MetaInfo(m gen.Alias) (gen.MetaInfo, error) { + var info gen.MetaInfo + if n.isRunning() == false { + return info, gen.ErrNodeTerminated + } + + value, ok := n.aliases.Load(m) + if ok == false { + return info, gen.ErrProcessUnknown + } + p := value.(*process) + + value, ok = p.metas.Load(m) + if ok 
== false { + return info, gen.ErrMetaUnknown + } + mp := value.(*meta) + + info.ID = mp.id + info.Parent = p.pid + info.Application = p.application + info.Behavior = mp.sbehavior + info.MailboxSize = mp.main.Size() + info.MailboxQueues.Main = mp.main.Len() + info.MailboxQueues.System = mp.system.Len() + info.MessagesIn = mp.messagesIn + info.MessagesOut = mp.messagesOut + info.MessagePriority = mp.priority + info.Uptime = time.Now().Unix() - mp.creation + info.LogLevel = mp.log.Level() + info.State = gen.MetaState(mp.state) + return info, nil +} + +func (n *node) ProcessInfo(pid gen.PID) (gen.ProcessInfo, error) { + var info gen.ProcessInfo + + if n.isRunning() == false { + return info, gen.ErrNodeTerminated + } + + value, ok := n.processes.Load(pid) + if ok == false { + return info, gen.ErrProcessUnknown + } + p := value.(*process) + + info.PID = p.pid + info.Name = p.name + info.Application = p.application + info.Behavior = p.sbehavior + info.MailboxSize = p.mailbox.Main.Size() + info.MailboxQueues.Main = p.mailbox.Main.Len() + info.MailboxQueues.Urgent = p.mailbox.Urgent.Len() + info.MailboxQueues.System = p.mailbox.System.Len() + info.MailboxQueues.Log = p.mailbox.Log.Len() + info.MessagesIn = atomic.LoadUint64(&p.messagesIn) + info.MessagesOut = atomic.LoadUint64(&p.messagesOut) + info.RunningTime = atomic.LoadUint64(&p.runningTime) + info.Compression = p.compression + info.MessagePriority = p.priority + info.Uptime = p.Uptime() + info.State = p.State() + info.Parent = p.parent + info.Leader = p.leader + info.Fallback = p.fallback + info.Aliases = p.Aliases() + info.Events = p.Events() + info.LogLevel = p.log.Level() + info.KeepNetworkOrder = p.keeporder + info.ImportantDelivery = p.important + + if n.security.ExposeEnvInfo { + info.Env = p.EnvList() + } else { + info.Env = make(map[gen.Env]any) + } + + // initialized slices make json marshaler treat them as an empty list + // (not a nil value) + info.Metas = []gen.Alias{} + info.LinksPID = []gen.PID{} + info.MonitorsPID = []gen.PID{} + info.LinksProcessID = []gen.ProcessID{} + info.MonitorsProcessID = []gen.ProcessID{} + info.LinksAlias = []gen.Alias{} + info.MonitorsAlias = []gen.Alias{} + info.LinksEvent = []gen.Event{} + info.MonitorsEvent = []gen.Event{} + info.LinksNode = []gen.Atom{} + info.MonitorsNode = []gen.Atom{} + + p.metas.Range(func(k, _ any) bool { + meta := k.(gen.Alias) + info.Metas = append(info.Metas, meta) + return true + }) + + p.targets.Range(func(k, v any) bool { + is_link := v.(bool) + switch m := k.(type) { + case gen.PID: + if is_link { + info.LinksPID = append(info.LinksPID, m) + break + } + info.MonitorsPID = append(info.MonitorsPID, m) + case gen.ProcessID: + if is_link { + info.LinksProcessID = append(info.LinksProcessID, m) + break + } + info.MonitorsProcessID = append(info.MonitorsProcessID, m) + case gen.Alias: + if is_link { + info.LinksAlias = append(info.LinksAlias, m) + break + } + info.MonitorsAlias = append(info.MonitorsAlias, m) + case gen.Event: + if is_link { + info.LinksEvent = append(info.LinksEvent, m) + break + } + info.MonitorsEvent = append(info.MonitorsEvent, m) + case gen.Atom: + if is_link { + info.LinksNode = append(info.LinksNode, m) + break + } + info.MonitorsNode = append(info.MonitorsNode, m) + } + return true + }) + + return info, nil } -func (n *node) Stats() NodeStats { - stats := NodeStats{} +func (n *node) SetLogLevelProcess(pid gen.PID, level gen.LogLevel) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + value, loaded := n.processes.Load(pid) + if 
loaded == false { + return gen.ErrProcessUnknown + } - coreStats := n.coreStats() - stats.TotalProcesses = coreStats.totalProcesses - stats.TotalReferences = coreStats.totalReferences - stats.RunningProcesses = uint64(coreStats.processes) - stats.RegisteredNames = uint64(coreStats.names) - stats.RegisteredAliases = uint64(coreStats.aliases) + p := value.(*process) + return p.log.SetLevel(level) +} - monStats := n.monitorStats() - stats.MonitorsByPid = uint64(monStats.monitorsByPid) - stats.MonitorsByName = uint64(monStats.monitorsByName) - stats.MonitorsNodes = uint64(monStats.monitorsNodes) - stats.Links = uint64(monStats.links) +func (n *node) LogLevelProcess(pid gen.PID) (gen.LogLevel, error) { + var level gen.LogLevel + if n.isRunning() == false { + return level, gen.ErrNodeTerminated + } + value, loaded := n.processes.Load(pid) + if loaded == false { + return level, gen.ErrProcessUnknown + } - stats.LoadedApplications = uint64(len(n.LoadedApplications())) - stats.RunningApplications = uint64(len(n.WhichApplications())) + p := value.(*process) + level = p.log.Level() + return level, nil +} - netStats := n.networkStats() - stats.NetworkConnections = uint64(netStats.connections) - stats.ProxyConnections = uint64(netStats.proxyConnections) - stats.TransitConnections = uint64(netStats.transitConnections) +func (n *node) SetLogLevelMeta(m gen.Alias, level gen.LogLevel) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + value, loaded := n.aliases.Load(m) + if loaded == false { + return gen.ErrProcessUnknown + } + + p := value.(*process) - return stats + value, loaded = p.metas.Load(m) + if loaded == false { + return gen.ErrMetaUnknown + } + mp := value.(*meta) + + return mp.log.SetLevel(level) } -// WaitWithTimeout -func (n *node) WaitWithTimeout(d time.Duration) error { - return n.coreWaitWithTimeout(d) +func (n *node) LogLevelMeta(m gen.Alias) (gen.LogLevel, error) { + var level gen.LogLevel + if n.isRunning() == false { + return level, gen.ErrNodeTerminated + } + value, loaded := n.aliases.Load(m) + if loaded == false { + return level, gen.ErrProcessUnknown + } + + p := value.(*process) + + value, loaded = p.metas.Load(m) + if loaded == false { + return level, gen.ErrMetaUnknown + } + mp := value.(*meta) + level = mp.log.Level() + + return level, nil } -// LoadedApplications returns a list of loaded applications (including running applications) -func (n *node) LoadedApplications() []gen.ApplicationInfo { - return n.listApplications(false) +func (n *node) Info() (gen.NodeInfo, error) { + var info gen.NodeInfo + if n.isRunning() == false { + return info, gen.ErrNodeTerminated + } + + info.Name = n.name + info.Uptime = n.Uptime() + info.Version = n.version + info.Framework = n.framework + info.Commercial = n.Commercial() + info.LogLevel = n.log.Level() + + mli := make(map[string]int) + for _, level := range gen.DefaultLogLevels { + loggers := n.loggers[level] + loggers.Range(func(k, v any) bool { + loggername := k.(string) + n, found := mli[loggername] + if found == false { + loggerbehavior := strings.TrimPrefix(reflect.TypeOf(v).String(), "*") + li := gen.LoggerInfo{ + Name: loggername, + Behavior: loggerbehavior, + } + info.Loggers = append(info.Loggers, li) + n = len(info.Loggers) - 1 + mli[loggername] = n + } + info.Loggers[n].Levels = append(info.Loggers[n].Levels, level) + return true + }) + } + + if n.security.ExposeEnvInfo { + info.Env = n.EnvList() + } else { + info.Env = make(map[gen.Env]any) + } + + n.processes.Range(func(_, v any) bool { + 
info.ProcessesTotal++ + p := v.(*process) + switch p.State() { + case gen.ProcessStateRunning: + info.ProcessesRunning++ + case gen.ProcessStateWaitResponse: + info.ProcessesRunning++ + case gen.ProcessStateZombee: + info.ProcessesZombee++ + } + return true + }) + + n.names.Range(func(_, _ any) bool { + info.RegisteredNames++ + return true + }) + n.aliases.Range(func(_, _ any) bool { + info.RegisteredAliases++ + return true + }) + n.events.Range(func(_, _ any) bool { + info.RegisteredEvents++ + return true + }) + + info.ApplicationsTotal = int64(len(n.Applications())) + info.ApplicationsRunning = int64(len(n.ApplicationsRunning())) + + var mstat runtime.MemStats + runtime.ReadMemStats(&mstat) + info.MemoryUsed = mstat.Sys + info.MemoryAlloc = mstat.Alloc + + utime, stime := osdep.ResourceUsage() + info.UserTime = utime + info.SystemTime = stime + + return info, nil } -// WhichApplications returns a list of running applications -func (n *node) WhichApplications() []gen.ApplicationInfo { - return n.listApplications(true) +func (n *node) ProcessList() ([]gen.PID, error) { + var pl []gen.PID + + if n.isRunning() == false { + return nil, gen.ErrNodeTerminated + } + + n.processes.Range(func(k, _ any) bool { + pl = append(pl, k.(gen.PID)) + return true + }) + + return pl, nil } -// WhichApplications returns a list of running applications -func (n *node) listApplications(onlyRunning bool) []gen.ApplicationInfo { - info := []gen.ApplicationInfo{} - for _, rb := range n.RegisteredBehaviorGroup(appBehaviorGroup) { - spec, ok := rb.Data.(*gen.ApplicationSpec) - if !ok { - continue +func (n *node) ProcessListShortInfo(start, limit int) ([]gen.ProcessShortInfo, error) { + if n.isRunning() == false { + return nil, gen.ErrNodeTerminated + } + + if start < 1000 || limit < 0 { + return nil, gen.ErrIncorrect + } + if limit == 0 { + limit = 100 + } + ustart := uint64(start) + psi := []gen.ProcessShortInfo{} + pid := n.corePID + + for limit > 0 { + + if ustart > n.nextID { + break } - if onlyRunning && spec.Process == nil { - // list only started apps + pid.ID = ustart + ustart++ + v, found := n.processes.Load(pid) + if found == false { continue } + process := v.(*process) + messagesMailbox := process.mailbox.Main.Len() + + process.mailbox.System.Len() + + process.mailbox.Urgent.Len() + + process.mailbox.Log.Len() + + info := gen.ProcessShortInfo{ + PID: process.pid, + Name: process.name, + Application: process.application, + Behavior: process.sbehavior, + MessagesIn: process.messagesIn, + MessagesOut: process.messagesOut, + MessagesMailbox: uint64(messagesMailbox), + RunningTime: process.runningTime, + Uptime: process.Uptime(), + State: process.State(), + Parent: process.parent, + Leader: process.leader, + LogLevel: process.log.Level(), + } + psi = append(psi, info) + limit-- + } - appInfo := gen.ApplicationInfo{ - Name: spec.Name, - Description: spec.Description, - Version: spec.Version, + return psi, nil + +} + +func (n *node) NetworkStart(options gen.NetworkOptions) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + return n.network.start(options) +} + +func (n *node) NetworkStop() error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + return n.network.stop() +} + +func (n *node) Network() gen.Network { + if n.isRunning() == false { + return nil + } + return n.network +} + +func (n *node) Stop() { + n.stop(false) +} + +func (n *node) StopForce() { + n.stop(true) +} + +func (n *node) stop(force bool) { + if n.isRunning() == false { + // already stopped + return 
+ } + + if force == false { + n.applications.Range(func(_, v any) bool { + app := v.(*application) + if app.spec.Name == system.Name { + // skip system app + return true + } + app.stop(false, 5*time.Second) + return true + }) + } + + n.processes.Range(func(_, v any) bool { + p := v.(*process) + // do not kill system app processes + if p.application == system.Name { + return true } - if spec.Process != nil { - appInfo.PID = spec.Process.Self() + + if force { + n.Kill(p.pid) + return true } - info = append(info, appInfo) + + // we should send an exit signal using the parent pid of the process, + // so it won't be trapped + n.RouteSendExit(p.parent, p.pid, gen.TerminateReasonShutdown) + return true + }) + + if force == false { + n.waitprocesses.Wait() } - return info + + n.NetworkStop() + atomic.StoreInt64(&n.creation, 0) + n.log.Info("node %s stopped", n.name) + + // collect and terminate loggers + loggers := make(map[string]gen.LoggerBehavior) + for _, l := range n.loggers { + l.Range(func(k, v any) bool { + name := k.(string) + logger := v.(gen.LoggerBehavior) + loggers[name] = logger + return true + }) + } + for _, logger := range loggers { + logger.Terminate() + } + + close(n.wait) } -// ApplicationInfo returns information about application -func (n *node) ApplicationInfo(name string) (gen.ApplicationInfo, error) { - rb, err := n.RegisteredBehavior(appBehaviorGroup, name) - if err != nil { - return gen.ApplicationInfo{}, lib.ErrAppUnknown +func (n *node) Wait() { + // if the node is terminated this channel is already closed so it returns immediately + <-n.wait +} + +func (n *node) WaitWithTimeout(timeout time.Duration) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + timer := time.NewTimer(timeout) + defer timer.Stop() + select { + case <-timer.C: + return gen.ErrTimeout + case <-n.wait: + return nil } - spec, ok := rb.Data.(*gen.ApplicationSpec) - if !ok { - return gen.ApplicationInfo{}, lib.ErrAppUnknown +} + +func (n *node) Send(to any, message any) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated } - pid := etf.Pid{} - if spec.Process != nil { - pid = spec.Process.Self() + options := gen.MessageOptions{ + Priority: gen.MessagePriorityNormal, } - appInfo := gen.ApplicationInfo{ - Name: spec.Name, - Description: spec.Description, - Version: spec.Version, - PID: pid, + switch t := to.(type) { + case gen.Atom: + return n.RouteSendProcessID(n.corePID, gen.ProcessID{Name: t, Node: n.name}, options, message) + case gen.PID: + return n.RouteSendPID(n.corePID, t, options, message) + case gen.ProcessID: + return n.RouteSendProcessID(n.corePID, t, options, message) + case gen.Alias: + return n.RouteSendAlias(n.corePID, t, options, message) + } + + return gen.ErrUnsupported } -// ApplicationLoad loads the application specification for an application. Returns name of -// loaded application. -func (n *node) ApplicationLoad(app gen.ApplicationBehavior, args ...etf.Term) (string, error) { +func (n *node) SendEvent(name gen.Atom, token gen.Ref, options gen.MessageOptions, message any) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } - spec, err := app.Load(args...) 
- if err != nil { - return "", err + n.log.Trace("node.SendEvent %s with token %s", name, token) + + em := gen.MessageEvent{ + Event: gen.Event{Name: name, Node: n.name}, + Timestamp: time.Now().UnixNano(), + Message: message, } - err = n.RegisterBehavior(appBehaviorGroup, spec.Name, app, &spec) - if err != nil { - return "", err + + return n.RouteSendEvent(n.corePID, token, options, em) +} + +func (n *node) RegisterEvent(name gen.Atom, options gen.EventOptions) (gen.Ref, error) { + var empty gen.Ref + if n.isRunning() == false { + return empty, gen.ErrNodeTerminated } - return spec.Name, nil + + n.log.Trace("node.RegisterEvent %s", name) + + return n.registerEvent(name, n.corePID, options) +} + +func (n *node) UnregisterEvent(name gen.Atom) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + n.log.Trace("node.UnregisterEvent %s", name) + return n.unregisterEvent(name, n.corePID) +} + +func (n *node) SendExit(pid gen.PID, reason error) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + return n.RouteSendExit(n.corePID, pid, reason) } -// ApplicationUnload unloads given application -func (n *node) ApplicationUnload(appName string) error { - rb, err := n.RegisteredBehavior(appBehaviorGroup, appName) +func (n *node) Kill(pid gen.PID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + value, loaded := n.processes.Load(pid) + if loaded == false { + return gen.ErrProcessUnknown + } + + p := value.(*process) + state := atomic.SwapInt32(&p.state, int32(gen.ProcessStateZombee)) + switch state { + case int32(gen.ProcessStateWaitResponse), int32(gen.ProcessStateRunning): + // do not unregister process until its goroutine stopped + return nil + case int32(gen.ProcessStateTerminated): + atomic.StoreInt32(&p.state, int32(gen.ProcessStateTerminated)) + return nil + } + + old := atomic.SwapInt32(&p.state, int32(gen.ProcessStateTerminated)) + if old == int32(gen.ProcessStateTerminated) { + return nil + } + // unregister process and stuff belonging to it + n.unregisterProcess(p, gen.TerminateReasonKill) + + go func() { + if lib.Recover() { + defer func() { + if rcv := recover(); rcv != nil { + pc, fn, line, _ := runtime.Caller(2) + p.log.Panic("panic in ProcessTerminate - %s[%s] %#v at %s[%s:%d]", + p.pid, p.name, rcv, runtime.FuncForPC(pc).Name(), fn, line) + } + }() + } + p.behavior.ProcessTerminate(gen.TerminateReasonKill) + }() + + return nil +} + +func (n *node) ProcessState(pid gen.PID) (gen.ProcessState, error) { + if n.isRunning() == false { + return 0, gen.ErrNodeTerminated + } + value, loaded := n.processes.Load(pid) + if loaded == false { + return 0, gen.ErrProcessUnknown + } + p := value.(*process) + return p.State(), nil +} + +func (n *node) ApplicationLoad(app gen.ApplicationBehavior, args ...any) (name gen.Atom, r error) { + if lib.Recover() { + defer func() { + if rcv := recover(); rcv != nil { + pc, fn, line, _ := runtime.Caller(2) + n.log.Panic("panic in ApplicationLoad - %#v at %s[%s:%d]", + rcv, runtime.FuncForPC(pc).Name(), fn, line) + r = gen.ErrApplicationLoadPanic + } + }() + } + + spec, err := app.Load(n, args...) 
if err != nil { - return lib.ErrAppUnknown + return name, err + } + + if len(spec.Group) == 0 { + return name, gen.ErrApplicationEmpty + } + + if len(spec.Name) == 0 { + return name, gen.ErrApplicationName + } + + if spec.Depends.Network { + // TODO make it right + if n.network == nil { + return name, gen.ErrApplicationDepends + } + } + + if spec.Mode == 0 { + spec.Mode = gen.ApplicationModeTemporary + } + + if spec.Depends.Applications == nil { + spec.Depends.Applications = []gen.Atom{} + } + + env := n.EnvList() + for k, v := range spec.Env { + env[k] = v + } + spec.Env = env + + if spec.LogLevel == gen.LogLevelDefault { + spec.LogLevel = n.log.Level() } - spec, ok := rb.Data.(*gen.ApplicationSpec) - if !ok { - return lib.ErrAppUnknown + a := &application{ + spec: spec, + node: n, + behavior: app, + state: int32(gen.ApplicationStateLoaded), + mode: spec.Mode, } - if spec.Process != nil { - return lib.ErrAppAlreadyStarted + if _, exist := n.applications.LoadOrStore(spec.Name, a); exist { + return spec.Name, gen.ErrTaken } - return n.UnregisterBehavior(appBehaviorGroup, appName) + a.registerAppRoute() + + return spec.Name, nil } -// ApplicationStartPermanent start Application with start type ApplicationStartPermanent -// If this application terminates, all other applications and the entire node are also -// terminated -func (n *node) ApplicationStartPermanent(appName string, args ...etf.Term) (gen.Process, error) { - return n.applicationStart(gen.ApplicationStartPermanent, appName, args...) +func (n *node) ApplicationInfo(name gen.Atom) (gen.ApplicationInfo, error) { + var info gen.ApplicationInfo + v, exist := n.applications.Load(name) + if exist == false { + return info, gen.ErrApplicationUnknown + } + app := v.(*application) + info = app.info() + return info, nil } -// ApplicationStartTransient start Application with start type ApplicationStartTransient -// If transient application terminates with reason 'normal', this is reported and no -// other applications are terminated. Otherwise, all other applications and node -// are terminated -func (n *node) ApplicationStartTransient(appName string, args ...etf.Term) (gen.Process, error) { - return n.applicationStart(gen.ApplicationStartTransient, appName, args...) +func (n *node) ApplicationUnload(name gen.Atom) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown + } + + app := v.(*application) + if unloaded := app.tryUnload(); unloaded == false { + return gen.ErrApplicationRunning + } + n.applications.Delete(name) + app.unregisterAppRoute() + return nil } -// ApplicationStartTemporary start Application with start type ApplicationStartTemporary -// If an application terminates, this is reported but no other applications -// are terminated -func (n *node) ApplicationStartTemporary(appName string, args ...etf.Term) (gen.Process, error) { - return n.applicationStart(gen.ApplicationStartTemporary, appName, args...) 
+func (n *node) ApplicationStart(name gen.Atom, options gen.ApplicationOptions) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown + } + app := v.(*application) + + // check dependencies on other applications + for _, dep := range app.spec.Depends.Applications { + if err := n.ApplicationStart(dep, options); err != nil { + if err == gen.ErrApplicationUnknown { + n.log.Error("unable to start %s: unknown dependent application %s", name, dep) + return gen.ErrApplicationDepends + } + + if err != gen.ErrApplicationRunning { + n.log.Error("unable to start %s: start dependent application %s failed: %s", name, dep, err) + return gen.ErrApplicationDepends + } + } + } + + opts := gen.ApplicationOptionsExtra{ + ApplicationOptions: options, + CorePID: n.corePID, + CoreEnv: n.EnvList(), + CoreLogLevel: n.log.Level(), + } + return app.start(app.spec.Mode, opts) } -// ApplicationStart start Application with start type defined in the gen.ApplicationSpec.StartType -// on the loading application -func (n *node) ApplicationStart(appName string, args ...etf.Term) (gen.Process, error) { - return n.applicationStart("", appName, args...) +func (n *node) ApplicationStartPermanent(name gen.Atom, options gen.ApplicationOptions) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown + } + app := v.(*application) + opts := gen.ApplicationOptionsExtra{ + ApplicationOptions: options, + CorePID: n.corePID, + CoreEnv: n.EnvList(), + CoreLogLevel: n.log.Level(), + } + return app.start(gen.ApplicationModePermanent, opts) } -func (n *node) applicationStart(startType gen.ApplicationStartType, appName string, args ...etf.Term) (gen.Process, error) { - rb, err := n.RegisteredBehavior(appBehaviorGroup, appName) - if err != nil { - return nil, lib.ErrAppUnknown +func (n *node) ApplicationStartTransient(name gen.Atom, options gen.ApplicationOptions) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown } + app := v.(*application) + opts := gen.ApplicationOptionsExtra{ + ApplicationOptions: options, + CorePID: n.corePID, + CoreEnv: n.EnvList(), + CoreLogLevel: n.log.Level(), + } + return app.start(gen.ApplicationModeTransient, opts) +} - spec, ok := rb.Data.(*gen.ApplicationSpec) - if !ok { - return nil, lib.ErrAppUnknown +func (n *node) ApplicationStartTemporary(name gen.Atom, options gen.ApplicationOptions) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown + } + app := v.(*application) + opts := gen.ApplicationOptionsExtra{ + ApplicationOptions: options, + CorePID: n.corePID, + CoreEnv: n.EnvList(), + CoreLogLevel: n.log.Level(), + } + return app.start(gen.ApplicationModeTemporary, opts) +} + +func (n *node) ApplicationStop(name gen.Atom) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown } - if startType != "" { - spec.StartType = startType + // system app can not be stopped + if name == system.Name { + return gen.ErrNotAllowed } - // to prevent race condition on starting application we should - // make sure that nobodyelse starting it - spec.Lock() - defer spec.Unlock() + app := v.(*application) + return app.stop(false, 5*time.Second) +} - if spec.Process != nil { - return nil, lib.ErrAppAlreadyStarted +func (n *node) ApplicationStopForce(name gen.Atom) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown } - // start 
dependencies - for _, depAppName := range spec.Applications { - if _, e := n.ApplicationStart(depAppName); e != nil && e != lib.ErrAppAlreadyStarted { - return nil, e + // system app can not be stopped + if name == system.Name { + return gen.ErrNotAllowed + } + + app := v.(*application) + return app.stop(true, 0) +} + +func (n *node) ApplicationStopWithTimeout(name gen.Atom, timeout time.Duration) error { + v, exist := n.applications.Load(name) + if exist == false { + return gen.ErrApplicationUnknown + } + app := v.(*application) + return app.stop(false, timeout) +} + +func (n *node) Applications() []gen.Atom { + apps := []gen.Atom{} + n.applications.Range(func(_, v any) bool { + app := v.(*application) + apps = append(apps, app.spec.Name) + return true + }) + return apps +} + +func (n *node) ApplicationsRunning() []gen.Atom { + apps := []gen.Atom{} + n.applications.Range(func(_, v any) bool { + app := v.(*application) + if app.isRunning() { + apps = append(apps, app.spec.Name) } + return true + }) + return apps +} + +func (n *node) Log() gen.Log { + return n.log +} + +func (n *node) LoggerAddPID(pid gen.PID, name string, filter ...gen.LogLevel) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + value, loaded := n.processes.Load(pid) + if loaded == false { + return gen.ErrProcessUnknown } - env := map[gen.EnvKey]interface{}{ - gen.EnvKeyAppSpec: spec, + if name == "" { + return gen.ErrIncorrect } - options := gen.ProcessOptions{ - Env: env, + + p := value.(*process) + + if p.loggername != "" { + // already registered as a logger + return gen.ErrNotAllowed } - process, e := n.Spawn("", options, rb.Behavior, args...) - if e != nil { - return nil, e + + logger := createProcessLogger(p.mailbox.Log, p.run) + if err := n.LoggerAdd(name, logger, filter...); err == nil { + p.loggername = name + p.log.SetLevel(gen.LogLevelDisabled) + } else { + return err } - return process, nil + if lib.Trace() { + n.log.Trace("node.LoggerAddPID added new process logger %s with name %q", pid, name) + } + return nil } -// ApplicationStop stop running application -func (n *node) ApplicationStop(name string) error { - rb, err := n.RegisteredBehavior(appBehaviorGroup, name) - if err != nil { - return lib.ErrAppUnknown +func (n *node) LoggerAdd(name string, logger gen.LoggerBehavior, filter ...gen.LogLevel) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + if logger == nil { + return gen.ErrIncorrect } - spec, ok := rb.Data.(*gen.ApplicationSpec) - if !ok { - return lib.ErrAppUnknown + if filter == nil { + filter = gen.DefaultLogFilter } - spec.Lock() - defer spec.Unlock() - if spec.Process == nil { - return lib.ErrAppIsNotRunning + for _, l := range n.loggers { + if _, exist := l.Load(name); exist { + return gen.ErrTaken + } } - if e := spec.Process.Exit("normal"); e != nil { - return e + for _, level := range filter { + if l := n.loggers[level]; l != nil { + l.Store(name, logger) + } } - // we should wait until children process stopped. 
- if e := spec.Process.WaitWithTimeout(5 * time.Second); e != nil { - return lib.ErrProcessBusy + + if lib.Trace() { + n.log.Trace("node.LoggerAdd added new logger with name %q", name) } return nil } -// Links -func (n *node) Links(process etf.Pid) []etf.Pid { - return n.processLinks(process) +func (n *node) LoggerDeletePID(pid gen.PID) { + if n.isRunning() == false { + return + } + value, loaded := n.processes.Load(pid) + if loaded == false { + return + } + + p := value.(*process) + if p.loggername != "" { + n.LoggerDelete(p.loggername) + // trace before clearing the name, otherwise an empty value would be logged + n.log.Trace("node.LoggerDeletePID removed process logger %s with name %q", pid, p.loggername) + p.loggername = "" + // TODO we should restore previous log level + p.log.SetLevel(gen.LogLevelInfo) + } + return } -// Monitors -func (n *node) Monitors(process etf.Pid) []etf.Pid { - return n.processMonitors(process) +func (n *node) LoggerDelete(name string) { + var logger gen.LoggerBehavior + + if n.isRunning() == false { + return + } + + for _, l := range n.loggers { + if v, exist := l.LoadAndDelete(name); exist { + logger = v.(gen.LoggerBehavior) + } + } + // call terminate + if logger != nil { + logger.Terminate() + } + n.log.Trace("node.LoggerDelete removed logger with name %q", name) } -// MonitorsByName -func (n *node) MonitorsByName(process etf.Pid) []gen.ProcessID { - return n.processMonitorsByName(process) +func (n *node) LoggerLevels(name string) []gen.LogLevel { + var levels []gen.LogLevel + for level, l := range n.loggers { + if _, exist := l.Load(name); exist { + levels = append(levels, level) + } + } + return levels } -// MonitoredBy -func (n *node) MonitoredBy(process etf.Pid) []etf.Pid { - return n.processMonitoredBy(process) +func (n *node) Loggers() []string { + m := make(map[string]bool) + for _, l := range n.loggers { + l.Range(func(k, _ any) bool { + name := k.(string) + m[name] = true + return true + }) + } + loggers := []string{} + for k := range m { + loggers = append(loggers, k) + } + return loggers } -// ProvideRemoteSpawn -func (n *node) ProvideRemoteSpawn(name string, behavior gen.ProcessBehavior) error { - return n.RegisterBehavior(remoteBehaviorGroup, name, behavior, nil) +func (n *node) dolog(message gen.MessageLog, loggername string) { + if n.isRunning() == false { + return + } + if l := n.loggers[message.Level]; l != nil { + l.Range(func(k, v any) bool { + if loggername == "" { + logger := v.(gen.LoggerBehavior) + logger.Log(message) + return true + } + if loggername == k.(string) { + logger := v.(gen.LoggerBehavior) + logger.Log(message) + } + return true + }) + } } -// RevokeRemoteSpawn -func (n *node) RevokeRemoteSpawn(name string) error { - return n.UnregisterBehavior(remoteBehaviorGroup, name) +func (n *node) SetCTRLC(enable bool) { + if enable == true && n.ctrlc != nil { + // already set up + return + } + + if enable == false && n.ctrlc != nil { + close(n.ctrlc) + n.ctrlc = nil + n.Log().Info("(CTRL+C) disabled for %s", n.name) + return + } + + if enable == false { + // nothing to disable + return + } + + go func() { + // buffered channel, as required by signal.Notify + n.ctrlc = make(chan os.Signal, 1) + signal.Notify(n.ctrlc, os.Interrupt, syscall.SIGTERM) + n.Log().Info("(CTRL+C) enabled for %s", n.name) + + n.Log().Info(" press Ctrl+C to enable/disable debug logging level for %s", n.name) + n.Log().Info(" press Ctrl+C twice to stop %s gracefully", n.name) + ctrlcTime := time.Now().Unix() + level := n.Log().Level() + debug := false + for { + sig := <-n.ctrlc + if sig == nil { + // closed channel. 
disable ctrlc + signal.Reset() + + return + } + + now := time.Now().Unix() + if now-ctrlcTime == 0 { + signal.Reset() + n.Log().Info("(CTRL+C) stopping %s (graceful shutdown)...", n.name) + n.Stop() + return + } + + ctrlcTime = now + + if debug { + n.Log().Info("(CTRL+C) disabling debug level for %s", n.name) + n.Log().SetLevel(level) + debug = false + continue + } + + n.Log().Info("(CTRL+C) enabling debug level for %s", n.name) + level = n.Log().Level() + n.Log().SetLevel(gen.LogLevelDebug) + debug = true + } + }() } -// DefaultFlags -func DefaultFlags() Flags { - // all features are enabled by default - return Flags{ - Enable: true, - EnableHeaderAtomCache: true, - EnableBigCreation: true, - EnableBigPidRef: true, - EnableFragmentation: true, - EnableAlias: true, - EnableRemoteSpawn: true, - EnableCompression: true, - EnableProxy: true, +// +// private +// + +func (n *node) spawn(factory gen.ProcessFactory, options gen.ProcessOptionsExtra) (gen.PID, error) { + var empty gen.PID + + if n.isRunning() == false { + return empty, gen.ErrNodeTerminated + } + + if factory == nil { + return empty, gen.ErrIncorrect + } + + if options.ParentPID == empty || options.ParentLeader == empty { + return empty, gen.ErrParentUnknown + } + + p := &process{ + node: n, + response: make(chan response), + creation: time.Now().Unix(), + keeporder: true, + state: int32(gen.ProcessStateInit), + parent: options.ParentPID, + leader: options.ParentLeader, + application: options.Application, + important: options.ImportantDelivery, + } + + if options.Register != "" { + if _, exist := n.names.LoadOrStore(options.Register, p); exist { + return p.pid, gen.ErrTaken + } + p.name = options.Register + p.registered.Store(true) + } + + // init mailbox + if options.MailboxSize > 0 { + p.fallback = options.Fallback + p.mailbox.Main = lib.NewQueueLimitMPSC(options.MailboxSize, false) + p.mailbox.System = lib.NewQueueLimitMPSC(options.MailboxSize, false) + p.mailbox.Urgent = lib.NewQueueLimitMPSC(options.MailboxSize, false) + p.mailbox.Log = lib.NewQueueLimitMPSC(options.MailboxSize, false) + } else { + p.mailbox.Main = lib.NewQueueMPSC() + p.mailbox.System = lib.NewQueueMPSC() + p.mailbox.Urgent = lib.NewQueueMPSC() + p.mailbox.Log = lib.NewQueueMPSC() + } + + // create pid + pid := gen.PID{ + Node: n.name, + ID: atomic.AddUint64(&n.nextID, 1), + Creation: n.creation, + } + p.pid = pid + + for k, v := range options.ParentEnv { + p.SetEnv(k, v) + } + if lib.Trace() { + n.log.Trace("...spawn new process %s (parent %s, %s) using %#v", p.pid, p.parent, p.name, factory) + } + + for k, v := range options.Env { + p.SetEnv(k, v) + } + + if options.Leader != empty { + p.leader = options.Leader + } + + p.compression = options.Compression + if p.compression.Level == 0 { + p.compression.Level = gen.DefaultCompressionLevel + } + if p.compression.Type == "" { + p.compression.Type = gen.DefaultCompressionType + } + if p.compression.Threshold == 0 { + p.compression.Threshold = gen.DefaultCompressionThreshold + } + + switch options.SendPriority { + case gen.MessagePriorityHigh: + p.priority = gen.MessagePriorityHigh + case gen.MessagePriorityMax: + p.priority = gen.MessagePriorityMax + default: + p.priority = gen.MessagePriorityNormal + } + + // create a new process with provided behavior + behavior := factory() + if behavior == nil { + n.names.Delete(p.name) + return p.pid, errors.New("factory function must return a non-nil value") + } + p.behavior = behavior + p.sbehavior = strings.TrimPrefix(reflect.TypeOf(behavior).String(), "*") + + if 
options.LogLevel == gen.LogLevelDefault { + // parent's log level + options.LogLevel = options.ParentLogLevel + } + p.log = createLog(options.LogLevel, n.dolog) + + logSource := gen.MessageLogProcess{ + Node: p.pid.Node, + PID: p.pid, + Name: p.name, + Behavior: p.sbehavior, + } + p.log.setSource(logSource) + + if err := behavior.ProcessInit(p, options.Args...); err != nil { + n.names.Delete(p.name) + // make sure to notify children that might have been spawned + // (during ProcessInit callback) with the enabled LinkParent option + messageExit := gen.MessageExitPID{ + PID: p.pid, + Reason: err, + } + for _, pid := range n.links.unregister(p.pid) { + n.sendExitMessage(p.pid, pid, messageExit) + } + + // clean up links to the processes that might have been spawned + // (during ProcessInit callback) with the enabled LinkChild option + p.targets.Range(func(k, v any) bool { + if v.(bool) { + n.links.unregisterConsumer(k, p.pid) + } + return true + }) + + // terminate meta processes that were spawned during initialization + + p.metas.Range(func(_, v any) bool { + m := v.(*meta) + + qm := gen.TakeMailboxMessage() + qm.From = p.pid + qm.Type = gen.MailboxMessageTypeExit + qm.Message = err + + if ok := m.system.Push(qm); ok == false { + p.log.Error("unable to stop meta process %s. mailbox is full", m.id) + } + p.node.aliases.Delete(m.id) + go m.handle() + return true + }) + + return p.pid, err + } + + if options.LinkParent { + n.links.registerConsumer(p.parent, p.pid) + p.targets.Store(p.parent, true) + } + + // register process and switch it to the sleep state + p.state = int32(gen.ProcessStateSleep) + n.processes.Store(p.pid, p) + + // do not count system app processes + if p.application != system.Name { + n.waitprocesses.Add(1) + } + + // process could send a message to itself during initialization + // so we should run this process to make sure this message is handled + p.run() + + return p.pid, nil +} + +func (n *node) unregisterProcess(p *process, reason error) { + n.processes.Delete(p.pid) + n.RouteTerminatePID(p.pid, reason) + + if p.application != system.Name { + // do not count system app processes + n.waitprocesses.Done() } + n.log.Trace("...unregisterProcess %s", p.pid) + + if p.registered.Load() { + n.names.Delete(p.name) + pname := gen.ProcessID{Name: p.name, Node: n.name} + n.RouteTerminateProcessID(pname, reason) + } + + for _, a := range p.aliases { + n.aliases.Delete(a) + n.RouteTerminateAlias(a, reason) + } + + p.events.Range(func(k, _ any) bool { + ev := gen.Event{Name: k.(gen.Atom), Node: p.node.name} + n.events.Delete(ev) + n.RouteTerminateEvent(ev, reason) + return true + }) + + p.targets.Range(func(target, v any) bool { + if v.(bool) { + n.links.unregisterConsumer(target, p.pid) + } else { + n.monitors.unregisterConsumer(target, p.pid) + } + return true + }) + + // send exit signal to the meta processes + p.metas.Range(func(_, v any) bool { + m := v.(*meta) + + qm := gen.TakeMailboxMessage() + qm.From = p.pid + qm.Type = gen.MailboxMessageTypeExit + qm.Message = reason + + p.node.aliases.Delete(m.id) + if ok := m.system.Push(qm); ok == false { + p.log.Error("unable to stop meta process %s. mailbox is full", m.id) + } + m.handle() + return true + }) + + if p.loggername != "" { // acted as a logger + n.LoggerDelete(p.loggername) + // enable logging. 
it might be used + // in the termination callback (like act.ActorBehavior.Terminate) + p.log.SetLevel(gen.LogLevelInfo) + } + + if p.application == "" { + return + } + + if v, exist := n.applications.Load(p.application); exist { + // this process was a member of the application + app := v.(*application) + app.terminate(p.pid, reason) + } +} + +func (n *node) isRunning() bool { + return atomic.LoadInt64(&n.creation) > 0 } -// DefaultCloudFlags -func DefaultCloudFlags() CloudFlags { - return CloudFlags{ - Enable: true, - EnableIntrospection: true, - EnableMetrics: true, +func (n *node) registerAlias(alias gen.Alias, p *process) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + n.log.Trace("...registerAlias %s for %s", alias, p.pid) + if _, exist := n.aliases.LoadOrStore(alias, p); exist { + return gen.ErrTaken } + return nil } -func DefaultProxyFlags() ProxyFlags { - return ProxyFlags{ - Enable: true, - EnableLink: true, - EnableMonitor: true, - EnableRemoteSpawn: true, - EnableEncryption: false, +func (n *node) unregisterAlias(alias gen.Alias, p *process) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated } + value, found := n.aliases.Load(alias) + if found == false { + return gen.ErrAliasUnknown + } + owner := value.(*process) + if p != owner { + return gen.ErrAliasOwner + } + n.log.Trace("...unregisterAlias %s for %s", alias, p.pid) + + n.aliases.Delete(alias) + return nil } -// DefaultProtoOptions -func DefaultProtoOptions() ProtoOptions { - return ProtoOptions{ - NumHandlers: runtime.NumCPU(), - MaxMessageSize: 0, // no limit - SendQueueLength: DefaultProtoSendQueueLength, - RecvQueueLength: DefaultProtoRecvQueueLength, - FragmentationUnit: DefaultProtoFragmentationUnit, +func (n *node) registerEvent(name gen.Atom, owner gen.PID, options gen.EventOptions) (gen.Ref, error) { + token := gen.Ref{} + if n.isRunning() == false { + return token, gen.ErrNodeTerminated } + + n.log.Trace("...registerEvent %s for %s", name, owner) + ev := gen.Event{Name: name, Node: n.name} + event := &eventOwner{ + name: name, + producer: owner, + notify: options.Notify, + } + + if options.Buffer > 0 { + event.last = lib.NewQueueLimitMPSC(int64(options.Buffer), true) + } + + if _, exist := n.events.LoadOrStore(ev, event); exist { + return token, gen.ErrTaken + } + event.token = n.MakeRef() + return event.token, nil +} + +func (n *node) unregisterEvent(name gen.Atom, pid gen.PID) error { + if n.isRunning() == false { + return gen.ErrNodeTerminated + } + + n.log.Trace("...unregisterEvent %s for %s", name, pid) + ev := gen.Event{Name: name, Node: n.name} + value, exist := n.events.Load(ev) + if exist == false { + return gen.ErrEventUnknown + } + + event := value.(*eventOwner) + if event.producer != pid { + return gen.ErrEventOwner + } + + n.events.Delete(ev) + n.RouteTerminateEvent(ev, gen.ErrUnregistered) + return nil } -func DefaultListener() Listener { - return Listener{} +func (n *node) validateLicenses(versions ...gen.Version) { + for _, version := range versions { + switch version.License { + case gen.LicenseMIT: + continue + + case "": + if lib.Trace() { + n.Log().Trace("undefined license for %s", version) + } + continue + + case gen.LicenseBSL1: + var valid bool + + if _, exist := n.licenses.LoadOrStore(version, valid); exist { + continue + } + + // TODO validate license + //if valid { + // continue + //} + + n.Log().Warning("%s is distributed under %q and can not be used "+ "without a license for production/commercial purposes", + version, version.License) + + 
default: + if lib.Trace() { + n.Log().Trace("unhandled license %q for %s", version.License, version) + } + } + } } diff --git a/node/process.go b/node/process.go index 2729b792..7ff2044e 100644 --- a/node/process.go +++ b/node/process.go @@ -1,705 +1,1772 @@ package node import ( - "context" - "fmt" + "errors" + "reflect" + "runtime" + "strings" "sync" + "sync/atomic" "time" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" ) -var ( - syncReplyChannels = &sync.Pool{ - New: func() interface{} { - return make(chan syncReplyMessage, 2) - }, - } -) +type process struct { + node *node + pid gen.PID -type syncReplyMessage struct { - value etf.Term - err error -} + name gen.Atom + registered atomic.Bool + application gen.Atom -type process struct { - coreInternal - sync.RWMutex + // used for the process Uptime method only. PID value uses node creation value. + creation int64 + + // registered aliases + aliases []gen.Alias + // registered events + events sync.Map // gen.Atom ->.. - name string - self etf.Pid - behavior gen.ProcessBehavior - env map[gen.EnvKey]interface{} + behavior gen.ProcessBehavior + sbehavior string - parent *process - groupLeader gen.Process - aliases []etf.Alias + state int32 - mailBox chan gen.ProcessMailboxMessage - gracefulExit chan gen.ProcessGracefulExitRequest - direct chan gen.ProcessDirectMessage + parent gen.PID + leader gen.PID + fallback gen.ProcessFallback - context context.Context - kill context.CancelFunc - exit processExitFunc + mailbox gen.ProcessMailbox + priority gen.MessagePriority + keeporder bool + important bool - replyMutex sync.RWMutex - reply map[etf.Ref]chan syncReplyMessage + messagesIn uint64 + messagesOut uint64 + runningTime uint64 - trapExit bool - compression Compression + compression gen.Compression - fallback gen.ProcessFallback + env sync.Map + + // channel for the sync requests made by this process + response chan response + + // created links/monitors + targets sync.Map // target[PID,ProcessID,Alias,Event] -> true(link), false (monitor) + + // meta processes + metas sync.Map // metas[Alias] -> *meta + + // gen.Log interface + log *log + + // set if acting as a logger + loggername string } -type processOptions struct { - gen.ProcessOptions - parent *process +type response struct { + message any + err error + ref gen.Ref } -type processExitFunc func(from etf.Pid, reason string) error +// gen.Process implementation -// Self -func (p *process) Self() etf.Pid { - return p.self +func (p *process) Node() gen.Node { + return p.node } -// Name -func (p *process) Name() string { +func (p *process) Name() gen.Atom { return p.name } -// RegisterName -func (p *process) RegisterName(name string) error { - if p.behavior == nil { - return lib.ErrProcessTerminated +func (p *process) PID() gen.PID { + return p.pid +} + +func (p *process) Leader() gen.PID { + return p.leader +} + +func (p *process) Parent() gen.PID { + return p.parent +} + +func (p *process) Uptime() int64 { + if p.isAlive() == false { + return 0 } - return p.registerName(name, p.self) + return time.Now().Unix() - p.creation } -// UnregisterName -func (p *process) UnregisterName(name string) error { - if p.behavior == nil { - return lib.ErrProcessTerminated +func (p *process) Spawn(factory gen.ProcessFactory, options gen.ProcessOptions, args ...any) (gen.PID, error) { + if p.isStateIRW() == false { + return gen.PID{}, gen.ErrNotAllowed } - prc := p.ProcessByName(name) - if prc == nil 
{ - return lib.ErrNameUnknown + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + ParentPID: p.pid, + ParentLeader: p.leader, + ParentEnv: p.EnvList(), + ParentLogLevel: p.log.level, + Application: p.application, + Args: args, } - if prc.Self() != p.self { - return lib.ErrNameOwner + + pid, err := p.node.spawn(factory, opts) + if err != nil { + return pid, err } - return p.unregisterName(name) -} + if options.LinkChild { + // method LinkPID is not allowed to be used in the initialization state, + // so we use linking manually. + p.node.links.registerConsumer(pid, p.pid) + p.targets.Store(pid, true) } -// Kill -func (p *process) Kill() { - if p.behavior == nil { - return + return pid, err } -// Exit -func (p *process) Exit(reason string) error { - if p.behavior == nil { - return lib.ErrProcessTerminated +func (p *process) SpawnRegister(register gen.Atom, factory gen.ProcessFactory, options gen.ProcessOptions, args ...any) (gen.PID, error) { + + if p.isStateIRW() == false { + return gen.PID{}, gen.ErrNotAllowed } - return p.exit(p.self, reason) -} -// Context -func (p *process) Context() context.Context { - return p.context -} + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + ParentPID: p.pid, + ParentLeader: p.leader, + ParentEnv: p.EnvList(), + Register: register, + ParentLogLevel: p.log.level, + Application: p.application, + Args: args, + } + pid, err := p.node.spawn(factory, opts) + if err != nil { + return pid, err + } -// Parent -func (p *process) Parent() gen.Process { - if p.parent == nil { - return nil + if options.LinkChild { + // method LinkPID is not allowed to be used in the initialization state, + // so we use linking manually. + p.node.links.registerConsumer(pid, p.pid) + p.targets.Store(pid, true) } - return p.parent + return pid, err } -// GroupLeader -func (p *process) GroupLeader() gen.Process { - if p.groupLeader == nil { - return nil +func (p *process) SpawnMeta(behavior gen.MetaBehavior, options gen.MetaOptions) (gen.Alias, error) { + var alias gen.Alias + + // use isAlive instead of isStateIRW because + // any meta process should be able to spawn other meta processes + if p.isAlive() == false { + return alias, gen.ErrNotAllowed } - return p.groupLeader -} -// Links -func (p *process) Links() []etf.Pid { - return p.processLinks(p.self) -} + if behavior == nil { + return alias, errors.New("behavior is nil") + } -// Monitors -func (p *process) Monitors() []etf.Pid { - return p.processMonitors(p.self) -} + m := &meta{ + p: p, + behavior: behavior, + state: int32(gen.MetaStateSleep), + } + switch options.SendPriority { + case gen.MessagePriorityHigh: + m.priority = gen.MessagePriorityHigh + case gen.MessagePriorityMax: + m.priority = gen.MessagePriorityMax + default: + m.priority = gen.MessagePriorityNormal + } + if options.MailboxSize > 0 { + m.main = lib.NewQueueLimitMPSC(options.MailboxSize, false) + m.system = lib.NewQueueLimitMPSC(options.MailboxSize, false) + } else { + m.main = lib.NewQueueMPSC() + m.system = lib.NewQueueMPSC() + } -// MonitorsByName -func (p *process) MonitorsByName() []gen.ProcessID { - return p.processMonitorsByName(p.self) -} + m.id = gen.Alias(p.node.MakeRef()) + m.sbehavior = strings.TrimPrefix(reflect.TypeOf(behavior).String(), "*") -// MonitoredBy -func (p *process) MonitoredBy() []etf.Pid { - return p.processMonitoredBy(p.self) -} + if options.LogLevel == gen.LogLevelDefault { + options.LogLevel = p.log.Level() + } + m.log = createLog(options.LogLevel, p.node.dolog) + logSource := gen.MessageLogMeta{ 
Node: p.node.name, + Parent: p.pid, + Meta: m.id, + Behavior: m.sbehavior, + } + m.log.setSource(logSource) + + if err := m.init(); err != nil { + return alias, err + } -// Aliases -func (p *process) Aliases() []etf.Alias { - return p.aliases + // register to be able to route messages to this meta process + p.metas.Store(m.id, m) + p.node.aliases.Store(m.id, p) + go m.start() + + return m.id, nil } -// Info -func (p *process) Info() gen.ProcessInfo { - p.RLock() - if p.behavior == nil { - p.RUnlock() - return gen.ProcessInfo{} +func (p *process) RemoteSpawn(node gen.Atom, name gen.Atom, options gen.ProcessOptions, args ...any) (gen.PID, error) { + + if p.isStateIRW() == false { + return gen.PID{}, gen.ErrNotAllowed + } + + if p.node.Name() == node { + return gen.PID{}, gen.ErrNotAllowed + } - gl := p.self - if p.groupLeader != nil { - gl = p.groupLeader.Self() + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + ParentPID: p.pid, + ParentLeader: p.leader, + ParentLogLevel: p.log.level, + Application: p.application, + Args: args, + } + if p.node.Security().ExposeEnvRemoteSpawn { + opts.ParentEnv = p.EnvList() + } + pid, err := p.node.RouteSpawn(node, name, opts, p.Node().Name()) + if err != nil { + return gen.PID{}, err } - p.RUnlock() - links := p.Links() - monitors := p.Monitors() - monitorsByName := p.MonitorsByName() - monitoredBy := p.MonitoredBy() - return gen.ProcessInfo{ - PID: p.self, - Name: p.name, - GroupLeader: gl, - Links: links, - Monitors: monitors, - MonitorsByName: monitorsByName, - MonitoredBy: monitoredBy, - Aliases: p.aliases, - Status: "running", - MessageQueueLen: len(p.mailBox), - TrapExit: p.trapExit, - Compression: p.compression.Enable, + if opts.LinkChild { + // method LinkPID is not allowed to be used in the initialization state, + // so we use linking manually. + p.node.links.registerConsumer(pid, p.pid) + p.targets.Store(pid, true) } + + return pid, err } -// Send -func (p *process) Send(to interface{}, message etf.Term) error { - p.RLock() - if p.behavior == nil { - p.RUnlock() - return lib.ErrProcessTerminated +func (p *process) RemoteSpawnRegister(node gen.Atom, name gen.Atom, register gen.Atom, options gen.ProcessOptions, args ...any) (gen.PID, error) { + + if p.isStateIRW() == false { + return gen.PID{}, gen.ErrNotAllowed } - p.RUnlock() - switch receiver := to.(type) { - case etf.Pid: - return p.RouteSend(p.self, receiver, message) - case string: - return p.RouteSendReg(p.self, gen.ProcessID{Name: receiver, Node: string(p.self.Node)}, message) - case etf.Atom: - return p.RouteSendReg(p.self, gen.ProcessID{Name: string(receiver), Node: string(p.self.Node)}, message) - case gen.ProcessID: - return p.RouteSendReg(p.self, receiver, message) - case etf.Alias: - return p.RouteSendAlias(p.self, receiver, message) + opts := gen.ProcessOptionsExtra{ + ProcessOptions: options, + ParentPID: p.pid, + ParentLeader: p.leader, + Register: register, + ParentLogLevel: p.log.level, + Application: p.application, + Args: args, + } + if p.node.Security().ExposeEnvRemoteSpawn { + opts.ParentEnv = p.EnvList() + } + pid, err := p.node.RouteSpawn(node, name, opts, p.Node().Name()) + if err != nil { + return gen.PID{}, err + } + + if opts.LinkChild { + // method LinkPID is not allowed to be used in the initialization state, + // so we use linking manually. 
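+ // (registerConsumer subscribes this process to the child's termination, and
+ // storing true in p.targets marks the target as a link; false would mean a monitor)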
+ p.node.links.registerConsumer(pid, p.pid) + p.targets.Store(pid, true) } - return fmt.Errorf("Unknown receiver type") -} -// SendAfter -func (p *process) SendAfter(to interface{}, message etf.Term, after time.Duration) gen.CancelFunc { + return pid, err +} - timer := time.AfterFunc(after, func() { p.Send(to, message) }) - return timer.Stop +func (p *process) State() gen.ProcessState { + return gen.ProcessState(atomic.LoadInt32(&p.state)) } -// CreateAlias -func (p *process) CreateAlias() (etf.Alias, error) { - p.RLock() - if p.behavior == nil { - p.RUnlock() - return etf.Alias{}, lib.ErrProcessTerminated +func (p *process) RegisterName(name gen.Atom) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed } - p.RUnlock() - return p.newAlias(p) + if err := p.node.RegisterName(name, p.pid); err != nil { + return err + } + + p.log.setSource(gen.MessageLogProcess{Node: p.node.name, PID: p.pid, Name: p.name}) + return nil } -// DeleteAlias -func (p *process) DeleteAlias(alias etf.Alias) error { - p.RLock() - if p.behavior == nil { - p.RUnlock() - return lib.ErrProcessTerminated +func (p *process) UnregisterName() error { + if p.isStateRW() == false { + return gen.ErrNotAllowed } - p.RUnlock() - return p.deleteAlias(p, alias) + _, err := p.node.UnregisterName(p.name) + return err } -// ListEnv -func (p *process) ListEnv() map[gen.EnvKey]interface{} { - p.RLock() - defer p.RUnlock() +func (p *process) EnvList() map[gen.Env]any { + if p.isAlive() == false { + return nil + } - env := make(map[gen.EnvKey]interface{}) + env := make(map[gen.Env]any) + p.env.Range(func(k, v any) bool { + env[gen.Env(k.(string))] = v + return true + }) + return env +} - if p.groupLeader != nil { - for key, value := range p.groupLeader.ListEnv() { - env[key] = value - } - } - if p.parent != nil { - for key, value := range p.parent.ListEnv() { - env[key] = value - } +func (p *process) SetEnv(name gen.Env, value any) { + if p.isAlive() == false { + return } - for key, value := range p.env { - env[key] = value + if value == nil { + p.env.Delete(name.String()) + return } + p.env.Store(name.String(), value) +} - return env +func (p *process) Env(name gen.Env) (any, bool) { + if p.isAlive() == false { + return nil, false + } + x, y := p.env.Load(name.String()) + return x, y } -// SetEnv -func (p *process) SetEnv(name gen.EnvKey, value interface{}) { - p.Lock() - defer p.Unlock() +func (p *process) Compression() bool { + return p.compression.Enable +} - if value == nil { - delete(p.env, name) - return +func (p *process) SetCompression(enable bool) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated } - p.env[name] = value + p.compression.Enable = enable + return nil } -// Env -func (p *process) Env(name gen.EnvKey) interface{} { - p.RLock() - defer p.RUnlock() +func (p *process) CompressionType() gen.CompressionType { + return p.compression.Type +} - if value, ok := p.env[name]; ok { - return value +func (p *process) SetCompressionType(ctype gen.CompressionType) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated } - if p.groupLeader != nil { - return p.groupLeader.Env(name) + switch ctype { + case gen.CompressionTypeGZIP: + case gen.CompressionTypeLZW: + case gen.CompressionTypeZLIB: + default: + return gen.ErrIncorrect } + p.compression.Type = ctype return nil } -// Wait -func (p *process) Wait() { - if p.IsAlive() { - <-p.context.Done() - } +func (p *process) CompressionLevel() gen.CompressionLevel { + return p.compression.Level } -// WaitWithTimeout -func (p *process) 
WaitWithTimeout(d time.Duration) error { - if !p.IsAlive() { - return nil +func (p *process) SetCompressionLevel(level gen.CompressionLevel) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated } - timer := time.NewTimer(d) - defer timer.Stop() - - select { - case <-timer.C: - return lib.ErrTimeout - case <-p.context.Done(): - return nil + switch level { + case gen.CompressionBestSize: + case gen.CompressionBestSpeed: + case gen.CompressionDefault: + default: + return gen.ErrIncorrect } + + p.compression.Level = level + return nil +} + +func (p *process) CompressionThreshold() int { + return p.compression.Threshold } -// Link -func (p *process) Link(with etf.Pid) error { - p.RLock() - if p.behavior == nil { - p.RUnlock() - return lib.ErrProcessTerminated +func (p *process) SetCompressionThreshold(threshold int) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated } - p.RUnlock() - return p.RouteLink(p.self, with) + if threshold < gen.DefaultCompressionThreshold { + return gen.ErrIncorrect + } + p.compression.Threshold = threshold + return nil +} + +func (p *process) SendPriority() gen.MessagePriority { + return p.priority } -// Unlink -func (p *process) Unlink(with etf.Pid) error { - p.RLock() - if p.behavior == nil { - p.RUnlock() - return lib.ErrProcessTerminated +func (p *process) SetSendPriority(priority gen.MessagePriority) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated + } + + switch priority { + case gen.MessagePriorityNormal: + case gen.MessagePriorityHigh: + case gen.MessagePriorityMax: + default: + return gen.ErrIncorrect } - p.RUnlock() - return p.RouteUnlink(p.self, with) + p.priority = priority + return nil } -// IsAlive -func (p *process) IsAlive() bool { - p.RLock() - defer p.RUnlock() - if p.behavior == nil { - return false +func (p *process) SetKeepNetworkOrder(order bool) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated } - return p.context.Err() == nil + + p.keeporder = order + return nil } -// NodeName -func (p *process) NodeName() string { - return p.coreNodeName() +func (p *process) KeepNetworkOrder() bool { + return p.keeporder } -// NodeStop -func (p *process) NodeStop() { - p.coreStop() +func (p *process) SetImportantDelivery(important bool) error { + if p.isAlive() == false { + return gen.ErrProcessTerminated + } + + p.important = important + return nil } -// NodeUptime -func (p *process) NodeUptime() int64 { - return p.coreUptime() +func (p *process) ImportantDelivery() bool { + return p.important } -// Children -func (p *process) Children() ([]etf.Pid, error) { - c, err := p.Direct(gen.MessageDirectChildren{}) - if err != nil { - return []etf.Pid{}, err +func (p *process) CreateAlias() (gen.Alias, error) { + if p.isStateRW() == false { + return gen.Alias{}, gen.ErrNotAllowed + } - children, correct := c.([]etf.Pid) - if correct == false { - return []etf.Pid{}, err + alias := gen.Alias(p.node.MakeRef()) + if err := p.node.registerAlias(alias, p); err != nil { + return gen.Alias{}, err } - return children, nil -} -// SetTrapExit -func (p *process) SetTrapExit(trap bool) { - p.trapExit = trap + p.aliases = append(p.aliases, alias) + return alias, nil } -// TrapExit -func (p *process) TrapExit() bool { - return p.trapExit -} +func (p *process) DeleteAlias(alias gen.Alias) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } -// SetCompression -func (p *process) SetCompression(enable bool) { - p.compression.Enable = enable + if err := p.node.unregisterAlias(alias, p); err != 
nil { + return err + } + + p.node.RouteTerminateAlias(alias, gen.ErrUnregistered) + + for i, a := range p.aliases { + if a != alias { + continue + } + p.aliases[0] = p.aliases[i] + p.aliases = p.aliases[1:] + break + } + return nil } -// Compression -func (p *process) Compression() bool { - return p.compression.Enable +func (p *process) Aliases() []gen.Alias { + aliases := make([]gen.Alias, len(p.aliases)) + copy(aliases, p.aliases) + return aliases } -// CompressionLevel -func (p *process) CompressionLevel() int { - return p.compression.Level +func (p *process) SendWithPriority(to any, message any, priority gen.MessagePriority) error { + var prev gen.MessagePriority + prev, p.priority = p.priority, priority + err := p.Send(to, message) + p.priority = prev + return err } -// SetCompressionLevel -func (p *process) SetCompressionLevel(level int) bool { - if level < 1 || level > 9 { - return false +func (p *process) Send(to any, message any) error { + switch t := to.(type) { + case gen.PID: + return p.SendPID(t, message) + case gen.ProcessID: + return p.SendProcessID(t, message) + case gen.Alias: + return p.SendAlias(t, message) + case gen.Atom: + return p.SendProcessID(gen.ProcessID{Name: t, Node: p.node.name}, message) + case string: + return p.SendProcessID(gen.ProcessID{Name: gen.Atom(t), Node: p.node.name}, message) } - p.compression.Level = level - return true + + return gen.ErrUnsupported } -// CompressionThreshold -func (p *process) CompressionThreshold() int { - return p.compression.Threshold +func (p *process) SendImportant(to any, message any) error { + var important bool + + important, p.important = p.important, true + err := p.Send(to, message) + p.important = important + + return err } -// SetCompressionThreshold -func (p *process) SetCompressionThreshold(threshold int) bool { - if threshold < DefaultCompressionThreshold { - return false +func (p *process) SendPID(to gen.PID, message any) error { + // allow sending even in the sleep state (meta-process uses this method) + if p.isAlive() == false { + return gen.ErrNotAllowed + } + if lib.Trace() { + p.log.Trace("SendPID to %s", to) } - p.compression.Threshold = threshold - return true -} -// Behavior -func (p *process) Behavior() gen.ProcessBehavior { - p.RLock() - defer p.RUnlock() + // Sending to itself while in the initialization stage: + // + // - we can't route this message to itself (via RouteSendPID) if this process + // is in the initialization stage since it isn't registered yet. + // - message can be routed by the process name (via RouteSendProcessID) + // because it is already registered before invoking the ProcessInit callback, + // which means we should not do this trick in the SendProcessID method. + // + // So here, we should check if it is sending to itself and route this message manually + // right into the process mailbox. 
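+ // (the fast path below pushes the message straight into the Main queue and
+ // wakes the process with run(); a full Main queue yields gen.ErrProcessMailboxFull)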
+ + if to == p.pid { + // sending to itself + qm := gen.TakeMailboxMessage() + qm.From = p.pid + qm.Type = gen.MailboxMessageTypeRegular + qm.Target = to + qm.Message = message + + if ok := p.mailbox.Main.Push(qm); ok == false { + return gen.ErrProcessMailboxFull + } - if p.behavior == nil { + atomic.AddUint64(&p.messagesIn, 1) + p.run() return nil } - return p.behavior -} -// Direct -func (p *process) Direct(request interface{}) (interface{}, error) { - return p.DirectWithTimeout(request, gen.DefaultCallTimeout) -} + options := gen.MessageOptions{ + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + ImportantDelivery: p.important, + } -// DirectWithTimeout -func (p *process) DirectWithTimeout(request interface{}, timeout int) (interface{}, error) { - if timeout < 1 { - timeout = gen.DefaultCallTimeout + if options.ImportantDelivery { + ref := p.node.MakeRef() + options.Ref = ref + options.Ref.ID[0] = ref.ID[0] + ref.ID[1] + ref.ID[2] + options.Ref.ID[1] = 0 + options.Ref.ID[2] = 0 } - direct := gen.ProcessDirectMessage{ - Ref: p.MakeRef(), - Message: request, + if err := p.node.RouteSendPID(p.pid, to, options, message); err != nil { + return err } - if err := p.PutSyncRequest(direct.Ref); err != nil { - return nil, err + atomic.AddUint64(&p.messagesOut, 1) + + if options.ImportantDelivery == false { + return nil } - // sending request - select { - case p.direct <- direct: - default: - p.CancelSyncRequest(direct.Ref) - return nil, lib.ErrProcessBusy + if to.Node == p.node.name { + // local delivery + return nil } - return p.WaitSyncReply(direct.Ref, timeout) -} + // sent to remote node and 'important' flag was set. waiting for response + // from the remote node -func (p *process) RegisterEvent(event gen.Event, messages ...gen.EventMessage) error { - return p.registerEvent(p.self, event, messages) + _, err := p.waitResponse(options.Ref, gen.DefaultRequestTimeout) + return err } -func (p *process) UnregisterEvent(event gen.Event) error { - return p.unregisterEvent(p.self, event) -} +func (p *process) SendProcessID(to gen.ProcessID, message any) error { + if p.isAlive() == false { + return gen.ErrNotAllowed + } -func (p *process) MonitorEvent(event gen.Event) error { - return p.monitorEvent(p.self, event) -} + if lib.Trace() { + p.log.Trace("SendProcessID to %s", to) + } -func (p *process) DemonitorEvent(event gen.Event) error { - return p.demonitorEvent(p.self, event) -} + options := gen.MessageOptions{ + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + ImportantDelivery: p.important, + } -func (p *process) SendEventMessage(event gen.Event, message gen.EventMessage) error { - return p.sendEvent(p.self, event, message) -} + if options.ImportantDelivery { + ref := p.node.MakeRef() + options.Ref = ref + options.Ref.ID[0] = ref.ID[0] + ref.ID[1] + ref.ID[2] + options.Ref.ID[1] = 0 + options.Ref.ID[2] = 0 + } -// MonitorNode -func (p *process) MonitorNode(name string) etf.Ref { - ref := p.MakeRef() - p.monitorNode(p.self, name, ref) - return ref -} + if err := p.node.RouteSendProcessID(p.pid, to, options, message); err != nil { + return err + } -// DemonitorNode -func (p *process) DemonitorNode(ref etf.Ref) bool { - return p.demonitorNode(ref) -} + atomic.AddUint64(&p.messagesOut, 1) -// MonitorProcess -func (p *process) MonitorProcess(process interface{}) etf.Ref { - ref := p.MakeRef() - switch mp := process.(type) { - case etf.Pid: - p.RouteMonitor(p.self, mp, ref) - return ref - case gen.ProcessID: - 
p.RouteMonitorReg(p.self, mp, ref) - return ref - case string: - p.RouteMonitorReg(p.self, gen.ProcessID{Name: mp, Node: string(p.self.Node)}, ref) - return ref - case etf.Atom: - p.RouteMonitorReg(p.self, gen.ProcessID{Name: string(mp), Node: string(p.self.Node)}, ref) - return ref + if options.ImportantDelivery == false { + return nil + } + + if to.Node == p.node.name { + // local delivery + return nil } - // create fake gen.ProcessID. Monitor will send MessageDown with "noproc" as a reason - p.RouteMonitorReg(p.self, gen.ProcessID{Node: string(p.self.Node)}, ref) - return ref + // sent to remote node and 'important' flag was set. waiting for response + // from the remote node + + _, err := p.waitResponse(options.Ref, gen.DefaultRequestTimeout) + return err } -// DemonitorProcess -func (p *process) DemonitorProcess(ref etf.Ref) bool { - if err := p.RouteDemonitor(p.self, ref); err != nil { - return false +func (p *process) SendAlias(to gen.Alias, message any) error { + if p.isAlive() == false { + return gen.ErrNotAllowed } - return true -} -// RemoteSpawn makes request to spawn new process on a remote node -func (p *process) RemoteSpawn(node string, object string, opts gen.RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error) { - return p.RemoteSpawnWithTimeout(gen.DefaultCallTimeout, node, object, opts, args...) -} + if lib.Trace() { + p.log.Trace("SendAlias to %s", to) + } -// RemoteSpawnWithTimeout makes request to spawn new process on a remote node with given timeout -func (p *process) RemoteSpawnWithTimeout(timeout int, node string, object string, opts gen.RemoteSpawnOptions, args ...etf.Term) (etf.Pid, error) { - ref := p.MakeRef() - p.PutSyncRequest(ref) - request := gen.RemoteSpawnRequest{ - From: p.self, - Ref: ref, - Options: opts, + options := gen.MessageOptions{ + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + ImportantDelivery: p.important, } - if err := p.RouteSpawnRequest(node, object, request, args...); err != nil { - p.CancelSyncRequest(ref) - return etf.Pid{}, err + + if options.ImportantDelivery { + ref := p.node.MakeRef() + options.Ref = ref + options.Ref.ID[0] = ref.ID[0] + ref.ID[1] + ref.ID[2] + options.Ref.ID[1] = 0 + options.Ref.ID[2] = 0 } - reply, err := p.WaitSyncReply(ref, timeout) - if err != nil { - return etf.Pid{}, err - } - - // Result of the operation. If Result is a process identifier, - // the operation succeeded and the process identifier is the - // identifier of the newly created process. If Result is an atom, - // the operation failed and the atom identifies failure reason. - switch r := reply.(type) { - case etf.Pid: - m := etf.Ref{} // empty reference - if opts.Monitor != m { - p.RouteMonitor(p.self, r, opts.Monitor) + if err := p.node.RouteSendAlias(p.pid, to, options, message); err != nil { + return err + } + + atomic.AddUint64(&p.messagesOut, 1) + + if options.ImportantDelivery == false { + return nil + } + + if to.Node == p.node.name { + // local delivery + return nil + } + + // sent to remote node and 'important' flag was set. 
waiting for response + // from the remote node + + _, err := p.waitResponse(options.Ref, gen.DefaultRequestTimeout) + return err +} + +func (p *process) SendAfter(to any, message any, after time.Duration) (gen.CancelFunc, error) { + if p.isAlive() == false { + return nil, gen.ErrNotAllowed + } + return time.AfterFunc(after, func() { + var err error + if lib.Trace() { + p.log.Trace("SendAfter %s to %s", after, to) } - if opts.Link { - p.RouteLink(p.self, r) + // we can't use p.Send(...) because it checks the process state + // and returns gen.ErrNotAllowed, so use p.node.Route* methods for that + options := gen.MessageOptions{ + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + // ImportantDelivery: ignore on sending with delay } - return r, nil - case etf.Atom: - switch string(r) { - case lib.ErrTaken.Error(): - return etf.Pid{}, lib.ErrTaken - case lib.ErrBehaviorUnknown.Error(): - return etf.Pid{}, lib.ErrBehaviorUnknown + switch t := to.(type) { + case gen.Atom: + err = p.node.RouteSendProcessID(p.pid, gen.ProcessID{Name: t, Node: p.node.name}, options, message) + case gen.PID: + err = p.node.RouteSendPID(p.pid, t, options, message) + case gen.ProcessID: + err = p.node.RouteSendProcessID(p.pid, t, options, message) + case gen.Alias: + err = p.node.RouteSendAlias(p.pid, t, options, message) } - return etf.Pid{}, fmt.Errorf(string(r)) - } - return etf.Pid{}, fmt.Errorf("unknown result: %#v", reply) + if err == nil { + atomic.AddUint64(&p.messagesOut, 1) + } + }).Stop, nil } -// Spawn -func (p *process) Spawn(name string, opts gen.ProcessOptions, behavior gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) { - options := processOptions{ - ProcessOptions: opts, - parent: p, +func (p *process) SendEvent(name gen.Atom, token gen.Ref, message any) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if lib.Trace() { + p.log.Trace("process SendEvent %s with token %s", name, token) } - return p.spawn(name, options, behavior, args...) 
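+ // the message is wrapped into gen.MessageEvent along with the event name and a
+ // nanosecond timestamp; the token must be the gen.Ref returned by RegisterEvent
+ // (presumably validated by the node before the event is fanned out to subscribers)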
+ + options := gen.MessageOptions{ + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + } + + em := gen.MessageEvent{ + Event: gen.Event{Name: name, Node: p.node.name}, + Timestamp: time.Now().UnixNano(), + Message: message, + } + + if err := p.node.RouteSendEvent(p.pid, token, options, em); err != nil { + return err + } + + atomic.AddUint64(&p.messagesOut, 1) + return nil } -// PutSyncRequest -func (p *process) PutSyncRequest(ref etf.Ref) error { - var preply map[etf.Ref]chan syncReplyMessage - p.RLock() - preply = p.reply - p.RUnlock() +func (p *process) SendExit(to gen.PID, reason error) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if reason == nil { + return gen.ErrIncorrect + } - if preply == nil { - return lib.ErrProcessTerminated + switch to { + case p.pid, p.parent, p.leader: + p.log.Warning("sending exit-signal to itself, parent, or leader process is not allowed") + return gen.ErrNotAllowed } - reply := syncReplyChannels.Get().(chan syncReplyMessage) - p.replyMutex.Lock() - preply[ref] = reply - p.replyMutex.Unlock() + if lib.Trace() { + p.log.Trace("SendExit to %s", to) + } + err := p.node.RouteSendExit(p.pid, to, reason) + if err != nil { + return err + } + atomic.AddUint64(&p.messagesOut, 1) return nil } -// PutSyncReply -func (p *process) PutSyncReply(ref etf.Ref, reply etf.Term, err error) error { - var preply map[etf.Ref]chan syncReplyMessage - p.RLock() - preply = p.reply - p.RUnlock() +func (p *process) SendExitMeta(alias gen.Alias, reason error) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } - if preply == nil { - return lib.ErrProcessTerminated + value, found := p.node.aliases.Load(alias) + if found == false { + return gen.ErrAliasUnknown } - p.replyMutex.RLock() - rep, ok := preply[ref] - defer p.replyMutex.RUnlock() + metap := value.(*process) + if alive := metap.isAlive(); alive == false { + return gen.ErrProcessTerminated + } - if !ok { - // no process waiting for it - return lib.ErrReferenceUnknown + value, found = metap.metas.Load(alias) + if found == false { + return gen.ErrMetaUnknown } - select { - case rep <- syncReplyMessage{value: reply, err: err}: + + m := value.(*meta) + // send exit signal to the meta process + qm := gen.TakeMailboxMessage() + qm.From = p.pid + qm.Type = gen.MailboxMessageTypeExit + qm.Message = reason + + if ok := m.system.Push(qm); ok == false { + return gen.ErrMetaMailboxFull + } + + atomic.AddUint64(&m.messagesIn, 1) + atomic.AddUint64(&p.messagesOut, 1) + m.handle() return nil } -// CancelSyncRequest -func (p *process) CancelSyncRequest(ref etf.Ref) { - var preply map[etf.Ref]chan syncReplyMessage - p.RLock() - preply = p.reply - p.RUnlock() +func (p *process) SendResponse(to gen.PID, ref gen.Ref, message any) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + if lib.Trace() { + p.log.Trace("SendResponse to %s with %s", to, ref) + } + options := gen.MessageOptions{ + Ref: ref, + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + } + atomic.AddUint64(&p.messagesOut, 1) + return p.node.RouteSendResponse(p.pid, to, options, message) +} + +func (p *process) SendResponseError(to gen.PID, ref gen.Ref, err error) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + if lib.Trace() { + p.log.Trace("SendResponseError to %s with %s", to, ref) + } + options := gen.MessageOptions{ + Ref: ref, + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + } + 
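+ // ref must be the reference that arrived with the request: waitResponse on the
+ // caller's side matches responses by this reference and drops stale ones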
atomic.AddUint64(&p.messagesOut, 1) + return p.node.RouteSendResponseError(p.pid, to, options, err) +} + +func (p *process) CallWithPriority(to any, request any, priority gen.MessagePriority) (any, error) { + var prev gen.MessagePriority + prev, p.priority = p.priority, priority + value, err := p.CallWithTimeout(to, request, gen.DefaultRequestTimeout) + p.priority = prev + return value, err +} - if preply == nil { - return +func (p *process) CallImportant(to any, request any) (any, error) { + var important bool + + important, p.important = p.important, true + result, err := p.CallWithTimeout(to, request, gen.DefaultRequestTimeout) + p.important = important + + return result, err +} + +func (p *process) Call(to any, request any) (any, error) { + return p.CallWithTimeout(to, request, gen.DefaultRequestTimeout) +} +func (p *process) CallWithTimeout(to any, request any, timeout int) (any, error) { + switch t := to.(type) { + case gen.Atom: + return p.CallProcessID(gen.ProcessID{Name: t, Node: p.node.name}, request, timeout) + case gen.PID: + return p.CallPID(t, request, timeout) + case gen.ProcessID: + return p.CallProcessID(t, request, timeout) + case gen.Alias: + return p.CallAlias(t, request, timeout) } - p.replyMutex.Lock() - delete(preply, ref) - p.replyMutex.Unlock() + return nil, gen.ErrUnsupported + } -// WaitSyncReply -func (p *process) WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error) { - var preply map[etf.Ref]chan syncReplyMessage - p.RLock() - preply = p.reply - p.RUnlock() +func (p *process) CallPID(to gen.PID, message any, timeout int) (any, error) { + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed + } + if to == p.pid { + return nil, gen.ErrNotAllowed + } - if preply == nil { - return nil, lib.ErrProcessTerminated + options := gen.MessageOptions{ + Ref: p.node.MakeRef(), + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + ImportantDelivery: p.important, } - p.replyMutex.RLock() - reply, wait_for_reply := preply[ref] - p.replyMutex.RUnlock() + if lib.Trace() { + p.log.Trace("CallPID to %s with %s", to, options.Ref) + } - if wait_for_reply == false { - return nil, fmt.Errorf("Unknown request") + if err := p.node.RouteCallPID(p.pid, to, options, message); err != nil { + return nil, err } - defer func(ref etf.Ref) { - p.replyMutex.Lock() - delete(preply, ref) - p.replyMutex.Unlock() - }(ref) + atomic.AddUint64(&p.messagesOut, 1) + if timeout < 1 { + timeout = gen.DefaultRequestTimeout + } + return p.waitResponse(options.Ref, timeout) +} - timer := lib.TakeTimer() - defer lib.ReleaseTimer(timer) - timer.Reset(time.Second * time.Duration(timeout)) - - for { - select { - case m := <-reply: - // get back 'reply' struct to the pool - syncReplyChannels.Put(reply) - return m.value, m.err - case <-timer.C: - return nil, lib.ErrTimeout - case <-p.context.Done(): - return nil, lib.ErrProcessTerminated - } +func (p *process) CallProcessID(to gen.ProcessID, message any, timeout int) (any, error) { + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed } + options := gen.MessageOptions{ + Ref: p.node.MakeRef(), + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + ImportantDelivery: p.important, + } + if lib.Trace() { + p.log.Trace("CallProcessID %s with %s", to, options.Ref) + } + if err := p.node.RouteCallProcessID(p.pid, to, options, message); err != nil { + return nil, err + } + atomic.AddUint64(&p.messagesOut, 1) + if timeout < 1 { + timeout = gen.DefaultRequestTimeout + } + return 
p.waitResponse(options.Ref, timeout) } -// ProcessChannels -func (p *process) ProcessChannels() gen.ProcessChannels { - return gen.ProcessChannels{ - Mailbox: p.mailBox, - Direct: p.direct, - GracefulExit: p.gracefulExit, +func (p *process) CallAlias(to gen.Alias, message any, timeout int) (any, error) { + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed + } + + options := gen.MessageOptions{ + Ref: p.node.MakeRef(), + Priority: p.priority, + Compression: p.compression, + KeepNetworkOrder: p.keeporder, + ImportantDelivery: p.important, + } + + if lib.Trace() { + p.log.Trace("CallAlias %s with %s", to, options.Ref) + } + + if err := p.node.RouteCallAlias(p.pid, to, options, message); err != nil { + return nil, err + } + atomic.AddUint64(&p.messagesOut, 1) + if timeout < 1 { + timeout = gen.DefaultRequestTimeout + } + return p.waitResponse(options.Ref, timeout) +} + +func (p *process) Inspect(target gen.PID, item ...string) (map[string]string, error) { + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed + } + + if target.Node != p.pid.Node { + // inspecting remote process is not allowed + return nil, gen.ErrNotAllowed + } + + ref := p.node.MakeRef() + + value, found := p.node.processes.Load(target) + if found == false { + return nil, gen.ErrProcessUnknown + } + targetp := value.(*process) + + if alive := targetp.isAlive(); alive == false { + return nil, gen.ErrProcessTerminated + } + + qm := gen.TakeMailboxMessage() + qm.Ref = ref + qm.From = p.pid + qm.Type = gen.MailboxMessageTypeInspect + qm.Message = item + + if ok := targetp.mailbox.Urgent.Push(qm); ok == false { + return nil, gen.ErrProcessMailboxFull + } + atomic.AddUint64(&p.messagesOut, 1) + atomic.AddUint64(&targetp.messagesIn, 1) + + if lib.Trace() { + p.log.Trace("Inspect %s with %s", target, ref) + } + + targetp.run() + + value, err := p.waitResponse(ref, gen.DefaultRequestTimeout) + if err != nil { + return nil, err + } + return value.(map[string]string), nil +} + +func (p *process) InspectMeta(alias gen.Alias, item ...string) (map[string]string, error) { + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed + } + + if alias.Node != p.pid.Node { + // inspecting remote meta process is not allowed + return nil, gen.ErrNotAllowed + } + + value, found := p.node.aliases.Load(alias) + if found == false { + return nil, gen.ErrMetaUnknown + } + + metap := value.(*process) + if alive := metap.isAlive(); alive == false { + return nil, gen.ErrProcessTerminated + } + + value, found = metap.metas.Load(alias) + if found == false { + return nil, gen.ErrMetaUnknown + } + + m := value.(*meta) + ref := p.node.MakeRef() + + qm := gen.TakeMailboxMessage() + qm.Ref = ref + qm.From = p.pid + qm.Type = gen.MailboxMessageTypeInspect + qm.Message = item + + if ok := m.system.Push(qm); ok == false { + return nil, gen.ErrProcessMailboxFull + } + atomic.AddUint64(&p.messagesOut, 1) + atomic.AddUint64(&m.messagesIn, 1) + + if lib.Trace() { + m.log.Trace("Inspect meta %s with %s", alias, ref) + } + + m.handle() + + v, err := p.waitResponse(ref, gen.DefaultRequestTimeout) + if err != nil { + return nil, err + } + return v.(map[string]string), nil +} + +func (p *process) RegisterEvent(name gen.Atom, options gen.EventOptions) (gen.Ref, error) { + var empty gen.Ref + if p.isStateRW() == false { + return empty, gen.ErrNotAllowed + } + + if lib.Trace() { + p.log.Trace("process RegisterEvent %s", name) + } + + token, err := p.node.registerEvent(name, p.pid, options) + if err != nil { + return token, err + } + p.events.Store(name, true) 
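+ // the event name is kept locally so Events() can list it and UnregisterEvent
+ // can remove it; the returned token is what must be passed to SendEvent
+ // when producing this event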
+ return token, nil +} + +func (p *process) UnregisterEvent(name gen.Atom) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if lib.Trace() { + p.log.Trace("process UnregisterEvent %s", name) + } + + if err := p.node.unregisterEvent(name, p.pid); err != nil { + return err + } + + p.events.Delete(name) + return nil +} + +func (p *process) Events() []gen.Atom { + events := []gen.Atom{} + p.events.Range(func(k, _ any) bool { + events = append(events, k.(gen.Atom)) + return true + }) + return events +} + +func (p *process) Link(target any) error { + switch t := target.(type) { + case gen.Atom: + return p.LinkProcessID(gen.ProcessID{Name: t, Node: p.node.name}) + case gen.PID: + return p.LinkPID(t) + case gen.ProcessID: + return p.LinkProcessID(t) + case gen.Alias: + return p.LinkAlias(t) + } + + return gen.ErrUnsupported +} +func (p *process) Unlink(target any) error { + switch t := target.(type) { + case gen.Atom: + return p.UnlinkProcessID(gen.ProcessID{Name: t, Node: p.node.name}) + case gen.PID: + return p.UnlinkPID(t) + case gen.ProcessID: + return p.UnlinkProcessID(t) + case gen.Alias: + return p.UnlinkAlias(t) + } + + return gen.ErrUnsupported +} + +func (p *process) LinkPID(target gen.PID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if target == p.pid { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if lib.Trace() { + p.log.Trace("LinkPID with %s", target) + } + + if err := p.node.RouteLinkPID(p.pid, target); err != nil { + return err + } + + p.targets.Store(target, true) + return nil +} + +func (p *process) UnlinkPID(target gen.PID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if lib.Trace() { + p.log.Trace("UnlinkPID with %s", target) + } + + if err := p.node.RouteUnlinkPID(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) LinkProcessID(target gen.ProcessID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if target.Name == p.name && target.Node == p.node.name { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if lib.Trace() { + p.log.Trace("LinkProcessID with %s", target) + } + + if err := p.node.RouteLinkProcessID(p.pid, target); err != nil { + return err + } + + p.targets.Store(target, true) + return nil +} + +func (p *process) UnlinkProcessID(target gen.ProcessID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if err := p.node.RouteUnlinkProcessID(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) LinkAlias(target gen.Alias) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + for _, a := range p.aliases { + if a == target { + return gen.ErrNotAllowed + } + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if err := p.node.RouteLinkAlias(p.pid, target); err != nil { + return err + } + + p.targets.Store(target, true) + return nil +} + +func (p *process) UnlinkAlias(target gen.Alias) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if err 
:= p.node.RouteUnlinkAlias(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) LinkEvent(target gen.Event) ([]gen.MessageEvent, error) { + + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed + } + + if target.Node == "" { + target.Node = p.node.name + } + + if _, exist := p.targets.Load(target); exist { + return nil, gen.ErrTargetExist + } + + lastEventMessages, err := p.node.RouteLinkEvent(p.pid, target) + if err != nil { + return nil, err + } + + p.targets.Store(target, true) + return lastEventMessages, nil +} + +func (p *process) UnlinkEvent(target gen.Event) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if err := p.node.RouteUnlinkEvent(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) LinkNode(target gen.Atom) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if _, err := p.Node().Network().GetNode(target); err != nil { + return err + } + p.node.links.registerConsumer(target, p.pid) + p.targets.Store(target, true) + return nil +} + +func (p *process) UnlinkNode(target gen.Atom) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + p.node.links.unregisterConsumer(target, p.pid) + p.targets.Delete(target) + return nil +} + +func (p *process) Monitor(target any) error { + switch t := target.(type) { + case gen.Atom: + return p.MonitorProcessID(gen.ProcessID{Name: t, Node: p.node.name}) + case gen.PID: + return p.MonitorPID(t) + case gen.ProcessID: + return p.MonitorProcessID(t) + case gen.Alias: + return p.MonitorAlias(t) + } + + return gen.ErrUnsupported +} + +func (p *process) Demonitor(target any) error { + switch t := target.(type) { + case gen.Atom: + return p.DemonitorProcessID(gen.ProcessID{Name: t, Node: p.node.name}) + case gen.PID: + return p.DemonitorPID(t) + case gen.ProcessID: + return p.DemonitorProcessID(t) + case gen.Alias: + return p.DemonitorAlias(t) + } + + return gen.ErrUnsupported +} + +func (p *process) MonitorPID(target gen.PID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if err := p.node.RouteMonitorPID(p.pid, target); err != nil { + return err + } + + p.targets.Store(target, false) + return nil +} + +func (p *process) DemonitorPID(target gen.PID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if err := p.node.RouteDemonitorPID(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) MonitorProcessID(target gen.ProcessID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if err := p.node.RouteMonitorProcessID(p.pid, target); err != nil { + return err + } + + p.targets.Store(target, false) + return nil +} + +func (p *process) DemonitorProcessID(target gen.ProcessID) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return 
gen.ErrTargetUnknown + } + + if err := p.node.RouteDemonitorProcessID(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) MonitorAlias(target gen.Alias) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if err := p.node.RouteMonitorAlias(p.pid, target); err != nil { + return err + } + + p.targets.Store(target, false) + return nil +} + +func (p *process) DemonitorAlias(target gen.Alias) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if err := p.node.RouteDemonitorAlias(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) MonitorEvent(target gen.Event) ([]gen.MessageEvent, error) { + + if p.isStateRW() == false { + return nil, gen.ErrNotAllowed + } + + if target.Node == "" { + target.Node = p.node.name + } + + if _, exist := p.targets.Load(target); exist { + return nil, gen.ErrTargetExist + } + + lastEventMessages, err := p.node.RouteMonitorEvent(p.pid, target) + if err != nil { + return nil, err + } + + p.targets.Store(target, false) + return lastEventMessages, nil +} + +func (p *process) DemonitorEvent(target gen.Event) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + if err := p.node.RouteDemonitorEvent(p.pid, target); err != nil { + return err + } + + p.targets.Delete(target) + return nil +} + +func (p *process) MonitorNode(target gen.Atom) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + + if _, exist := p.targets.Load(target); exist { + return gen.ErrTargetExist + } + + if _, err := p.Node().Network().GetNode(target); err != nil { + return err + } + p.node.monitors.registerConsumer(target, p.pid) + p.targets.Store(target, false) + return nil +} + +func (p *process) DemonitorNode(target gen.Atom) error { + if p.isStateRW() == false { + return gen.ErrNotAllowed + } + if _, exist := p.targets.Load(target); exist == false { + return gen.ErrTargetUnknown + } + + p.node.monitors.unregisterConsumer(target, p.pid) + p.targets.Delete(target) + return nil +} + +func (p *process) Log() gen.Log { + return p.log +} + +func (p *process) Info() (gen.ProcessInfo, error) { + if p.isStateRW() == false { + return gen.ProcessInfo{}, gen.ErrNotAllowed + } + return p.node.ProcessInfo(p.pid) +} + +func (p *process) MetaInfo(m gen.Alias) (gen.MetaInfo, error) { + if p.isStateRW() == false { + return gen.MetaInfo{}, gen.ErrNotAllowed + } + return p.node.MetaInfo(m) +} + +func (p *process) Mailbox() gen.ProcessMailbox { + return p.mailbox +} + +func (p *process) Behavior() gen.ProcessBehavior { + return p.behavior +} + +func (p *process) Forward(to gen.PID, message *gen.MailboxMessage, priority gen.MessagePriority) error { + var queue lib.QueueMPSC + + // local + value, found := p.node.processes.Load(to) + if found == false { + return gen.ErrProcessUnknown + } + fp := value.(*process) + + if alive := fp.isAlive(); alive == false { + return gen.ErrProcessTerminated + } + + switch priority { + case gen.MessagePriorityHigh: + queue = fp.mailbox.System + case gen.MessagePriorityMax: + queue = fp.mailbox.Urgent + default: + queue = fp.mailbox.Main + } + if ok := queue.Push(message); ok == false { + return gen.ErrProcessMailboxFull + } + 
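+ // account for the forwarded message on both sides and wake the receiving
+ // process (run() is a no-op if it is already running or terminated)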
atomic.AddUint64(&p.messagesOut, 1) + atomic.AddUint64(&fp.messagesIn, 1) + fp.run() + return nil +} + +// internal + +func (p *process) run() { + if atomic.CompareAndSwapInt32(&p.state, int32(gen.ProcessStateSleep), int32(gen.ProcessStateRunning)) == false { + // already running or terminated + return + } + go func() { + if lib.Recover() { + defer func() { + if rcv := recover(); rcv != nil { + pc, fn, line, _ := runtime.Caller(2) + p.log.Panic("process terminated - %#v at %s[%s:%d]", + rcv, runtime.FuncForPC(pc).Name(), fn, line) + old := atomic.SwapInt32(&p.state, int32(gen.ProcessStateTerminated)) + if old == int32(gen.ProcessStateTerminated) { + return + } + p.node.unregisterProcess(p, gen.TerminateReasonPanic) + p.behavior.ProcessTerminate(gen.TerminateReasonPanic) + } + }() + } + next: + startTime := time.Now().UnixNano() + // handle mailbox + if err := p.behavior.ProcessRun(); err != nil { + p.runningTime = p.runningTime + uint64(time.Now().UnixNano()-startTime) + e := errors.Unwrap(err) + if e == nil { + e = err + } + if e != gen.TerminateReasonNormal && e != gen.TerminateReasonShutdown { + p.log.Error("process terminated abnormally - %s", err) + } + + old := atomic.SwapInt32(&p.state, int32(gen.ProcessStateTerminated)) + if old == int32(gen.ProcessStateTerminated) { + return + } + + p.node.unregisterProcess(p, e) + p.behavior.ProcessTerminate(err) + return + } + + // count the running time + p.runningTime = p.runningTime + uint64(time.Now().UnixNano()-startTime) + + // change running state to sleep + if atomic.CompareAndSwapInt32(&p.state, int32(gen.ProcessStateRunning), int32(gen.ProcessStateSleep)) == false { + // process has been killed (was in zombie state) + old := atomic.SwapInt32(&p.state, int32(gen.ProcessStateTerminated)) + if old == int32(gen.ProcessStateTerminated) { + return + } + p.node.unregisterProcess(p, gen.TerminateReasonKill) + p.behavior.ProcessTerminate(gen.TerminateReasonKill) + return + } + // check if something is left in the inbox and try to handle it + if p.mailbox.Main.Item() == nil { + if p.mailbox.System.Item() == nil { + if p.mailbox.Urgent.Item() == nil { + if p.mailbox.Log.Item() == nil { + // inbox is empty + return + } + } + } + } + // we got new messages; 
try to use this goroutine again + if atomic.CompareAndSwapInt32(&p.state, int32(gen.ProcessStateSleep), int32(gen.ProcessStateRunning)) == false { + // another goroutine is already running + return + } + goto next + }() +} + +func (p *process) isStateIRW() bool { + state := atomic.LoadInt32(&p.state) + irw := int32(gen.ProcessStateInit) | + int32(gen.ProcessStateRunning) | + int32(gen.ProcessStateWaitResponse) + return (state & irw) == state +} + +func (p *process) isStateRW() bool { + state := atomic.LoadInt32(&p.state) + rw := int32(gen.ProcessStateRunning) | + int32(gen.ProcessStateWaitResponse) + return (state & rw) == state +} + +func (p *process) isAlive() bool { + state := atomic.LoadInt32(&p.state) + alive := int32(gen.ProcessStateInit) | + int32(gen.ProcessStateSleep) | + int32(gen.ProcessStateRunning) | + int32(gen.ProcessStateWaitResponse) + return (state & alive) == state +} + +func (p *process) isStateSRW() bool { + state := atomic.LoadInt32(&p.state) + alive := int32(gen.ProcessStateSleep) | + int32(gen.ProcessStateRunning) | + int32(gen.ProcessStateWaitResponse) + return (state & alive) == state +} + +func (p *process) waitResponse(ref gen.Ref, timeout int) (any, error) { + var response any + var err error + + if swapped := atomic.CompareAndSwapInt32(&p.state, int32(gen.ProcessStateRunning), int32(gen.ProcessStateWaitResponse)); swapped == false { + return nil, gen.ErrNotAllowed + } + + timer := lib.TakeTimer() + defer lib.ReleaseTimer(timer) + + if timeout == 0 { + timer.Reset(time.Second * time.Duration(gen.DefaultRequestTimeout)) + } else { + timer.Reset(time.Second * time.Duration(timeout)) + } + +retry: + select { + case <-timer.C: + if lib.Trace() { + p.log.Trace("request with ref %s timed out", ref) + } + err = gen.ErrTimeout + case r := <-p.response: + if r.ref != ref { + // we got a late response to a previous request that timed out + // earlier, after which a new request was made with a new reference (ref); + // just drop it and keep waiting + if lib.Trace() { + p.log.Trace("got late response on request with ref %s (exp %s). 
dropped", r.ref, ref) + } + goto retry + } + response = r.message + err = r.err + } + + if swapped := atomic.CompareAndSwapInt32(&p.state, int32(gen.ProcessStateWaitResponse), int32(gen.ProcessStateRunning)); swapped == false { + return nil, gen.ErrProcessTerminated } + return response, err } diff --git a/node/static.go b/node/static.go new file mode 100644 index 00000000..85d8ba64 --- /dev/null +++ b/node/static.go @@ -0,0 +1,176 @@ +package node + +import ( + "regexp" + "sort" + "sync" + + "ergo.services/ergo/gen" +) + +type staticRoutes struct { + routes sync.Map // match => staticRoute +} + +type staticRoute struct { + re *regexp.Regexp + route gen.NetworkRoute + weight int +} + +func (srs *staticRoutes) add(match string, route gen.NetworkRoute, weight int) error { + re, err := regexp.Compile(match) + if err != nil { + return err + } + + sr := staticRoute{ + re: re, + route: route, + weight: weight, + } + _, exist := srs.routes.LoadOrStore(match, sr) + if exist { + return gen.ErrTaken + } + return nil +} + +func (srs *staticRoutes) remove(match string) error { + _, found := srs.routes.LoadAndDelete(match) + if found == false { + return gen.ErrUnknown + } + return nil +} + +func (srs *staticRoutes) lookup(name string) ([]gen.NetworkRoute, bool) { + var sroutes []staticRoute + var routes []gen.NetworkRoute + + srs.routes.Range(func(_, v any) bool { + sr := v.(staticRoute) + if match := sr.re.MatchString(name); match { + sroutes = append(sroutes, sr) + } + return true + }) + + if len(sroutes) == 0 { + return nil, false + } + + sort.Slice(sroutes, func(i, j int) bool { + return sroutes[i].weight > sroutes[j].weight + }) + + for _, sr := range sroutes { + routes = append(routes, sr.route) + } + + return routes, true +} + +func (srs *staticRoutes) info() []gen.RouteInfo { + var info []gen.RouteInfo + srs.routes.Range(func(k, v any) bool { + sr := v.(staticRoute) + ri := gen.RouteInfo{ + Match: k.(string), + Weight: sr.weight, + UseResolver: sr.route.Resolver != nil, + UseCustomCookie: sr.route.Cookie != "", + UseCustomCert: sr.route.Cert != nil, + Flags: sr.route.Flags, + HandshakeVersion: sr.route.Route.HandshakeVersion, + ProtoVersion: sr.route.Route.ProtoVersion, + Host: sr.route.Route.Host, + Port: sr.route.Route.Port, + } + info = append(info, ri) + return true + }) + return info +} + +type staticProxies struct { + routes sync.Map // match => staticProxy +} + +type staticProxy struct { + re *regexp.Regexp + route gen.NetworkProxyRoute + weight int +} + +func (sps *staticProxies) add(match string, route gen.NetworkProxyRoute, weight int) error { + re, err := regexp.Compile(match) + if err != nil { + return err + } + + sp := staticProxy{ + re: re, + route: route, + weight: weight, + } + _, exist := sps.routes.LoadOrStore(match, sp) + if exist { + return gen.ErrTaken + } + return nil +} + +func (sps *staticProxies) remove(match string) error { + _, found := sps.routes.LoadAndDelete(match) + if found == false { + return gen.ErrUnknown + } + return nil +} + +func (sps *staticProxies) lookup(name string) ([]gen.NetworkProxyRoute, bool) { + var sroutes []staticProxy + var routes []gen.NetworkProxyRoute + + sps.routes.Range(func(_, v any) bool { + sp := v.(staticProxy) + if match := sp.re.MatchString(name); match { + sroutes = append(sroutes, sp) + } + return true + }) + + if len(sroutes) == 0 { + return nil, false + } + + sort.Slice(sroutes, func(i, j int) bool { + return sroutes[i].weight > sroutes[j].weight + }) + + for _, sr := range sroutes { + routes = append(routes, sr.route) + } + + 
return routes, true +} + +func (sps *staticProxies) info() []gen.ProxyRouteInfo { + var info []gen.ProxyRouteInfo + sps.routes.Range(func(k, v any) bool { + sp := v.(staticProxy) + rpi := gen.ProxyRouteInfo{ + Match: k.(string), + Weight: sp.weight, + UseResolver: sp.route.Resolver != nil, + UseCustomCookie: sp.route.Cookie != "", + Flags: sp.route.Flags, + MaxHop: sp.route.MaxHop, + Proxy: sp.route.Route.Proxy, + } + info = append(info, rpi) + return true + }) + return info +} diff --git a/node/target.go b/node/target.go new file mode 100644 index 00000000..480bd8eb --- /dev/null +++ b/node/target.go @@ -0,0 +1,122 @@ +package node + +import ( + "sync" + + "ergo.services/ergo/gen" +) + +func createTarget() *target { + return &target{ + c: make(map[any][]gen.PID), + } +} + +type target struct { + c map[any][]gen.PID // consumers + sync.RWMutex +} + +// returns true if registered first consumer of the target +func (t *target) registerConsumer(target any, consumer gen.PID) bool { + t.Lock() + defer t.Unlock() + + list := t.c[target] + list = append(list, consumer) + t.c[target] = list + + first := len(list) == 1 + return first +} + +// returns true if unregistered consumer was the last one +func (t *target) unregisterConsumer(target any, consumer gen.PID) bool { + t.Lock() + defer t.Unlock() + + list, exist := t.c[target] + if exist == false { + return false + } + + for i, pid := range list { + if pid != consumer { + continue + } + list[0] = list[i] + list = list[1:] + if len(list) == 0 { + delete(t.c, target) + return true + } + t.c[target] = list + break + } + + return false +} + +func (t *target) unregister(target any) []gen.PID { + t.Lock() + defer t.Unlock() + + list, exist := t.c[target] + if exist == false { + return list + } + delete(t.c, target) + return list +} + +func (t *target) targetsNodeDown(node gen.Atom) []any { + var targets []any + + t.Lock() + defer t.Unlock() + + for k, list := range t.c { + switch tt := k.(type) { + case gen.PID: + if tt.Node == node { + targets = append(targets, tt) + } + case gen.ProcessID: + if tt.Node == node { + targets = append(targets, tt) + } + case gen.Alias: + if tt.Node == node { + targets = append(targets, k) + } + case gen.Event: + if tt.Node == node { + targets = append(targets, k) + } + case gen.Atom: + if tt == node { + targets = append(targets, k) + } + } + + // remove remote consumers (belonging to the node that went down) + newlist := []gen.PID{} + for _, pid := range list { + if pid.Node == node { + // skip it + continue + } + newlist = append(newlist, pid) + } + + t.c[k] = newlist + } + + return targets +} + +func (t *target) consumers(target any) []gen.PID { + t.RLock() + defer t.RUnlock() + return t.c[target] +} diff --git a/node/types.go b/node/types.go deleted file mode 100644 index 0a1be51a..00000000 --- a/node/types.go +++ /dev/null @@ -1,612 +0,0 @@ -package node - -import ( - "context" - "crypto/cipher" - "crypto/tls" - "net" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" -) - -const ( - // node options - defaultListenBegin uint16 = 15000 - defaultListenEnd uint16 = 65000 - defaultKeepAlivePeriod time.Duration = 15 - defaultProxyPathLimit int = 32 - - DefaultProcessMailboxSize int = 100 - DefaultProcessDirectboxSize int = 10 - - EnvKeyVersion gen.EnvKey = "ergo:Version" - EnvKeyNode gen.EnvKey = "ergo:Node" - EnvKeyRemoteSpawn gen.EnvKey = "ergo:RemoteSpawn" - - DefaultProtoRecvQueueLength int = 100 - DefaultProtoSendQueueLength int = 100 - 
DefaultProtoFragmentationUnit int = 65000 - - DefaultCompressionLevel int = -1 - DefaultCompressionThreshold int = 1024 - - DefaultProxyMaxHop int = 8 - - EventNetwork gen.Event = "network" -) - -type Node interface { - gen.Core - // Name returns node name - Name() string - // IsAlive returns true if node is still alive - IsAlive() bool - // Uptime returns node uptime in seconds - Uptime() int64 - // Version return node version - Version() Version - // ListEnv returns a map of configured Node environment variables. - ListEnv() map[gen.EnvKey]interface{} - // SetEnv set node environment variable with given name. Use nil value to remove variable with given name. Ignores names with "ergo:" as a prefix. - SetEnv(name gen.EnvKey, value interface{}) - // Env returns value associated with given environment name. - Env(name gen.EnvKey) interface{} - - // Spawn spawns a new process - Spawn(name string, opts gen.ProcessOptions, object gen.ProcessBehavior, args ...etf.Term) (gen.Process, error) - - // RegisterName - RegisterName(name string, pid etf.Pid) error - // UnregisterName - UnregisterName(name string) error - - LoadedApplications() []gen.ApplicationInfo - WhichApplications() []gen.ApplicationInfo - ApplicationInfo(name string) (gen.ApplicationInfo, error) - ApplicationLoad(app gen.ApplicationBehavior, args ...etf.Term) (string, error) - ApplicationUnload(appName string) error - ApplicationStart(appName string, args ...etf.Term) (gen.Process, error) - ApplicationStartPermanent(appName string, args ...etf.Term) (gen.Process, error) - ApplicationStartTransient(appName string, args ...etf.Term) (gen.Process, error) - ApplicationStop(appName string) error - - ProvideRemoteSpawn(name string, object gen.ProcessBehavior) error - RevokeRemoteSpawn(name string) error - - // AddStaticRoute adds static route for the given name - AddStaticRoute(node string, host string, port uint16, options RouteOptions) error - // AddStaticRoutePort adds static route for the given node name which makes node skip resolving port process - AddStaticRoutePort(node string, port uint16, options RouteOptions) error - // AddStaticRouteOptions adds static route options for the given node name which does regular port resolving but applies static options - AddStaticRouteOptions(node string, options RouteOptions) error - // Remove static route removes static route with given name - RemoveStaticRoute(name string) bool - // StaticRoutes returns list of routes added using AddStaticRoute - StaticRoutes() []Route - // StaticRoute returns Route for the given name. Returns false if it doesn't exist. - StaticRoute(name string) (Route, bool) - - AddProxyRoute(proxy ProxyRoute) error - RemoveProxyRoute(name string) bool - // ProxyRoutes returns list of proxy routes added using AddProxyRoute - ProxyRoutes() []ProxyRoute - // ProxyRoute returns proxy route added using AddProxyRoute - ProxyRoute(name string) (ProxyRoute, bool) - - // Resolve - Resolve(node string) (Route, error) - // ResolveProxy resolves proxy route. Checks for the proxy route added using AddProxyRoute. - // If it wasn't found makes request to the registrar. 
- ResolveProxy(node string) (ProxyRoute, error) - - // Returns Registrar interface - Registrar() Registrar - - // Connect sets up a connection to node - Connect(node string) error - // Disconnect close connection to the node - Disconnect(node string) error - // Nodes returns the list of connected nodes - Nodes() []string - // NodesIndirect returns the list of nodes connected via proxies - NodesIndirect() []string - // NetworkStats returns network statistics of the connection with the node. Returns error - // ErrUnknown if connection with given node is not established. - NetworkStats(name string) (NetworkStats, error) - - Links(process etf.Pid) []etf.Pid - Monitors(process etf.Pid) []etf.Pid - MonitorsByName(process etf.Pid) []gen.ProcessID - MonitoredBy(process etf.Pid) []etf.Pid - - Stats() NodeStats - - Stop() - Wait() - WaitWithTimeout(d time.Duration) error -} - -// Version -type Version struct { - Release string - Prefix string - OTP int -} - -// CoreRouter routes messages from/to remote node -type CoreRouter interface { - - // - // implemented by core - // - - // RouteSend routes message by Pid - RouteSend(from etf.Pid, to etf.Pid, message etf.Term) error - // RouteSendReg routes message by registered process name (gen.ProcessID) - RouteSendReg(from etf.Pid, to gen.ProcessID, message etf.Term) error - // RouteSendAlias routes message by process alias - RouteSendAlias(from etf.Pid, to etf.Alias, message etf.Term) error - - RouteSpawnRequest(node string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error - RouteSpawnReply(to etf.Pid, ref etf.Ref, result etf.Term) error - - // - // implemented by monitor - // - - // RouteLink makes linking of the given two processes - RouteLink(pidA etf.Pid, pidB etf.Pid) error - // RouteUnlink makes unlinking of the given two processes - RouteUnlink(pidA etf.Pid, pidB etf.Pid) error - // RouteExit routes MessageExit to the linked process - RouteExit(to etf.Pid, terminated etf.Pid, reason string) error - // RouteMonitorReg makes monitor to the given registered process name (gen.ProcessID) - RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error - // RouteMonitor makes monitor to the given Pid - RouteMonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error - RouteDemonitor(by etf.Pid, ref etf.Ref) error - RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error - RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error - // RouteNodeDown - RouteNodeDown(name string, disconnect *ProxyDisconnect) - - // - // implemented by network - // - - // RouteProxyConnectRequest - RouteProxyConnectRequest(from ConnectionInterface, request ProxyConnectRequest) error - // RouteProxyConnectReply - RouteProxyConnectReply(from ConnectionInterface, reply ProxyConnectReply) error - // RouteProxyConnectCancel - RouteProxyConnectCancel(from ConnectionInterface, cancel ProxyConnectCancel) error - // RouteProxyDisconnect - RouteProxyDisconnect(from ConnectionInterface, disconnect ProxyDisconnect) error - // RouteProxy returns ErrProxySessionEndpoint if this node is the endpoint of the - // proxy session. In this case, the packet must be handled on this node with - // provided ProxySession parameters. 
- RouteProxy(from ConnectionInterface, sessionID string, packet *lib.Buffer) error -} - -// Options defines bootstrapping options for the node -type Options struct { - // Applications application list that must be started - Applications []gen.ApplicationBehavior - - // Env node environment - Env map[gen.EnvKey]interface{} - - // Creation. Default value: uint32(time.Now().Unix()) - Creation uint32 - - // Listeners node can have multiple listening interface at once. If this list is empty - // the default listener will be using. Only the first listener will be registered on - // the Registrar - Listeners []Listener - - // Flags defines option flags of this node for the outgoing connection - Flags Flags - - // TLS settings - TLS *tls.Config - - // StaticRoutesOnly disables resolving service (default is EPMD client) and - // makes resolving localy only for nodes added using gen.AddStaticRoute - StaticRoutesOnly bool - - // Registrar defines a registrar service (default is EPMD service, client and server) - Registrar Registrar - - // Compression defines default compression options for the spawning processes. - Compression Compression - - // Handshake defines a handshake handler. By default is using - // DIST handshake created with dist.CreateHandshake(...) - Handshake HandshakeInterface - - // Proto defines a proto handler. By default is using - // DIST proto created with dist.CreateProto(...) - Proto ProtoInterface - - // Cloud enable Ergo Cloud support - Cloud Cloud - - // Proxy options - Proxy Proxy - - // System options for the system application - System System -} - -type Listener struct { - // Cookie cookie for the incoming connection to this listener. Leave it empty in - // case of using the node's cookie. - Cookie string - // Hostname defines an interface for the listener. Default: takes from the node name. - Hostname string - // Listen defines a listening port number for accepting incoming connections. - Listen uint16 - // ListenBegin and ListenEnd define a range of the port numbers where - // the node looking for available free port number for the listening. - // Default values 15000 and 65000 accordingly - ListenBegin uint16 - ListenEnd uint16 - // Handshake if its nil the default TLS (Options.TLS) will be using - TLS *tls.Config - // Handshake if its nil the default Handshake (Options.Handshake) will be using - Handshake HandshakeInterface - // Proto if its nil the default Proto (Options.Proto) will be using - Proto ProtoInterface - // Flags defines option flags of this node for the incoming connection - // on this port. If its disabled the default Flags (Options.Flags) will be using - Flags Flags -} - -type Cloud struct { - Enable bool - Cluster string - Cookie string - Flags CloudFlags - Timeout time.Duration -} - -type Proxy struct { - // Transit allows to use this node as a proxy - Transit bool - // Accept incoming proxy connections - Accept bool - // Cookie sets cookie for incoming connections - Cookie string - // Flags sets options for incoming connections - Flags ProxyFlags - // Routes sets options for outgoing connections - Routes map[string]ProxyRoute -} - -type System struct { - DisableAnonMetrics bool -} - -type Compression struct { - // Enable enables compression for all outgoing messages having size - // greater than the defined threshold. - Enable bool - // Level defines compression level. Value must be in range 1..9 or -1 for the default level - Level int - // Threshold defines the minimal message size for the compression. 
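For contrast with the v3 API elsewhere in this patch, here is roughly how the removed v2 surface was driven, built only from the deleted signatures above. `ergo.StartNode` and all literal values are assumptions from the v2 module layout, and this does not compile against v3:

```go
package main

import (
	"log"

	"github.com/ergo-services/ergo"
	"github.com/ergo-services/ergo/node"
)

func main() {
	// Bootstrap options drawn from the deleted structs above: a fixed-port
	// listener, the documented default compression settings, transit proxying.
	options := node.Options{
		Listeners:   []node.Listener{{Listen: 25001}},
		Compression: node.Compression{Enable: true, Level: -1, Threshold: 1024},
		Proxy:       node.Proxy{Transit: true},
	}

	n, err := ergo.StartNode("demo@localhost", "secret", options)
	if err != nil {
		log.Fatal(err)
	}

	// Static route plus explicit connect, per the deleted Node interface.
	if err := n.AddStaticRoutePort("peer@localhost", 25002, node.RouteOptions{}); err != nil {
		log.Fatal(err)
	}
	if err := n.Connect("peer@localhost"); err != nil {
		log.Fatal(err)
	}
	n.Stop()
}
```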
- // Messages less of this threshold will not be compressed. - Threshold int -} - -// Connection -type Connection struct { - ConnectionInterface -} - -// ConnectionInterface -type ConnectionInterface interface { - Send(from gen.Process, to etf.Pid, message etf.Term) error - SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error - SendAlias(from gen.Process, to etf.Alias, message etf.Term) error - - Link(local etf.Pid, remote etf.Pid) error - Unlink(local etf.Pid, remote etf.Pid) error - LinkExit(to etf.Pid, terminated etf.Pid, reason string) error - - Monitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error - Demonitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error - MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error - - MonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error - DemonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error - MonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error - - SpawnRequest(nodeName string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error - SpawnReply(to etf.Pid, ref etf.Ref, spawned etf.Pid) error - SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error - - ProxyConnectRequest(connect ProxyConnectRequest) error - ProxyConnectReply(reply ProxyConnectReply) error - ProxyConnectCancel(cancel ProxyConnectCancel) error - ProxyDisconnect(disconnect ProxyDisconnect) error - ProxyRegisterSession(session ProxySession) error - ProxyUnregisterSession(id string) error - ProxyPacket(packet *lib.Buffer) error - - Creation() uint32 - Stats() NetworkStats -} - -// Handshake template struct for the custom Handshake implementation -type Handshake struct { - HandshakeInterface -} - -// Handshake defines handshake interface -type HandshakeInterface interface { - // Mandatory: - - // Init initialize handshake. - Init(nodename string, creation uint32, flags Flags) error - - // Optional: - - // Start initiates handshake process. Argument tls means the connection is wrapped by TLS - // Returns the name of connected peer, Flags and Creation wrapped into HandshakeDetails struct - Start(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (HandshakeDetails, error) - // Accept accepts handshake process initiated by another side of this connection. - // Returns the name of connected peer, Flags and Creation wrapped into HandshakeDetails struct - Accept(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (HandshakeDetails, error) - // Version handshake version. Must be implemented if this handshake is going to be used - // for the accepting connections (this method is used in registration on the Resolver) - Version() HandshakeVersion -} - -// HandshakeDetails -type HandshakeDetails struct { - // Name node name - Name string - // Flags node flags - Flags Flags - // Creation - Creation uint32 - // Version - Version int - // NumHandlers defines the number of readers/writers per connection. 
Default value is provided by ProtoOptions - NumHandlers int - // AtomMapping - AtomMapping *etf.AtomMapping - // ProxyTransit allows to restrict proxy connection requests for this connection - ProxyTransit ProxyTransit - // Buffer keeps data received along with the handshake - Buffer *lib.Buffer - // Custom allows passing the custom data to the ProtoInterface.Start - Custom HandshakeCustomDetails -} - -type HandshakeCustomDetails interface{} - -type HandshakeVersion int - -// Proto template struct for the custom Proto implementation -type Proto struct { - ProtoInterface -} - -// Proto defines proto interface for the custom Proto implementation -type ProtoInterface interface { - // Init initialize connection handler - Init(ctx context.Context, conn lib.NetReadWriter, nodename string, details HandshakeDetails) (ConnectionInterface, error) - // Serve connection - Serve(connection ConnectionInterface, router CoreRouter) - // Terminate invoked once Serve callback is finished - Terminate(connection ConnectionInterface) -} - -// ProtoOptions -type ProtoOptions struct { - // NumHandlers defines the number of readers/writers per connection. Default is the number of CPU - NumHandlers int - // MaxMessageSize limit the message size. Default 0 (no limit) - MaxMessageSize int - // SendQueueLength defines queue size of handler for the outgoing messages. Default 100. - SendQueueLength int - // RecvQueueLength defines queue size of handler for the incoming messages. Default 100. - RecvQueueLength int - // FragmentationUnit defines unit size for the fragmentation feature. Default 65000 - FragmentationUnit int - // Custom brings a custom set of options to the ProtoInterface.Serve handler - Custom CustomProtoOptions -} - -// CustomProtoOptions a custom set of proto options -type CustomProtoOptions interface{} - -// Flags -type Flags struct { - // Enable enable flags customization - Enable bool - // EnableHeaderAtomCache enables header atom cache feature - EnableHeaderAtomCache bool - // EnableBigCreation - EnableBigCreation bool - // EnableBigPidRef accepts a larger amount of data in pids and references - EnableBigPidRef bool - // EnableFragmentation enables fragmentation feature for the sending data - EnableFragmentation bool - // EnableAlias accepts process aliases - EnableAlias bool - // EnableRemoteSpawn accepts remote spawn request - EnableRemoteSpawn bool - // Compression compression support - EnableCompression bool - // Proxy enables support for incoming proxy connection - EnableProxy bool -} - -// Registrar defines registrar interface -type Registrar interface { - Register(ctx context.Context, nodename string, options RegisterOptions) error - RegisterProxy(nodename string, maxhop int, flags ProxyFlags) error - UnregisterProxy(peername string) error - Resolve(peername string) (Route, error) - ResolveProxy(peername string) (ProxyRoute, error) - Config() (RegistrarConfig, error) - ConfigItem(name string) (etf.Term, error) - SetConfigUpdateCallback(func(name string, value etf.Term) error) error -} - -type RegistrarConfig struct { - Version int - Config etf.Term -} - -// RegisterOptions defines resolving options -type RegisterOptions struct { - Port uint16 - Creation uint32 - NodeVersion Version - HandshakeVersion HandshakeVersion - EnableTLS bool - EnableProxy bool - EnableCompression bool - Proxy string -} - -// Route -type Route struct { - Node string - Host string - Port uint16 - Options RouteOptions -} - -// RouteOptions -type RouteOptions struct { - Cookie string - TLS *tls.Config - IsErgo bool - 
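Both the Handshake and Proto interfaces were pluggable; the stock implementations live in proto/dist, whose constructors appear further down in this patch. A sketch of wiring them into the removed v2 options with the documented defaults written out (the numbers come from the comments above; the wrapper function is invented):

```go
package main

import (
	"time"

	"github.com/ergo-services/ergo/node"
	"github.com/ergo-services/ergo/proto/dist"
)

// distOptions sketches wiring the stock v2 handshake/proto implementations
// into node.Options, with the documented defaults made explicit.
func distOptions() node.Options {
	handshake := dist.CreateHandshake(dist.HandshakeOptions{
		Version: dist.HandshakeVersion6,
		Timeout: 5 * time.Second, // DefaultHandshakeTimeout
	})
	proto := dist.CreateProto(node.ProtoOptions{
		NumHandlers:       4, // per-connection readers/writers; default is the CPU count
		SendQueueLength:   100,
		RecvQueueLength:   100,
		FragmentationUnit: 65000,
	})
	return node.Options{Handshake: handshake, Proto: proto}
}
```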
Handshake HandshakeInterface - Proto ProtoInterface -} - -// ProxyRoute -type ProxyRoute struct { - // Name can be either nodename (example@domain) or domain (@domain) - Name string - Proxy string - Cookie string - Flags ProxyFlags - MaxHop int // DefaultProxyMaxHop == 8 -} - -// CloudFlags -type CloudFlags struct { - Enable bool - EnableIntrospection bool - EnableMetrics bool - EnableRemoteSpawn bool -} - -// ProxyFlags -type ProxyFlags struct { - Enable bool - EnableLink bool - EnableMonitor bool - EnableRemoteSpawn bool - EnableEncryption bool -} - -// ProxyTransit -type ProxyTransit struct { - AllowTo []string -} - -// ProxyConnectRequest -type ProxyConnectRequest struct { - ID etf.Ref - To string // To node - Digest []byte // md5(md5(md5(md5(Node)+Cookie)+To)+PublicKey) - PublicKey []byte - Flags ProxyFlags - Creation uint32 - Hop int - Path []string -} - -// ProxyConnectReply -type ProxyConnectReply struct { - ID etf.Ref - To string - Digest []byte // md5(md5(md5(md5(Node)+Cookie)+To)+symmetric key) - Cipher []byte // encrypted symmetric key using PublicKey from the ProxyConnectRequest - Flags ProxyFlags - Creation uint32 - SessionID string // proxy session ID - Path []string -} - -// ProxyConnectCancel -type ProxyConnectCancel struct { - ID etf.Ref - From string - Reason string - Path []string -} - -// ProxyDisconnect -type ProxyDisconnect struct { - Node string - Proxy string - SessionID string - Reason string -} - -// Proxy session -type ProxySession struct { - ID string - NodeFlags ProxyFlags - PeerFlags ProxyFlags - Creation uint32 - PeerName string - Block cipher.Block // made from symmetric key -} - -type NetworkStats struct { - NodeName string - BytesIn uint64 - BytesOut uint64 - TransitBytesIn uint64 - TransitBytesOut uint64 - MessagesIn uint64 - MessagesOut uint64 -} - -type NodeStats struct { - TotalProcesses uint64 - TotalReferences uint64 - RunningProcesses uint64 - RegisteredNames uint64 - RegisteredAliases uint64 - - MonitorsByPid uint64 - MonitorsByName uint64 - MonitorsNodes uint64 - Links uint64 - - LoadedApplications uint64 - RunningApplications uint64 - - NetworkConnections uint64 - ProxyConnections uint64 - TransitConnections uint64 -} - -type MessageEventNetwork struct { - PeerName string - Online bool - Proxy bool -} diff --git a/proto/dist/epmd.go b/proto/dist/epmd.go deleted file mode 100644 index 4e46f99a..00000000 --- a/proto/dist/epmd.go +++ /dev/null @@ -1,237 +0,0 @@ -package dist - -import ( - "context" - "encoding/binary" - "fmt" - "net" - "strconv" - "strings" - "sync" - "time" - - "github.com/ergo-services/ergo/lib" -) - -type registeredNode struct { - port uint16 - hidden bool - hi uint16 - lo uint16 - extra []byte -} - -type epmd struct { - port uint16 - nodes map[string]registeredNode - nodesMutex sync.Mutex -} - -func startServerEPMD(ctx context.Context, host string, port uint16) error { - lc := net.ListenConfig{} - listener, err := lc.Listen(ctx, "tcp", net.JoinHostPort(host, strconv.Itoa(int(port)))) - if err != nil { - lib.Log("Can't start embedded EPMD service: %s", err) - return err - } - - epmd := epmd{ - port: port, - nodes: make(map[string]registeredNode), - } - go epmd.serve(listener) - lib.Log("Started embedded EMPD service and listen port: %d", port) - - return nil -} - -func (e *epmd) serve(l net.Listener) { - for { - c, err := l.Accept() - if err != nil { - lib.Log("EPMD server stopped: %s", err.Error()) - return - } - lib.Log("EPMD accepted new connection from %s", c.RemoteAddr().String()) - go e.handle(c) - } -} - -func (e *epmd) 
handle(c net.Conn) { - var name string - var node registeredNode - buf := make([]byte, 1024) - - defer c.Close() - for { - n, err := c.Read(buf) - lib.Log("Request from EPMD client: %v", buf[:n]) - if err != nil { - lib.Log("EPMD unregistering node: '%s'", name) - e.nodesMutex.Lock() - delete(e.nodes, name) - e.nodesMutex.Unlock() - return - } - if len(buf) < 6 { - lib.Log("Too short") - return - } - - buf = buf[:n] - // buf[0:1] - length - if uint16(n-2) != binary.BigEndian.Uint16(buf[0:2]) { - continue - } - - switch buf[2] { - case epmdAliveReq: - name, node, err = e.readAliveReq(buf[3:]) - if err != nil { - // send error and close connection - e.sendAliveResp(c, 1) - return - } - - // check if node with this name is already registered - e.nodesMutex.Lock() - _, exist := e.nodes[name] - e.nodesMutex.Unlock() - if exist { - // send error and close connection - e.sendAliveResp(c, 1) - return - } - - // send alive response - if err := e.sendAliveResp(c, 0); err != nil { - return - } - - // register new node - e.nodesMutex.Lock() - e.nodes[name] = node - e.nodesMutex.Unlock() - - // enable keep alive on this connection - if tcp, ok := c.(*net.TCPConn); ok { - tcp.SetKeepAlive(true) - tcp.SetKeepAlivePeriod(15 * time.Second) - tcp.SetNoDelay(true) - } - continue - - case epmdPortPleaseReq: - requestedName := string(buf[3:n]) - - e.nodesMutex.Lock() - node, exist := e.nodes[requestedName] - e.nodesMutex.Unlock() - - if exist == false { - lib.Log("EPMD: looking for '%s'. Not found", name) - c.Write([]byte{epmdPortResp, 1}) - return - } - e.sendPortPleaseResp(c, requestedName, node) - return - - case epmdNamesReq: - e.sendNamesResp(c, buf[3:n]) - return - - default: - lib.Log("unknown EPMD request") - return - } - - } -} - -func (e *epmd) readAliveReq(req []byte) (string, registeredNode, error) { - if len(req) < 10 { - return "", registeredNode{}, fmt.Errorf("Malformed EPMD request %v", req) - } - // Name length - l := binary.BigEndian.Uint16(req[8:10]) - // Name - name := string(req[10 : 10+l]) - // Hidden - hidden := false - if req[2] == 72 { - hidden = true - } - // node - node := registeredNode{ - port: binary.BigEndian.Uint16(req[0:2]), - hidden: hidden, - hi: binary.BigEndian.Uint16(req[4:6]), - lo: binary.BigEndian.Uint16(req[6:8]), - extra: req[10+l:], - } - - return name, node, nil -} - -func (e *epmd) sendAliveResp(c net.Conn, code int) error { - buf := make([]byte, 4) - buf[0] = epmdAliveResp - buf[1] = byte(code) - - // Creation. Ergo doesn't use it. Just for Erlang nodes. 
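readAliveReq above fixes the wire layout of an ALIVE2 registration: port, node type (72 'H' hidden, 77 'M' visible), protocol, version range, name length, name, then any extra bytes. A runnable sketch composing a payload that parser would accept (name and port invented):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// ALIVE2 payload as consumed by readAliveReq above (handle has already
	// stripped the 2-byte packet length and the 1-byte request tag).
	name := "demo@localhost"
	req := make([]byte, 10+len(name))
	binary.BigEndian.PutUint16(req[0:2], 25001)              // listen port
	req[2] = 77                                              // node type: 77 visible, 72 hidden
	req[3] = 0                                               // protocol: TCP
	binary.BigEndian.PutUint16(req[4:6], 6)                  // highest dist version
	binary.BigEndian.PutUint16(req[6:8], 5)                  // lowest dist version
	binary.BigEndian.PutUint16(req[8:10], uint16(len(name))) // name length
	copy(req[10:], name)                                     // name; Extra bytes may follow
	fmt.Printf("% x\n", req)
}
```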
- binary.BigEndian.PutUint16(buf[2:], uint16(1)) - _, err := c.Write(buf) - return err -} - -func (e *epmd) sendPortPleaseResp(c net.Conn, name string, node registeredNode) { - buf := make([]byte, 12+len(name)+2+len(node.extra)) - buf[0] = epmdPortResp - - // Result 0 - buf[1] = 0 - // Port - binary.BigEndian.PutUint16(buf[2:4], uint16(node.port)) - // Hidden - if node.hidden { - buf[4] = 72 - } else { - buf[4] = 77 - } - // Protocol TCP - buf[5] = 0 - // Highest version - binary.BigEndian.PutUint16(buf[6:8], uint16(node.hi)) - // Lowest version - binary.BigEndian.PutUint16(buf[8:10], uint16(node.lo)) - // Name - binary.BigEndian.PutUint16(buf[10:12], uint16(len(name))) - offset := 12 + len(name) - copy(buf[12:offset], name) - // Extra - l := len(node.extra) - copy(buf[offset:offset+l], node.extra) - // send - c.Write(buf) - return -} - -func (e *epmd) sendNamesResp(c net.Conn, req []byte) { - var str strings.Builder - var s string - var buf [4]byte - - binary.BigEndian.PutUint32(buf[0:4], uint32(e.port)) - str.WriteString(string(buf[0:])) - - e.nodesMutex.Lock() - for k, v := range e.nodes { - // io:format("name ~ts at port ~p~n", [NodeName, Port]). - s = fmt.Sprintf("name %s at port %d\n", k, v.port) - str.WriteString(s) - } - e.nodesMutex.Unlock() - - c.Write([]byte(str.String())) - return -} diff --git a/proto/dist/flusher.go b/proto/dist/flusher.go deleted file mode 100644 index 4410bee3..00000000 --- a/proto/dist/flusher.go +++ /dev/null @@ -1,105 +0,0 @@ -package dist - -import ( - "bufio" - "io" - "sync" - "time" -) - -var ( - // KeepAlive packet is just 4 bytes with zero value - keepAlivePacket = []byte{0, 0, 0, 0} - keepAlivePeriod = 15 * time.Second -) - -func newLinkFlusher(w io.Writer, latency time.Duration) *linkFlusher { - lf := &linkFlusher{ - latency: latency, - writer: bufio.NewWriter(w), - w: w, // in case if we skip buffering - } - - lf.timer = time.AfterFunc(keepAlivePeriod, func() { - - lf.mutex.Lock() - defer lf.mutex.Unlock() - - // if we have no pending data to send we should - // send a KeepAlive packet - if lf.pending == false { - lf.w.Write(keepAlivePacket) - lf.timer.Reset(keepAlivePeriod) - return - } - - lf.writer.Flush() - lf.pending = false - lf.timer.Reset(keepAlivePeriod) - }) - - return lf -} - -type linkFlusher struct { - mutex sync.Mutex - latency time.Duration - writer *bufio.Writer - w io.Writer - - timer *time.Timer - pending bool -} - -func (lf *linkFlusher) Write(b []byte) (int, error) { - lf.mutex.Lock() - defer lf.mutex.Unlock() - - l := len(b) - lenB := l - - // long data write directly to the socket. 
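newLinkFlusher above trades a little latency for fewer syscalls: writes land in a bufio.Writer and are flushed after `latency`, while the timer doubles as a keep-alive, emitting four zero bytes once the link has been idle for 15 seconds. An illustrative helper, assuming it sits inside package dist (200ns matches the defaultLatency constant in proto.go further down):

```go
package dist

import (
	"net"
	"time"
)

// dialBuffered is an illustrative helper, not part of the patch: it wraps
// a fresh TCP connection in the flusher defined above.
func dialBuffered(addr string) (*linkFlusher, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	// small writes coalesce for up to 200ns; writes over 64000 bytes
	// bypass the buffer entirely (see Write)
	return newLinkFlusher(conn, 200*time.Nanosecond), nil
}
```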
- if l > 64000 { - for { - n, e := lf.w.Write(b[lenB-l:]) - if e != nil { - return n, e - } - // check if something left - l -= n - if l > 0 { - continue - } - return lenB, nil - } - } - - // write data to the buffer - for { - n, e := lf.writer.Write(b) - if e != nil { - return n, e - } - // check if something left - l -= n - if l > 0 { - continue - } - break - } - - if lf.pending { - return lenB, nil - } - - lf.pending = true - lf.timer.Reset(lf.latency) - - return lenB, nil -} - -func (lf *linkFlusher) Stop() { - if lf.timer != nil { - lf.timer.Stop() - } -} diff --git a/proto/dist/handshake.go b/proto/dist/handshake.go deleted file mode 100644 index f47da79e..00000000 --- a/proto/dist/handshake.go +++ /dev/null @@ -1,836 +0,0 @@ -package dist - -import ( - "bytes" - "crypto/md5" - "encoding/binary" - "fmt" - "math/rand" - "net" - "time" - - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -const ( - HandshakeVersion5 node.HandshakeVersion = 5 - HandshakeVersion6 node.HandshakeVersion = 6 - - DefaultHandshakeVersion = HandshakeVersion5 - DefaultHandshakeTimeout = 5 * time.Second - - // distribution flags are defined here https://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-flags - flagPublished nodeFlagId = 0x1 - flagAtomCache nodeFlagId = 0x2 - flagExtendedReferences nodeFlagId = 0x4 - flagDistMonitor nodeFlagId = 0x8 - flagFunTags nodeFlagId = 0x10 - flagDistMonitorName nodeFlagId = 0x20 - flagHiddenAtomCache nodeFlagId = 0x40 - flagNewFunTags nodeFlagId = 0x80 - flagExtendedPidsPorts nodeFlagId = 0x100 - flagExportPtrTag nodeFlagId = 0x200 - flagBitBinaries nodeFlagId = 0x400 - flagNewFloats nodeFlagId = 0x800 - flagUnicodeIO nodeFlagId = 0x1000 - flagDistHdrAtomCache nodeFlagId = 0x2000 - flagSmallAtomTags nodeFlagId = 0x4000 - // flagCompressed = 0x8000 // erlang uses this flag for the internal purposes - flagUTF8Atoms nodeFlagId = 0x10000 - flagMapTag nodeFlagId = 0x20000 - flagBigCreation nodeFlagId = 0x40000 - flagSendSender nodeFlagId = 0x80000 // since OTP.21 enable replacement for SEND (distProtoSEND by distProtoSEND_SENDER) - flagBigSeqTraceLabels = 0x100000 - flagExitPayload nodeFlagId = 0x400000 // since OTP.22 enable replacement for EXIT, EXIT2, MONITOR_P_EXIT - flagFragments nodeFlagId = 0x800000 - flagHandshake23 nodeFlagId = 0x1000000 // new connection setup handshake (version 6) introduced in OTP 23 - flagUnlinkID nodeFlagId = 0x2000000 - // for 64bit flags - flagSpawn nodeFlagId = 1 << 32 - flagNameMe nodeFlagId = 1 << 33 - flagV4NC nodeFlagId = 1 << 34 - flagAlias nodeFlagId = 1 << 35 - - // ergo flags - flagCompression = 1 << 63 - flagProxy = 1 << 62 -) - -type nodeFlagId uint64 -type nodeFlags nodeFlagId - -func (nf nodeFlags) toUint32() uint32 { - return uint32(nf) -} - -func (nf nodeFlags) toUint64() uint64 { - return uint64(nf) -} - -func (nf nodeFlags) isSet(f nodeFlagId) bool { - return (uint64(nf) & uint64(f)) != 0 -} - -func toNodeFlags(f ...nodeFlagId) nodeFlags { - var flags uint64 - for _, v := range f { - flags |= uint64(v) - } - return nodeFlags(flags) -} - -// DistHandshake implements Erlang handshake -type DistHandshake struct { - node.Handshake - nodename string - flags node.Flags - creation uint32 - challenge uint32 - options HandshakeOptions -} - -type HandshakeOptions struct { - Timeout time.Duration - Version node.HandshakeVersion // 5 or 6 -} - -func CreateHandshake(options HandshakeOptions) node.HandshakeInterface { - // must be 5 or 6 - if options.Version != HandshakeVersion5 && options.Version != 
HandshakeVersion6 { - options.Version = DefaultHandshakeVersion - } - - if options.Timeout == 0 { - options.Timeout = DefaultHandshakeTimeout - } - return &DistHandshake{ - options: options, - challenge: rand.Uint32(), - } -} - -// Init implements Handshake interface mothod -func (dh *DistHandshake) Init(nodename string, creation uint32, flags node.Flags) error { - dh.nodename = nodename - dh.creation = creation - dh.flags = flags - return nil -} - -func (dh *DistHandshake) Version() node.HandshakeVersion { - return dh.options.Version -} - -func (dh *DistHandshake) Start(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (node.HandshakeDetails, error) { - - var details node.HandshakeDetails - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - var await []byte - - if dh.options.Version == HandshakeVersion5 { - dh.composeName(b, tls) - // the next message must be send_status 's' or send_challenge 'n' (for - // handshake version 5) or 'N' (for handshake version 6) - await = []byte{'s', 'n', 'N'} - } else { - dh.composeNameVersion6(b, tls) - await = []byte{'s', 'N'} - } - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - - // define timeout for the handshaking - timer := time.NewTimer(dh.options.Timeout) - defer timer.Stop() - - asyncReadChannel := make(chan error, 2) - asyncRead := func() { - _, e := b.ReadDataFrom(conn, 512) - asyncReadChannel <- e - } - - // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake - // Every message in the handshake starts with a 16-bit big-endian integer, - // which contains the message length (not counting the two initial bytes). - // In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice - // that after the handshake, the distribution switches to 4 byte packet headers. - expectingBytes := 2 - if tls { - // TLS connection has 4 bytes packet length header - expectingBytes = 4 - } - - for { - go asyncRead() - - select { - case <-timer.C: - return details, fmt.Errorf("handshake timeout") - - case e := <-asyncReadChannel: - if e != nil { - return details, e - } - - next: - l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes]) - buffer := b.B[expectingBytes:] - - if len(buffer) < int(l) { - return details, fmt.Errorf("malformed handshake (wrong packet length)") - } - - // check if we got correct message type regarding to 'await' value - if bytes.Count(await, buffer[0:1]) == 0 { - return details, fmt.Errorf("malformed handshake (wrong response)") - } - - switch buffer[0] { - case 'n': - // 'n' + 2 (version) + 4 (flags) + 4 (challenge) + name... - if len(b.B) < 12 { - return details, fmt.Errorf("malformed handshake ('n')") - } - - challenge, err := dh.readChallenge(buffer[1:], &details) - if err != nil { - return details, err - } - b.Reset() - - dh.composeChallengeReply(b, challenge, tls, cookie) - - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - // add 's' status for the case if we got it after 'n' or 'N' message - // yes, sometime it happens - await = []byte{'s', 'a'} - - case 'N': - // Peer support version 6. 
- - // The new challenge message format (version 6) - // 8 (flags) + 4 (Creation) + 2 (NameLen) + Name - if len(buffer) < 16 { - return details, fmt.Errorf("malformed handshake ('N' length)") - } - - challenge, err := dh.readChallengeVersion6(buffer[1:], &details) - if err != nil { - return details, err - } - b.Reset() - - if dh.options.Version == HandshakeVersion5 { - // upgrade handshake to version 6 by sending complement message - dh.composeComplement(b, tls) - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - } - - dh.composeChallengeReply(b, challenge, tls, cookie) - - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - - // add 's' (send_status message) for the case if we got it after 'n' or 'N' message - await = []byte{'s', 'a'} - - case 'a': - // 'a' + 16 (digest) - if len(buffer) < 17 { - return details, fmt.Errorf("malformed handshake ('a' length of digest)") - } - - // 'a' + 16 (digest) - digest := genDigest(dh.challenge, cookie) - if bytes.Compare(buffer[1:17], digest) != 0 { - return details, fmt.Errorf("malformed handshake ('a' digest)") - } - - // check if we got DIST packet with the final handshake data. - if len(buffer) > 17 { - details.Buffer = lib.TakeBuffer() - details.Buffer.Set(buffer[17:]) - } - - // handshaked - return details, nil - - case 's': - if dh.readStatus(buffer[1:]) == false { - return details, fmt.Errorf("handshake negotiation failed") - } - - await = []byte{'n', 'N'} - // "sok" - if len(buffer) > 4 { - b.B = b.B[expectingBytes+3:] - goto next - } - b.Reset() - - default: - return details, fmt.Errorf("malformed handshake ('%c' digest)", buffer[0]) - } - - } - - } - -} - -func (dh *DistHandshake) Accept(remote net.Addr, conn lib.NetReadWriter, tls bool, cookie string) (node.HandshakeDetails, error) { - var details node.HandshakeDetails - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - var await []byte - - // define timeout for the handshaking - timer := time.NewTimer(dh.options.Timeout) - defer timer.Stop() - - asyncReadChannel := make(chan error, 2) - asyncRead := func() { - _, e := b.ReadDataFrom(conn, 512) - asyncReadChannel <- e - } - - // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake - // Every message in the handshake starts with a 16-bit big-endian integer, - // which contains the message length (not counting the two initial bytes). - // In Erlang this corresponds to option {packet, 2} in gen_tcp(3). Notice - // that after the handshake, the distribution switches to 4 byte packet headers. 
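Start and Accept both parse replies using the framing described in the comment above: a 16-bit big-endian length prefix, widened to a 4-byte header on TLS links while the value still occupies the low two bytes. A minimal reader for one such message (illustrative, not from the patch):

```go
package dist

import (
	"encoding/binary"
	"io"
)

// readHandshakeMsg is an illustrative reader for one handshake message;
// body[0] is the tag ('n', 'N', 's', 'r', 'c' or 'a') dispatched on above.
func readHandshakeMsg(r io.Reader, tls bool) ([]byte, error) {
	hdrLen := 2
	if tls {
		hdrLen = 4 // TLS links carry a 4-byte packet length header
	}
	hdr := make([]byte, hdrLen)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, err
	}
	// mirror the deleted readers: the length occupies the low two bytes
	l := int(binary.BigEndian.Uint16(hdr[hdrLen-2:]))
	body := make([]byte, l)
	if _, err := io.ReadFull(r, body); err != nil {
		return nil, err
	}
	return body, nil
}
```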
- expectingBytes := 2 - if tls { - // TLS connection has 4 bytes packet length header - expectingBytes = 4 - } - - // the comming message must be 'receive_name' as an answer for the - // 'send_name' message request we just sent - await = []byte{'n', 'N'} - - for { - go asyncRead() - - select { - case <-timer.C: - return details, fmt.Errorf("handshake accept timeout") - case e := <-asyncReadChannel: - if e != nil { - return details, e - } - - if b.Len() < expectingBytes+1 { - return details, fmt.Errorf("malformed handshake (too short packet)") - } - - next: - l := binary.BigEndian.Uint16(b.B[expectingBytes-2 : expectingBytes]) - buffer := b.B[expectingBytes:] - - if len(buffer) < int(l) { - return details, fmt.Errorf("malformed handshake (wrong packet length)") - } - - if bytes.Count(await, buffer[0:1]) == 0 { - return details, fmt.Errorf("malformed handshake (wrong response %d)", buffer[0]) - } - - switch buffer[0] { - case 'n': - if len(buffer) < 8 { - return details, fmt.Errorf("malformed handshake ('n' length)") - } - - if err := dh.readName(buffer[1:], &details); err != nil { - return details, err - } - b.Reset() - dh.composeStatus(b, tls) - if e := b.WriteDataTo(conn); e != nil { - return details, fmt.Errorf("malformed handshake ('n' accept name)") - } - - b.Reset() - if details.Version == 6 { - dh.composeChallengeVersion6(b, tls) - await = []byte{'s', 'r', 'c'} - } else { - dh.composeChallenge(b, tls) - await = []byte{'s', 'r'} - } - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - - case 'N': - // The new challenge message format (version 6) - // 8 (flags) + 4 (Creation) + 2 (NameLen) + Name - if len(buffer) < 16 { - return details, fmt.Errorf("malformed handshake ('N' length)") - } - if err := dh.readNameVersion6(buffer[1:], &details); err != nil { - return details, err - } - b.Reset() - dh.composeStatus(b, tls) - if e := b.WriteDataTo(conn); e != nil { - return details, fmt.Errorf("malformed handshake ('N' accept name)") - } - - b.Reset() - dh.composeChallengeVersion6(b, tls) - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - - await = []byte{'s', 'r'} - - case 'c': - if len(buffer) < 9 { - return details, fmt.Errorf("malformed handshake ('c' length)") - } - dh.readComplement(buffer[1:], &details) - - await = []byte{'r'} - - if len(buffer) > 9 { - b.B = b.B[expectingBytes+9:] - goto next - } - b.Reset() - - case 'r': - if len(buffer) < 19 { - return details, fmt.Errorf("malformed handshake ('r' length)") - } - - challenge, valid := dh.validateChallengeReply(buffer[1:], cookie) - if valid == false { - return details, fmt.Errorf("malformed handshake ('r' invalid reply)") - } - b.Reset() - - dh.composeChallengeAck(b, challenge, tls, cookie) - if e := b.WriteDataTo(conn); e != nil { - return details, e - } - - // handshaked - - return details, nil - - case 's': - if dh.readStatus(buffer[1:]) == false { - return details, fmt.Errorf("link status != ok") - } - - await = []byte{'c', 'r'} - if len(buffer) > 4 { - b.B = b.B[expectingBytes+3:] - goto next - } - b.Reset() - - default: - return details, fmt.Errorf("malformed handshake (unknown code %d)", b.B[0]) - } - - } - - } -} - -// private functions - -func (dh *DistHandshake) composeName(b *lib.Buffer, tls bool) { - flags := composeFlags(dh.flags) - version := uint16(dh.options.Version) - if tls { - b.Allocate(11) - dataLength := 7 + len(dh.nodename) // byte + uint16 + uint32 + len(dh.nodename) - binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength)) - b.B[4] = 'n' - 
binary.BigEndian.PutUint16(b.B[5:7], version) // uint16 - binary.BigEndian.PutUint32(b.B[7:11], flags.toUint32()) // uint32 - b.Append([]byte(dh.nodename)) - return - } - - b.Allocate(9) - dataLength := 7 + len(dh.nodename) // byte + uint16 + uint32 + len(dh.nodename) - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.B[2] = 'n' - binary.BigEndian.PutUint16(b.B[3:5], version) // uint16 - binary.BigEndian.PutUint32(b.B[5:9], flags.toUint32()) // uint32 - b.Append([]byte(dh.nodename)) -} - -func (dh *DistHandshake) composeNameVersion6(b *lib.Buffer, tls bool) { - flags := composeFlags(dh.flags) - creation := uint32(dh.creation) - if tls { - b.Allocate(19) - dataLength := 15 + len(dh.nodename) // 1 + 8 (flags) + 4 (creation) + 2 (len dh.nodename) - binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength)) - b.B[4] = 'N' - binary.BigEndian.PutUint64(b.B[5:13], flags.toUint64()) // uint64 - binary.BigEndian.PutUint32(b.B[13:17], creation) //uint32 - binary.BigEndian.PutUint16(b.B[17:19], uint16(len(dh.nodename))) // uint16 - b.Append([]byte(dh.nodename)) - return - } - - b.Allocate(17) - dataLength := 15 + len(dh.nodename) // 1 + 8 (flags) + 4 (creation) + 2 (len dh.nodename) - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.B[2] = 'N' - binary.BigEndian.PutUint64(b.B[3:11], flags.toUint64()) // uint64 - binary.BigEndian.PutUint32(b.B[11:15], creation) // uint32 - binary.BigEndian.PutUint16(b.B[15:17], uint16(len(dh.nodename))) // uint16 - b.Append([]byte(dh.nodename)) -} - -func (dh *DistHandshake) readName(b []byte, details *node.HandshakeDetails) error { - flags := nodeFlags(binary.BigEndian.Uint32(b[2:6])) - details.Flags = node.DefaultFlags() - details.Flags.EnableFragmentation = flags.isSet(flagFragments) - details.Flags.EnableBigCreation = flags.isSet(flagBigCreation) - details.Flags.EnableHeaderAtomCache = flags.isSet(flagDistHdrAtomCache) - details.Flags.EnableAlias = flags.isSet(flagAlias) - details.Flags.EnableRemoteSpawn = flags.isSet(flagSpawn) - details.Flags.EnableBigPidRef = flags.isSet(flagV4NC) - version := int(binary.BigEndian.Uint16(b[0:2])) - if version != 5 { - return fmt.Errorf("Malformed version for handshake 5") - } - - details.Version = 5 - if flags.isSet(flagHandshake23) { - details.Version = 6 - } - - // Erlang node limits the node name length to 256 characters (not bytes). - // I don't think anyone wants to use such a ridiculous name with a length > 250 bytes. - // Report an issue you really want to have a name longer that 255 bytes. 
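composeName above and readName here agree on the version-5 send_name layout: tag, 16-bit version (always 5), 32-bit capability flags, node name. The same message spelled out with Go's append helpers (illustrative counterpart, requires Go 1.19+):

```go
package dist

import "encoding/binary"

// composeSendName is an illustrative mirror of readName's layout.
func composeSendName(flags uint32, name string) []byte {
	msg := []byte{'n'}
	msg = binary.BigEndian.AppendUint16(msg, 5)     // version, always 5 here
	msg = binary.BigEndian.AppendUint32(msg, flags) // 32-bit capability flags
	return append(msg, name...)
}
```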
- if len(b[6:]) > 255 { - return fmt.Errorf("Malformed node name") - } - details.Name = string(b[6:]) - - return nil -} - -func (dh *DistHandshake) readNameVersion6(b []byte, details *node.HandshakeDetails) error { - details.Creation = binary.BigEndian.Uint32(b[8:12]) - - flags := nodeFlags(binary.BigEndian.Uint64(b[0:8])) - details.Flags = node.DefaultFlags() - details.Flags.EnableFragmentation = flags.isSet(flagFragments) - details.Flags.EnableBigCreation = flags.isSet(flagBigCreation) - details.Flags.EnableHeaderAtomCache = flags.isSet(flagDistHdrAtomCache) - details.Flags.EnableAlias = flags.isSet(flagAlias) - details.Flags.EnableRemoteSpawn = flags.isSet(flagSpawn) - details.Flags.EnableBigPidRef = flags.isSet(flagV4NC) - details.Flags.EnableCompression = flags.isSet(flagCompression) - details.Flags.EnableProxy = flags.isSet(flagProxy) - - // see my prev comment about name len - nameLen := int(binary.BigEndian.Uint16(b[12:14])) - if nameLen > 255 { - return fmt.Errorf("Malformed node name") - } - nodename := string(b[14 : 14+nameLen]) - details.Name = nodename - - return nil -} - -func (dh *DistHandshake) composeStatus(b *lib.Buffer, tls bool) { - // there are few options for the status: ok, ok_simultaneous, nok, not_allowed, alive - // More details here: https://erlang.org/doc/apps/erts/erl_dist_protocol.html#the-handshake-in-detail - // support "ok" only, in any other cases link will be just closed - - if tls { - b.Allocate(4) - dataLength := 3 // 's' + "ok" - binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength)) - b.Append([]byte("sok")) - return - } - - b.Allocate(2) - dataLength := 3 // 's' + "ok" - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.Append([]byte("sok")) - -} - -func (dh *DistHandshake) readStatus(msg []byte) bool { - if string(msg[:2]) == "ok" { - return true - } - - return false -} - -func (dh *DistHandshake) composeChallenge(b *lib.Buffer, tls bool) { - flags := composeFlags(dh.flags) - if tls { - b.Allocate(15) - dataLength := uint32(11 + len(dh.nodename)) - binary.BigEndian.PutUint32(b.B[0:4], dataLength) - b.B[4] = 'n' - - //https://www.erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake - // The Version is a 16-bit big endian integer and must always have the value 5 - binary.BigEndian.PutUint16(b.B[5:7], 5) // uint16 - - binary.BigEndian.PutUint32(b.B[7:11], flags.toUint32()) // uint32 - binary.BigEndian.PutUint32(b.B[11:15], dh.challenge) // uint32 - b.Append([]byte(dh.nodename)) - return - } - - b.Allocate(13) - dataLength := 11 + len(dh.nodename) - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.B[2] = 'n' - //https://www.erlang.org/doc/apps/erts/erl_dist_protocol.html#distribution-handshake - // The Version is a 16-bit big endian integer and must always have the value 5 - binary.BigEndian.PutUint16(b.B[3:5], 5) // uint16 - binary.BigEndian.PutUint32(b.B[5:9], flags.toUint32()) // uint32 - binary.BigEndian.PutUint32(b.B[9:13], dh.challenge) // uint32 - b.Append([]byte(dh.nodename)) -} - -func (dh *DistHandshake) composeChallengeVersion6(b *lib.Buffer, tls bool) { - - flags := composeFlags(dh.flags) - if tls { - // 1 ('N') + 8 (flags) + 4 (chalange) + 4 (creation) + 2 (len(dh.nodename)) - b.Allocate(23) - dataLength := 19 + len(dh.nodename) - binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength)) - b.B[4] = 'N' - binary.BigEndian.PutUint64(b.B[5:13], uint64(flags)) // uint64 - binary.BigEndian.PutUint32(b.B[13:17], dh.challenge) // uint32 - binary.BigEndian.PutUint32(b.B[17:21], dh.creation) // uint32 - 
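The capability masks written into these messages round-trip through the nodeFlags helpers defined near the top of this file; for example, inside package dist:

```go
package dist

import "fmt"

// illustrative only: exercising the nodeFlags helpers defined above
func exampleFlags() {
	flags := toNodeFlags(flagHandshake23, flagFragments, flagBigCreation)
	fmt.Println(flags.isSet(flagHandshake23)) // true
	fmt.Println(flags.isSet(flagAlias))       // false
	fmt.Printf("%#x\n", flags.toUint64())     // 0x1840000
}
```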
binary.BigEndian.PutUint16(b.B[21:23], uint16(len(dh.nodename))) // uint16 - b.Append([]byte(dh.nodename)) - return - } - - // 1 ('N') + 8 (flags) + 4 (chalange) + 4 (creation) + 2 (len(dh.nodename)) - b.Allocate(21) - dataLength := 19 + len(dh.nodename) - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.B[2] = 'N' - binary.BigEndian.PutUint64(b.B[3:11], uint64(flags)) // uint64 - binary.BigEndian.PutUint32(b.B[11:15], dh.challenge) // uint32 - binary.BigEndian.PutUint32(b.B[15:19], dh.creation) // uint32 - binary.BigEndian.PutUint16(b.B[19:21], uint16(len(dh.nodename))) // uint16 - b.Append([]byte(dh.nodename)) -} - -func (dh *DistHandshake) readChallenge(msg []byte, details *node.HandshakeDetails) (uint32, error) { - var challenge uint32 - if len(msg) < 15 { - return challenge, fmt.Errorf("malformed handshake challenge") - } - flags := nodeFlags(binary.BigEndian.Uint32(msg[2:6])) - details.Flags = node.DefaultFlags() - details.Flags.EnableFragmentation = flags.isSet(flagFragments) - details.Flags.EnableBigCreation = flags.isSet(flagBigCreation) - details.Flags.EnableHeaderAtomCache = flags.isSet(flagDistHdrAtomCache) - details.Flags.EnableAlias = flags.isSet(flagAlias) - details.Flags.EnableRemoteSpawn = flags.isSet(flagSpawn) - details.Flags.EnableBigPidRef = flags.isSet(flagV4NC) - - version := binary.BigEndian.Uint16(msg[0:2]) - if version != uint16(HandshakeVersion5) { - return challenge, fmt.Errorf("malformed handshake version %d", version) - } - details.Version = int(version) - - if flags.isSet(flagHandshake23) { - // remote peer does support version 6 - details.Version = 6 - } - - details.Name = string(msg[10:]) - challenge = binary.BigEndian.Uint32(msg[6:10]) - return challenge, nil -} - -func (dh *DistHandshake) readChallengeVersion6(msg []byte, details *node.HandshakeDetails) (uint32, error) { - var challenge uint32 - flags := nodeFlags(binary.BigEndian.Uint64(msg[0:8])) - details.Flags = node.DefaultFlags() - details.Flags.EnableFragmentation = flags.isSet(flagFragments) - details.Flags.EnableBigCreation = flags.isSet(flagBigCreation) - details.Flags.EnableHeaderAtomCache = flags.isSet(flagDistHdrAtomCache) - details.Flags.EnableAlias = flags.isSet(flagAlias) - details.Flags.EnableRemoteSpawn = flags.isSet(flagSpawn) - details.Flags.EnableBigPidRef = flags.isSet(flagV4NC) - details.Flags.EnableCompression = flags.isSet(flagCompression) - details.Flags.EnableProxy = flags.isSet(flagProxy) - - details.Creation = binary.BigEndian.Uint32(msg[12:16]) - details.Version = 6 - - challenge = binary.BigEndian.Uint32(msg[8:12]) - - lenName := int(binary.BigEndian.Uint16(msg[16:18])) - details.Name = string(msg[18 : 18+lenName]) - - return challenge, nil -} - -func (dh *DistHandshake) readComplement(msg []byte, details *node.HandshakeDetails) { - flags := nodeFlags(uint64(binary.BigEndian.Uint32(msg[0:4])) << 32) - - details.Flags.EnableCompression = flags.isSet(flagCompression) - details.Flags.EnableProxy = flags.isSet(flagProxy) - details.Creation = binary.BigEndian.Uint32(msg[4:8]) -} - -func (dh *DistHandshake) validateChallengeReply(b []byte, cookie string) (uint32, bool) { - challenge := binary.BigEndian.Uint32(b[:4]) - digestB := b[4:] - - digestA := genDigest(dh.challenge, cookie) - return challenge, bytes.Equal(digestA[:], digestB) -} - -func (dh *DistHandshake) composeChallengeAck(b *lib.Buffer, challenge uint32, tls bool, cookie string) { - if tls { - b.Allocate(5) - dataLength := uint32(17) // 'a' + 16 (digest) - binary.BigEndian.PutUint32(b.B[0:4], dataLength) - 
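Every digest in this exchange comes from genDigest below: the MD5 of the cookie concatenated with the challenge rendered in decimal. Worked out in isolation (cookie and challenge invented):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	cookie := "secret"
	challenge := uint32(0xDEADBEEF)
	// same construction as genDigest: md5("secret" + "3735928559")
	digest := md5.Sum([]byte(fmt.Sprintf("%s%d", cookie, challenge)))
	// the 'r' reply carries the sender's own challenge plus this digest;
	// the 'a' ack answers with the digest of the peer's challenge
	fmt.Printf("%x\n", digest)
}
```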
b.B[4] = 'a' - digest := genDigest(challenge, cookie) - b.Append(digest) - return - } - - b.Allocate(3) - dataLength := uint16(17) // 'a' + 16 (digest) - binary.BigEndian.PutUint16(b.B[0:2], dataLength) - b.B[2] = 'a' - digest := genDigest(challenge, cookie) - b.Append(digest) -} - -func (dh *DistHandshake) composeChallengeReply(b *lib.Buffer, challenge uint32, tls bool, cookie string) { - if tls { - digest := genDigest(challenge, cookie) - b.Allocate(9) - dataLength := 5 + len(digest) // 1 (byte) + 4 (challenge) + 16 (digest) - binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength)) - b.B[4] = 'r' - binary.BigEndian.PutUint32(b.B[5:9], dh.challenge) // uint32 - b.Append(digest) - return - } - - b.Allocate(7) - digest := genDigest(challenge, cookie) - dataLength := 5 + len(digest) // 1 (byte) + 4 (challenge) + 16 (digest) - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.B[2] = 'r' - binary.BigEndian.PutUint32(b.B[3:7], dh.challenge) // uint32 - b.Append(digest) -} - -func (dh *DistHandshake) composeComplement(b *lib.Buffer, tls bool) { - flags := composeFlags(dh.flags) - // cast must cast creation to int32 in order to follow the - // erlang's handshake. Ergo don't care of it. - node_flags := uint32(flags.toUint64() >> 32) - if tls { - b.Allocate(13) - dataLength := 9 // 1 + 4 (flag high) + 4 (creation) - binary.BigEndian.PutUint32(b.B[0:4], uint32(dataLength)) - b.B[4] = 'c' - binary.BigEndian.PutUint32(b.B[5:9], node_flags) - binary.BigEndian.PutUint32(b.B[9:13], dh.creation) - return - } - - dataLength := 9 // 1 + 4 (flag high) + 4 (creation) - b.Allocate(11) - binary.BigEndian.PutUint16(b.B[0:2], uint16(dataLength)) - b.B[2] = 'c' - binary.BigEndian.PutUint32(b.B[3:7], node_flags) - binary.BigEndian.PutUint32(b.B[7:11], dh.creation) -} - -func genDigest(challenge uint32, cookie string) []byte { - s := fmt.Sprintf("%s%d", cookie, challenge) - digest := md5.Sum([]byte(s)) - return digest[:] -} - -func composeFlags(flags node.Flags) nodeFlags { - - // default flags - enabledFlags := []nodeFlagId{ - flagPublished, - flagUnicodeIO, - flagDistMonitor, - flagNewFloats, - flagBitBinaries, - flagDistMonitorName, - flagExtendedPidsPorts, - flagExtendedReferences, - flagAtomCache, - flagHiddenAtomCache, - flagFunTags, - flagNewFunTags, - flagExportPtrTag, - flagSmallAtomTags, - flagUTF8Atoms, - flagMapTag, - flagHandshake23, - } - - // optional flags - if flags.EnableHeaderAtomCache { - enabledFlags = append(enabledFlags, flagDistHdrAtomCache) - } - if flags.EnableFragmentation { - enabledFlags = append(enabledFlags, flagFragments) - } - if flags.EnableBigCreation { - enabledFlags = append(enabledFlags, flagBigCreation) - } - if flags.EnableAlias { - enabledFlags = append(enabledFlags, flagAlias) - } - if flags.EnableBigPidRef { - enabledFlags = append(enabledFlags, flagV4NC) - } - if flags.EnableRemoteSpawn { - enabledFlags = append(enabledFlags, flagSpawn) - } - if flags.EnableCompression { - enabledFlags = append(enabledFlags, flagCompression) - } - if flags.EnableProxy { - enabledFlags = append(enabledFlags, flagProxy) - } - return toNodeFlags(enabledFlags...) 
-} diff --git a/proto/dist/proto.go b/proto/dist/proto.go deleted file mode 100644 index 91a1685e..00000000 --- a/proto/dist/proto.go +++ /dev/null @@ -1,2261 +0,0 @@ -package dist - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/aes" - "crypto/cipher" - crand "crypto/rand" - "encoding/binary" - "fmt" - "io" - "math/rand" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -var ( - errMissingInCache = fmt.Errorf("missing in cache") - errMalformed = fmt.Errorf("malformed") - gzipReaders = &sync.Pool{ - New: func() interface{} { - return nil - }, - } - gzipWriters = [10]*sync.Pool{} - sendMessages = &sync.Pool{ - New: func() interface{} { - return &sendMessage{} - }, - } -) - -func init() { - rand.Seed(time.Now().UTC().UnixNano()) - for i := range gzipWriters { - gzipWriters[i] = &sync.Pool{ - New: func() interface{} { - return nil - }, - } - } -} - -const ( - defaultLatency = 200 * time.Nanosecond // for linkFlusher - - defaultCleanTimeout = 5 * time.Second // for checkClean - defaultCleanDeadline = 30 * time.Second // for checkClean - - // ergo proxy message - protoProxy = 141 - // ergo proxy encrypted message - protoProxyX = 142 - - // http://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution_header - protoDist = 131 - protoDistMessage = 68 - protoDistFragment1 = 69 - protoDistFragmentN = 70 - - // ergo gzipped messages - protoDistMessageZ = 200 - protoDistFragment1Z = 201 - protoDistFragmentNZ = 202 -) - -type fragmentedPacket struct { - buffer *lib.Buffer - disordered *lib.Buffer - disorderedSlices map[uint64][]byte - fragmentID uint64 - lastUpdate time.Time -} - -type proxySession struct { - session node.ProxySession - cache etf.AtomCache - senderCache []map[etf.Atom]etf.CacheItem -} - -type distConnection struct { - node.Connection - - nodename string - peername string - ctx context.Context - - // peer flags - flags node.Flags - - creation uint32 - - // socket - conn lib.NetReadWriter - cancelContext context.CancelFunc - - // proxy session (endpoints) - proxySessionsByID map[string]proxySession - proxySessionsByPeerName map[string]proxySession - proxySessionsMutex sync.RWMutex - - // route incoming messages - router node.CoreRouter - - // writer - flusher *linkFlusher - - // buffer - buffer *lib.Buffer - - // senders list of channels for the sending goroutines - senders senders - // receivers list of channels for the receiving goroutines - receivers receivers - - // atom cache for outgoing messages - cache etf.AtomCache - - mapping *etf.AtomMapping - - // fragmentation sequence ID - sequenceID int64 - fragments map[uint64]*fragmentedPacket - fragmentsMutex sync.Mutex - - // check and clean lost fragments - checkCleanPending bool - checkCleanTimer *time.Timer - checkCleanTimeout time.Duration // default is 5 seconds - checkCleanDeadline time.Duration // how long we wait for the next fragment of the certain sequenceID. 
Default is 30 seconds - - // stats - stats node.NetworkStats -} - -type distProto struct { - node.Proto - nodename string - options node.ProtoOptions -} - -func CreateProto(options node.ProtoOptions) node.ProtoInterface { - return &distProto{ - options: options, - } -} - -// -// node.Proto interface implementation -// - -type senders struct { - sender []*senderChannel - n int32 - i int32 -} - -type senderChannel struct { - sync.Mutex - sendChannel chan *sendMessage -} - -type sendMessage struct { - packet *lib.Buffer - control etf.Term - payload etf.Term - compression bool - compressionLevel int - compressionThreshold int - proxy *proxySession -} - -type receivers struct { - recv []chan *lib.Buffer - n int32 - i int32 -} - -func (dp *distProto) Init(ctx context.Context, conn lib.NetReadWriter, nodename string, details node.HandshakeDetails) (node.ConnectionInterface, error) { - connection := &distConnection{ - nodename: nodename, - peername: details.Name, - flags: details.Flags, - creation: details.Creation, - buffer: details.Buffer, - conn: conn, - cache: etf.NewAtomCache(), - mapping: details.AtomMapping, - proxySessionsByID: make(map[string]proxySession), - proxySessionsByPeerName: make(map[string]proxySession), - fragments: make(map[uint64]*fragmentedPacket), - checkCleanTimeout: defaultCleanTimeout, - checkCleanDeadline: defaultCleanDeadline, - } - connection.ctx, connection.cancelContext = context.WithCancel(ctx) - - connection.stats.NodeName = details.Name - - // create connection buffering - connection.flusher = newLinkFlusher(conn, defaultLatency) - - numHandlers := dp.options.NumHandlers - if details.NumHandlers > 0 { - numHandlers = details.NumHandlers - } - - // do not use shared channels within intencive code parts, impacts on a performance - connection.receivers = receivers{ - recv: make([]chan *lib.Buffer, numHandlers), - n: int32(numHandlers), - } - - // run readers for incoming messages - for i := 0; i < numHandlers; i++ { - // run packet reader routines (decoder) - recv := make(chan *lib.Buffer, dp.options.RecvQueueLength) - connection.receivers.recv[i] = recv - go connection.receiver(recv) - } - - connection.senders = senders{ - sender: make([]*senderChannel, numHandlers), - n: int32(numHandlers), - } - - // run readers/writers for incoming/outgoing messages - for i := 0; i < numHandlers; i++ { - // run writer routines (encoder) - send := make(chan *sendMessage, dp.options.SendQueueLength) - connection.senders.sender[i] = &senderChannel{ - sendChannel: send, - } - go connection.sender(i, send, dp.options, connection.flags) - } - - return connection, nil -} - -func (dp *distProto) Serve(ci node.ConnectionInterface, router node.CoreRouter) { - connection, ok := ci.(*distConnection) - if !ok { - lib.Warning("conn is not a *distConnection type") - return - } - - connection.router = router - - // run read loop - var err error - var packetLength int - - b := connection.buffer // not nil if we got extra data withing the handshake process - if b == nil { - b = lib.TakeBuffer() - } - - for { - packetLength, err = connection.read(b, dp.options.MaxMessageSize) - - // validation - if err != nil || packetLength == 0 { - // link was closed or got malformed data - if err != nil { - lib.Warning("link was closed", connection.peername, "error:", err) - } - lib.ReleaseBuffer(b) - return - } - - // check the context if it was cancelled - if connection.ctx.Err() != nil { - // canceled - lib.ReleaseBuffer(b) - return - } - - // take the new buffer for the next reading and append the tail - // 
(which is part of the next packet) - b1 := lib.TakeBuffer() - b1.Set(b.B[packetLength:]) - - // cut the tail and send it further for handling. - // buffer b has to be released by the reader of - // recv channel (link.ReadHandlePacket) - b.B = b.B[:packetLength] - connection.receivers.recv[connection.receivers.i] <- b - - // set new buffer as a current for the next reading - b = b1 - - // round-robin switch to the next receiver - connection.receivers.i++ - if connection.receivers.i < connection.receivers.n { - continue - } - connection.receivers.i = 0 - } - -} - -func (dp *distProto) Terminate(ci node.ConnectionInterface) { - connection, ok := ci.(*distConnection) - if !ok { - lib.Warning("conn is not a *distConnection type") - return - } - - for i := 0; i < dp.options.NumHandlers; i++ { - sender := connection.senders.sender[i] - if sender != nil { - sender.Lock() - close(sender.sendChannel) - sender.sendChannel = nil - sender.Unlock() - connection.senders.sender[i] = nil - } - if connection.receivers.recv[i] != nil { - close(connection.receivers.recv[i]) - } - } - connection.flusher.Stop() - connection.cancelContext() -} - -// node.Connection interface implementation - -func (dc *distConnection) Send(from gen.Process, to etf.Pid, message etf.Term) error { - msg := sendMessages.Get().(*sendMessage) - - msg.control = etf.Tuple{distProtoSEND, etf.Atom(""), to} - msg.payload = message - msg.compression = from.Compression() - msg.compressionLevel = from.CompressionLevel() - msg.compressionThreshold = from.CompressionThreshold() - - return dc.send(string(to.Node), to.Creation, msg) -} -func (dc *distConnection) SendReg(from gen.Process, to gen.ProcessID, message etf.Term) error { - msg := sendMessages.Get().(*sendMessage) - - msg.control = etf.Tuple{distProtoREG_SEND, from.Self(), etf.Atom(""), etf.Atom(to.Name)} - msg.payload = message - msg.compression = from.Compression() - msg.compressionLevel = from.CompressionLevel() - msg.compressionThreshold = from.CompressionThreshold() - return dc.send(to.Node, 0, msg) -} -func (dc *distConnection) SendAlias(from gen.Process, to etf.Alias, message etf.Term) error { - if dc.flags.EnableAlias == false { - return lib.ErrUnsupported - } - - msg := sendMessages.Get().(*sendMessage) - - msg.control = etf.Tuple{distProtoALIAS_SEND, from.Self(), to} - msg.payload = message - msg.compression = from.Compression() - msg.compressionLevel = from.CompressionLevel() - msg.compressionThreshold = from.CompressionThreshold() - - return dc.send(string(to.Node), to.Creation, msg) -} - -func (dc *distConnection) Link(local etf.Pid, remote etf.Pid) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)] - dc.proxySessionsMutex.RUnlock() - if isProxy && ps.session.PeerFlags.EnableLink == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - control: etf.Tuple{distProtoLINK, local, remote}, - } - return dc.send(string(remote.Node), remote.Creation, msg) -} -func (dc *distConnection) Unlink(local etf.Pid, remote etf.Pid) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)] - dc.proxySessionsMutex.RUnlock() - if isProxy && ps.session.PeerFlags.EnableLink == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - control: etf.Tuple{distProtoUNLINK, local, remote}, - } - return dc.send(string(remote.Node), remote.Creation, msg) -} -func (dc *distConnection) LinkExit(to etf.Pid, terminated etf.Pid, reason string) error { - msg := &sendMessage{ - control: 
etf.Tuple{distProtoEXIT, terminated, to, etf.Atom(reason)}, - } - return dc.send(string(to.Node), 0, msg) -} - -func (dc *distConnection) Monitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)] - dc.proxySessionsMutex.RUnlock() - if isProxy && ps.session.PeerFlags.EnableMonitor == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - control: etf.Tuple{distProtoMONITOR, local, remote, ref}, - } - return dc.send(string(remote.Node), remote.Creation, msg) -} -func (dc *distConnection) MonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[remote.Node] - dc.proxySessionsMutex.RUnlock() - if isProxy && ps.session.PeerFlags.EnableMonitor == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - control: etf.Tuple{distProtoMONITOR, local, etf.Atom(remote.Name), ref}, - } - return dc.send(remote.Node, 0, msg) -} -func (dc *distConnection) Demonitor(local etf.Pid, remote etf.Pid, ref etf.Ref) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[string(remote.Node)] - dc.proxySessionsMutex.RUnlock() - if isProxy && ps.session.PeerFlags.EnableMonitor == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - control: etf.Tuple{distProtoDEMONITOR, local, remote, ref}, - } - return dc.send(string(remote.Node), remote.Creation, msg) -} -func (dc *distConnection) DemonitorReg(local etf.Pid, remote gen.ProcessID, ref etf.Ref) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[remote.Node] - dc.proxySessionsMutex.RUnlock() - if isProxy && ps.session.PeerFlags.EnableMonitor == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - control: etf.Tuple{distProtoDEMONITOR, local, etf.Atom(remote.Name), ref}, - } - return dc.send(remote.Node, 0, msg) -} -func (dc *distConnection) MonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error { - msg := &sendMessage{ - control: etf.Tuple{distProtoMONITOR_EXIT, etf.Atom(terminated.Name), to, ref, etf.Atom(reason)}, - } - return dc.send(string(to.Node), to.Creation, msg) -} -func (dc *distConnection) MonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error { - msg := &sendMessage{ - control: etf.Tuple{distProtoMONITOR_EXIT, terminated, to, ref, etf.Atom(reason)}, - } - return dc.send(string(to.Node), to.Creation, msg) -} - -func (dc *distConnection) SpawnRequest(nodeName string, behaviorName string, request gen.RemoteSpawnRequest, args ...etf.Term) error { - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[nodeName] - dc.proxySessionsMutex.RUnlock() - if isProxy { - if ps.session.PeerFlags.EnableRemoteSpawn == false { - return lib.ErrPeerUnsupported - } - } else { - if dc.flags.EnableRemoteSpawn == false { - return lib.ErrPeerUnsupported - } - } - - optlist := etf.List{} - if request.Options.Name != "" { - optlist = append(optlist, etf.Tuple{etf.Atom("name"), etf.Atom(request.Options.Name)}) - - } - msg := &sendMessage{ - control: etf.Tuple{distProtoSPAWN_REQUEST, request.Ref, request.From, request.From, - // {M,F,A} - etf.Tuple{etf.Atom(behaviorName), etf.Atom(request.Options.Function), len(args)}, - optlist, - }, - payload: args, - } - return dc.send(nodeName, 0, msg) -} - -func (dc *distConnection) SpawnReply(to etf.Pid, ref etf.Ref, pid etf.Pid) error { - msg := &sendMessage{ - control: 
etf.Tuple{distProtoSPAWN_REPLY, ref, to, 0, pid}, - } - return dc.send(string(to.Node), to.Creation, msg) -} - -func (dc *distConnection) SpawnReplyError(to etf.Pid, ref etf.Ref, err error) error { - msg := &sendMessage{ - control: etf.Tuple{distProtoSPAWN_REPLY, ref, to, 0, etf.Atom(err.Error())}, - } - return dc.send(string(to.Node), to.Creation, msg) -} - -func (dc *distConnection) ProxyConnectRequest(request node.ProxyConnectRequest) error { - if dc.flags.EnableProxy == false { - return lib.ErrPeerUnsupported - } - - path := []etf.Atom{} - for i := range request.Path { - path = append(path, etf.Atom(request.Path[i])) - } - - msg := &sendMessage{ - control: etf.Tuple{distProtoPROXY_CONNECT_REQUEST, - request.ID, // etf.Ref - etf.Atom(request.To), // to node - request.Digest, // - request.PublicKey, // public key for the sending symmetric key - proxyFlagsToUint64(request.Flags), - request.Creation, - request.Hop, - path, - }, - } - return dc.send(dc.peername, 0, msg) -} - -func (dc *distConnection) ProxyConnectReply(reply node.ProxyConnectReply) error { - if dc.flags.EnableProxy == false { - return lib.ErrPeerUnsupported - } - - path := etf.List{} - for i := range reply.Path { - path = append(path, etf.Atom(reply.Path[i])) - } - - msg := &sendMessage{ - control: etf.Tuple{distProtoPROXY_CONNECT_REPLY, - reply.ID, // etf.Ref - etf.Atom(reply.To), // to node - reply.Digest, // - reply.Cipher, // - proxyFlagsToUint64(reply.Flags), - reply.Creation, - reply.SessionID, - path, - }, - } - - return dc.send(dc.peername, 0, msg) -} - -func (dc *distConnection) ProxyConnectCancel(err node.ProxyConnectCancel) error { - if dc.flags.EnableProxy == false { - return lib.ErrPeerUnsupported - } - - path := etf.List{} - for i := range err.Path { - path = append(path, etf.Atom(err.Path[i])) - } - - msg := &sendMessage{ - control: etf.Tuple{distProtoPROXY_CONNECT_CANCEL, - err.ID, // etf.Ref - etf.Atom(err.From), // from node - err.Reason, - path, - }, - } - - return dc.send(dc.peername, 0, msg) -} - -func (dc *distConnection) ProxyDisconnect(disconnect node.ProxyDisconnect) error { - if dc.flags.EnableProxy == false { - return lib.ErrPeerUnsupported - } - - msg := &sendMessage{ - control: etf.Tuple{distProtoPROXY_DISCONNECT, - etf.Atom(disconnect.Node), - etf.Atom(disconnect.Proxy), - disconnect.SessionID, - disconnect.Reason, - }, - } - - return dc.send(dc.peername, 0, msg) -} - -func (dc *distConnection) ProxyRegisterSession(session node.ProxySession) error { - dc.proxySessionsMutex.Lock() - defer dc.proxySessionsMutex.Unlock() - _, exist := dc.proxySessionsByPeerName[session.PeerName] - if exist { - return lib.ErrProxySessionDuplicate - } - _, exist = dc.proxySessionsByID[session.ID] - if exist { - return lib.ErrProxySessionDuplicate - } - ps := proxySession{ - session: session, - cache: etf.NewAtomCache(), - // every sender should have its own senderAtomCache in the proxy session - senderCache: make([]map[etf.Atom]etf.CacheItem, len(dc.senders.sender)), - } - dc.proxySessionsByPeerName[session.PeerName] = ps - dc.proxySessionsByID[session.ID] = ps - return nil -} - -func (dc *distConnection) ProxyUnregisterSession(id string) error { - dc.proxySessionsMutex.Lock() - defer dc.proxySessionsMutex.Unlock() - ps, exist := dc.proxySessionsByID[id] - if exist == false { - return lib.ErrProxySessionUnknown - } - delete(dc.proxySessionsByPeerName, ps.session.PeerName) - delete(dc.proxySessionsByID, ps.session.ID) - return nil -} - -func (dc *distConnection) ProxyPacket(packet *lib.Buffer) error { - if 
dc.flags.EnableProxy == false { - return lib.ErrPeerUnsupported - } - msg := &sendMessage{ - packet: packet, - } - return dc.send(dc.peername, 0, msg) -} - -// -// internal -// - -func (dc *distConnection) read(b *lib.Buffer, max int) (int, error) { - // http://erlang.org/doc/apps/erts/erl_dist_protocol.html#protocol-between-connected-nodes - expectingBytes := 4 - for { - if b.Len() < expectingBytes { - // if no data is received during the 4 * keepAlivePeriod the remote node - // seems to be stuck. - deadline := true - if err := dc.conn.SetReadDeadline(time.Now().Add(4 * keepAlivePeriod)); err != nil { - deadline = false - } - - n, e := b.ReadDataFrom(dc.conn, max) - if n == 0 { - if err, ok := e.(net.Error); deadline && ok && err.Timeout() { - lib.Warning("Node %q not responding. Drop connection", dc.peername) - } - // link was closed - return 0, nil - } - - if e != nil && e != io.EOF { - // something went wrong - return 0, e - } - - // check onemore time if we should read more data - continue - } - - packetLength := binary.BigEndian.Uint32(b.B[:4]) - if packetLength == 0 { - // it was "software" keepalive - expectingBytes = 4 - if len(b.B) == 4 { - b.Reset() - continue - } - b.B = b.B[4:] - continue - } - - if b.Len() < int(packetLength)+4 { - expectingBytes = int(packetLength) + 4 - continue - } - - return int(packetLength) + 4, nil - } - -} - -type deferrMissing struct { - b *lib.Buffer - c int -} - -type distMessage struct { - control etf.Term - payload etf.Term - proxy *proxySession -} - -func (dc *distConnection) receiver(recv <-chan *lib.Buffer) { - var b *lib.Buffer - var missing deferrMissing - var Timeout <-chan time.Time - - // cancel connection context if something went wrong - // it will cause closing connection with stopping all - // goroutines around this connection - defer dc.cancelContext() - - deferrChannel := make(chan deferrMissing, 100) - defer close(deferrChannel) - - timer := lib.TakeTimer() - defer lib.ReleaseTimer(timer) - - dChannel := deferrChannel - - for { - select { - case missing = <-dChannel: - b = missing.b - default: - if len(deferrChannel) > 0 { - timer.Reset(150 * time.Millisecond) - Timeout = timer.C - } else { - Timeout = nil - } - select { - case b = <-recv: - if b == nil { - // channel was closed - return - } - case <-Timeout: - dChannel = deferrChannel - continue - } - } - - // read and decode received packet - message, err := dc.decodePacket(b) - - if err == errMissingInCache { - if b == missing.b && missing.c > 100 { - lib.Warning("Disordered data at the link with %q. Close connection", dc.peername) - dc.cancelContext() - lib.ReleaseBuffer(b) - return - } - - if b == missing.b { - missing.c++ - } else { - missing.b = b - missing.c = 0 - } - - select { - case deferrChannel <- missing: - // read recv channel - dChannel = nil - continue - default: - lib.Warning("Mess at the link with %q. 
Close connection", dc.peername) - dc.cancelContext() - lib.ReleaseBuffer(b) - return - } - } - - dChannel = deferrChannel - - if err != nil { - lib.Warning("[%s] Malformed Dist proto at the link with %s: %s", dc.nodename, dc.peername, err) - dc.cancelContext() - lib.ReleaseBuffer(b) - return - } - - if message == nil { - // fragment or proxy message - continue - } - - // handle message - if err := dc.handleMessage(message); err != nil { - if message.proxy == nil { - lib.Warning("[%s] Malformed Control packet at the link with %s: %#v", dc.nodename, dc.peername, message.control) - dc.cancelContext() - lib.ReleaseBuffer(b) - return - } - // drop proxy session - lib.Warning("[%s] Malformed Control packet at the proxy link with %s: %#v", dc.nodename, message.proxy.session.PeerName, message.control) - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: message.proxy.session.ID, - Reason: err.Error(), - } - // route it locally to unregister this session - dc.router.RouteProxyDisconnect(dc, disconnect) - // send it to the peer - dc.ProxyDisconnect(disconnect) - } - - atomic.AddUint64(&dc.stats.MessagesIn, 1) - - // we have to release this buffer - lib.ReleaseBuffer(b) - - } -} - -func (dc *distConnection) decodePacket(b *lib.Buffer) (*distMessage, error) { - packet := b.B - if len(packet) < 5 { - return nil, fmt.Errorf("malformed packet") - } - - // [:3] length - switch packet[4] { - case protoDist: - // do not check the length. it was checked on the receiving this packet. - control, payload, err := dc.decodeDist(packet[5:], nil) - if control == nil { - return nil, err - } - atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len())) - - message := &distMessage{control: control, payload: payload} - return message, err - - case protoProxy: - sessionID := string(packet[5:37]) - dc.proxySessionsMutex.RLock() - ps, exist := dc.proxySessionsByID[sessionID] - dc.proxySessionsMutex.RUnlock() - if exist == false { - // must be send further - if err := dc.router.RouteProxy(dc, sessionID, b); err != nil { - // drop proxy session - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: sessionID, - Reason: err.Error(), - } - dc.ProxyDisconnect(disconnect) - return nil, nil - } - atomic.AddUint64(&dc.stats.TransitBytesIn, uint64(b.Len())) - return nil, nil - } - - // this node is endpoint of this session - packet = b.B[37:] - control, payload, err := dc.decodeDist(packet, &ps) - if err != nil { - if err == errMissingInCache { - // will be deferred. - // 37 - 5 - // where: - // 37 = packet len (4) + protoProxy (1) + session id (32) - // reserving 5 bytes for: packet len(4) + protoDist (1) - // we don't update packet len value. it was already validated - // and will be ignored on the next dc.decodeDist call - b.B = b.B[32:] - b.B[4] = protoDist - return nil, err - } - // drop this proxy session. 
send back ProxyDisconnect - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: sessionID, - Reason: err.Error(), - } - dc.router.RouteProxyDisconnect(dc, disconnect) - dc.ProxyDisconnect(disconnect) - return nil, nil - } - - atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len())) - - if control == nil { - return nil, nil - } - message := &distMessage{control: control, payload: payload, proxy: &ps} - return message, nil - - case protoProxyX: - atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len())) - sessionID := string(packet[5:37]) - dc.proxySessionsMutex.RLock() - ps, exist := dc.proxySessionsByID[sessionID] - dc.proxySessionsMutex.RUnlock() - if exist == false { - // must be send further - if err := dc.router.RouteProxy(dc, sessionID, b); err != nil { - // drop proxy session - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: sessionID, - Reason: err.Error(), - } - dc.ProxyDisconnect(disconnect) - return nil, nil - } - atomic.AddUint64(&dc.stats.TransitBytesIn, uint64(b.Len())) - return nil, nil - } - - packet = b.B[37:] - if (len(packet) % aes.BlockSize) != 0 { - // drop this proxy session. - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: sessionID, - Reason: "wrong blocksize of the encrypted message", - } - dc.router.RouteProxyDisconnect(dc, disconnect) - dc.ProxyDisconnect(disconnect) - return nil, nil - } - - // BUG? double counted. see below - atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len())) - - iv := packet[:aes.BlockSize] - msg := packet[aes.BlockSize:] - cfb := cipher.NewCFBDecrypter(ps.session.Block, iv) - cfb.XORKeyStream(msg, msg) - - // check padding - length := len(msg) - unpadding := int(msg[length-1]) - if unpadding > length { - // drop this proxy session. - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: sessionID, - Reason: "wrong padding of the encrypted message", - } - dc.router.RouteProxyDisconnect(dc, disconnect) - dc.ProxyDisconnect(disconnect) - return nil, nil - } - packet = msg[:(length - unpadding)] - control, payload, err := dc.decodeDist(packet, &ps) - if err != nil { - if err == errMissingInCache { - // will be deferred - b.B = b.B[32+aes.BlockSize:] - b.B[4] = protoDist - return nil, err - } - // drop this proxy session. - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: sessionID, - Reason: err.Error(), - } - dc.router.RouteProxyDisconnect(dc, disconnect) - dc.ProxyDisconnect(disconnect) - return nil, nil - } - // BUG? double counted. 
see above - atomic.AddUint64(&dc.stats.BytesIn, uint64(b.Len())) - if control == nil { - return nil, nil - } - message := &distMessage{control: control, payload: payload, proxy: &ps} - return message, nil - - default: - // unknown proto - return nil, fmt.Errorf("unknown/unsupported proto") - } - -} - -func (dc *distConnection) decodeDist(packet []byte, proxy *proxySession) (etf.Term, etf.Term, error) { - switch packet[0] { - case protoDistMessage: - var control, payload etf.Term - var err error - var cache []etf.Atom - - cache, packet, err = dc.decodeDistHeaderAtomCache(packet[1:], proxy) - if err != nil { - return nil, nil, err - } - - decodeOptions := etf.DecodeOptions{ - AtomMapping: dc.mapping, - FlagBigPidRef: dc.flags.EnableBigPidRef, - } - if proxy != nil { - decodeOptions.FlagBigPidRef = true - } - - // decode control message - control, packet, err = etf.Decode(packet, cache, decodeOptions) - if err != nil { - return nil, nil, err - } - - if len(packet) == 0 { - return control, nil, nil - } - - // decode payload message - payload, packet, err = etf.Decode(packet, cache, decodeOptions) - if err != nil { - return nil, nil, err - } - - if len(packet) != 0 { - return nil, nil, fmt.Errorf("packet has extra %d byte(s)", len(packet)) - } - - return control, payload, nil - - case protoDistMessageZ: - var control, payload etf.Term - var err error - var cache []etf.Atom - var zReader *gzip.Reader - var total int - // compressed protoDistMessage - - cache, packet, err = dc.decodeDistHeaderAtomCache(packet[1:], proxy) - if err != nil { - return nil, nil, err - } - - // read the length of unpacked data - lenUnpacked := int(binary.BigEndian.Uint32(packet[:4])) - - // take the gzip reader from the pool - if r, ok := gzipReaders.Get().(*gzip.Reader); ok { - zReader = r - zReader.Reset(bytes.NewBuffer(packet[4:])) - } else { - zReader, _ = gzip.NewReader(bytes.NewBuffer(packet[4:])) - } - defer gzipReaders.Put(zReader) - - // take new buffer and allocate space for the unpacked data - zBuffer := lib.TakeBuffer() - zBuffer.Allocate(lenUnpacked) - defer lib.ReleaseBuffer(zBuffer) - - // unzipping and decoding the data - for { - n, e := zReader.Read(zBuffer.B[total:]) - if n == 0 { - return nil, nil, fmt.Errorf("zbuffer too small") - } - total += n - if e == io.EOF { - break - } - if e != nil { - return nil, nil, e - } - } - - packet = zBuffer.B - decodeOptions := etf.DecodeOptions{ - FlagBigPidRef: dc.flags.EnableBigPidRef, - } - if proxy != nil { - decodeOptions.FlagBigPidRef = true - } - - // decode control message - control, packet, err = etf.Decode(packet, cache, decodeOptions) - if err != nil { - return nil, nil, err - } - if len(packet) == 0 { - return control, nil, nil - } - - // decode payload message - payload, packet, err = etf.Decode(packet, cache, decodeOptions) - if err != nil { - return nil, nil, err - } - - if len(packet) != 0 { - return nil, nil, fmt.Errorf("packet has extra %d byte(s)", len(packet)) - } - - return control, payload, nil - - case protoDistFragment1, protoDistFragmentN, protoDistFragment1Z, protoDistFragmentNZ: - if len(packet) < 18 { - return nil, nil, fmt.Errorf("malformed fragment (too small)") - } - - if assembled, err := dc.decodeFragment(packet, proxy); assembled != nil { - if err != nil { - return nil, nil, err - } - control, payload, err := dc.decodeDist(assembled.B, nil) - lib.ReleaseBuffer(assembled) - return control, payload, err - } else { - if err != nil { - return nil, nil, err - } - } - return nil, nil, nil - } - - return nil, nil, fmt.Errorf("unknown 
packet type %d", packet[0]) -} - -func (dc *distConnection) handleMessage(message *distMessage) (err error) { - defer func() { - if lib.CatchPanic() { - if r := recover(); r != nil { - err = fmt.Errorf("%s", r) - } - } - }() - - switch t := message.control.(type) { - case etf.Tuple: - switch act := t.Element(1).(type) { - case int: - switch act { - case distProtoREG_SEND: - // {6, FromPid, Unused, ToName} - lib.Log("[%s] CONTROL REG_SEND [from %s]: %#v", dc.nodename, dc.peername, message.control) - to := gen.ProcessID{ - Node: dc.nodename, - Name: string(t.Element(4).(etf.Atom)), - } - dc.router.RouteSendReg(t.Element(2).(etf.Pid), to, message.payload) - return nil - - case distProtoSEND: - // {2, Unused, ToPid} - // SEND has no sender pid - lib.Log("[%s] CONTROL SEND [from %s]: %#v", dc.nodename, dc.peername, message.control) - dc.router.RouteSend(etf.Pid{}, t.Element(3).(etf.Pid), message.payload) - return nil - - case distProtoLINK: - // {1, FromPid, ToPid} - lib.Log("[%s] CONTROL LINK [from %s]: %#v", dc.nodename, dc.peername, message.control) - if message.proxy != nil && message.proxy.session.NodeFlags.EnableLink == false { - // we didn't allow this feature. proxy session will be closed due to - // this violation of the contract - return lib.ErrPeerUnsupported - } - dc.router.RouteLink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid)) - return nil - - case distProtoUNLINK: - // {4, FromPid, ToPid} - lib.Log("[%s] CONTROL UNLINK [from %s]: %#v", dc.nodename, dc.peername, message.control) - if message.proxy != nil && message.proxy.session.NodeFlags.EnableLink == false { - // we didn't allow this feature. proxy session will be closed due to - // this violation of the contract - return lib.ErrPeerUnsupported - } - dc.router.RouteUnlink(t.Element(2).(etf.Pid), t.Element(3).(etf.Pid)) - return nil - - case distProtoNODE_LINK: - lib.Log("[%s] CONTROL NODE_LINK [from %s]: %#v", dc.nodename, dc.peername, message.control) - return nil - - case distProtoEXIT: - // {3, FromPid, ToPid, Reason} - lib.Log("[%s] CONTROL EXIT [from %s]: %#v", dc.nodename, dc.peername, message.control) - terminated := t.Element(2).(etf.Pid) - to := t.Element(3).(etf.Pid) - reason := fmt.Sprint(t.Element(4)) - dc.router.RouteExit(to, terminated, string(reason)) - return nil - - case distProtoEXIT2: - lib.Log("[%s] CONTROL EXIT2 [from %s]: %#v", dc.nodename, dc.peername, message.control) - return nil - - case distProtoMONITOR: - // {19, FromPid, ToProc, Ref}, where FromPid = monitoring process - // and ToProc = monitored process pid or name (atom) - lib.Log("[%s] CONTROL MONITOR [from %s]: %#v", dc.nodename, dc.peername, message.control) - if message.proxy != nil && message.proxy.session.NodeFlags.EnableMonitor == false { - // we didn't allow this feature. 
proxy session will be closed due to - // this violation of the contract - return lib.ErrPeerUnsupported - } - - fromPid := t.Element(2).(etf.Pid) - ref := t.Element(4).(etf.Ref) - // if monitoring by pid - if to, ok := t.Element(3).(etf.Pid); ok { - dc.router.RouteMonitor(fromPid, to, ref) - return nil - } - - // if monitoring by process name - if to, ok := t.Element(3).(etf.Atom); ok { - processID := gen.ProcessID{ - Node: dc.nodename, - Name: string(to), - } - dc.router.RouteMonitorReg(fromPid, processID, ref) - return nil - } - - return fmt.Errorf("malformed monitor message") - - case distProtoDEMONITOR: - // {20, FromPid, ToProc, Ref}, where FromPid = monitoring process - // and ToProc = monitored process pid or name (atom) - lib.Log("[%s] CONTROL DEMONITOR [from %s]: %#v", dc.nodename, dc.peername, message.control) - if message.proxy != nil && message.proxy.session.NodeFlags.EnableMonitor == false { - // we didn't allow this feature. proxy session will be closed due to - // this violation of the contract - return lib.ErrPeerUnsupported - } - ref := t.Element(4).(etf.Ref) - fromPid := t.Element(2).(etf.Pid) - dc.router.RouteDemonitor(fromPid, ref) - return nil - - case distProtoMONITOR_EXIT: - // {21, FromProc, ToPid, Ref, Reason}, where FromProc = monitored process - // pid or name (atom), ToPid = monitoring process, and Reason = exit reason for the monitored process - lib.Log("[%s] CONTROL MONITOR_EXIT [from %s]: %#v", dc.nodename, dc.peername, message.control) - reason := fmt.Sprint(t.Element(5)) - ref := t.Element(4).(etf.Ref) - switch terminated := t.Element(2).(type) { - case etf.Pid: - dc.router.RouteMonitorExit(terminated, reason, ref) - return nil - case etf.Atom: - processID := gen.ProcessID{Name: string(terminated), Node: dc.peername} - if message.proxy != nil { - processID.Node = message.proxy.session.PeerName - } - dc.router.RouteMonitorExitReg(processID, reason, ref) - return nil - } - return fmt.Errorf("malformed monitor exit message") - - // Not implemented yet, just stubs. TODO. - case distProtoSEND_SENDER: - lib.Log("[%s] CONTROL SEND_SENDER unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control) - return nil - case distProtoPAYLOAD_EXIT: - lib.Log("[%s] CONTROL PAYLOAD_EXIT unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control) - return nil - case distProtoPAYLOAD_EXIT2: - lib.Log("[%s] CONTROL PAYLOAD_EXIT2 unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control) - return nil - case distProtoPAYLOAD_MONITOR_P_EXIT: - lib.Log("[%s] CONTROL PAYLOAD_MONITOR_P_EXIT unsupported [from %s]: %#v", dc.nodename, dc.peername, message.control) - return nil - - // alias support - case distProtoALIAS_SEND: - // {33, FromPid, Alias} - lib.Log("[%s] CONTROL ALIAS_SEND [from %s]: %#v", dc.nodename, dc.peername, message.control) - alias := etf.Alias(t.Element(3).(etf.Ref)) - dc.router.RouteSendAlias(t.Element(2).(etf.Pid), alias, message.payload) - return nil - - case distProtoSPAWN_REQUEST: - // {29, ReqId, From, GroupLeader, {Module, Function, Arity}, OptList} - lib.Log("[%s] CONTROL SPAWN_REQUEST [from %s]: %#v", dc.nodename, dc.peername, message.control) - if message.proxy != nil && message.proxy.session.NodeFlags.EnableRemoteSpawn == false { - // we didn't allow this feature. 
proxy session will be closed due to - // this violation of the contract - return lib.ErrPeerUnsupported - } - registerName := "" - for _, option := range t.Element(6).(etf.List) { - name, ok := option.(etf.Tuple) - if !ok || len(name) != 2 { - return fmt.Errorf("malformed spawn request") - } - switch name.Element(1) { - case etf.Atom("name"): - registerName = string(name.Element(2).(etf.Atom)) - } - } - - from := t.Element(3).(etf.Pid) - ref := t.Element(2).(etf.Ref) - - mfa := t.Element(5).(etf.Tuple) - module := mfa.Element(1).(etf.Atom) - function := mfa.Element(2).(etf.Atom) - var args etf.List - if str, ok := message.payload.(string); !ok { - args, _ = message.payload.(etf.List) - } else { - // stupid Erlang's strings :). [1,2,3,4,5] sends as a string. - // args can't be anything but etf.List. - for i := range []byte(str) { - args = append(args, str[i]) - } - } - - spawnRequestOptions := gen.RemoteSpawnOptions{ - Name: registerName, - Function: string(function), - } - spawnRequest := gen.RemoteSpawnRequest{ - From: from, - Ref: ref, - Options: spawnRequestOptions, - } - dc.router.RouteSpawnRequest(dc.nodename, string(module), spawnRequest, args...) - return nil - - case distProtoSPAWN_REPLY: - // {31, ReqId, To, Flags, Result} - lib.Log("[%s] CONTROL SPAWN_REPLY [from %s]: %#v", dc.nodename, dc.peername, message.control) - ref := t.Element(2).(etf.Ref) - to := t.Element(3).(etf.Pid) - dc.router.RouteSpawnReply(to, ref, t.Element(5)) - return nil - - case distProtoPROXY_CONNECT_REQUEST: - // {101, ID, To, Digest, PublicKey, Flags, Hop, Path} - lib.Log("[%s] PROXY CONNECT REQUEST [from %s]: %#v", dc.nodename, dc.peername, message.control) - request := node.ProxyConnectRequest{ - ID: t.Element(2).(etf.Ref), - To: string(t.Element(3).(etf.Atom)), - Digest: t.Element(4).([]byte), - PublicKey: t.Element(5).([]byte), - // FIXME it will be int64 after using more than 32 flags - Flags: proxyFlagsFromUint64(uint64(t.Element(6).(int))), - Creation: uint32(t.Element(7).(int64)), - Hop: t.Element(8).(int), - } - for _, p := range t.Element(9).(etf.List) { - request.Path = append(request.Path, string(p.(etf.Atom))) - } - if err := dc.router.RouteProxyConnectRequest(dc, request); err != nil { - errReply := node.ProxyConnectCancel{ - ID: request.ID, - From: dc.nodename, - Reason: err.Error(), - Path: request.Path[1:], - } - dc.ProxyConnectCancel(errReply) - } - return nil - - case distProtoPROXY_CONNECT_REPLY: - // {102, ID, To, Digest, Cipher, Flags, SessionID, Path} - lib.Log("[%s] PROXY CONNECT REPLY [from %s]: %#v", dc.nodename, dc.peername, message.control) - connectReply := node.ProxyConnectReply{ - ID: t.Element(2).(etf.Ref), - To: string(t.Element(3).(etf.Atom)), - Digest: t.Element(4).([]byte), - Cipher: t.Element(5).([]byte), - // FIXME it will be int64 after using more than 32 flags - Flags: proxyFlagsFromUint64(uint64(t.Element(6).(int))), - Creation: uint32(t.Element(7).(int64)), - SessionID: t.Element(8).(string), - } - for _, p := range t.Element(9).(etf.List) { - connectReply.Path = append(connectReply.Path, string(p.(etf.Atom))) - } - if err := dc.router.RouteProxyConnectReply(dc, connectReply); err != nil { - lib.Log("[%s] PROXY CONNECT REPLY error %s (message: %#v)", dc.nodename, err, connectReply) - // send disconnect to clean up this session all the way to the - // destination node - disconnect := node.ProxyDisconnect{ - Node: dc.nodename, - Proxy: dc.nodename, - SessionID: connectReply.SessionID, - Reason: err.Error(), - } - dc.ProxyDisconnect(disconnect) - if err == 
lib.ErrNoRoute { - return nil - } - - // send cancel message to the source node - cancel := node.ProxyConnectCancel{ - ID: connectReply.ID, - From: dc.nodename, - Reason: err.Error(), - Path: connectReply.Path, - } - dc.router.RouteProxyConnectCancel(dc, cancel) - } - - return nil - - case distProtoPROXY_CONNECT_CANCEL: - lib.Log("[%s] PROXY CONNECT CANCEL [from %s]: %#v", dc.nodename, dc.peername, message.control) - connectError := node.ProxyConnectCancel{ - ID: t.Element(2).(etf.Ref), - From: string(t.Element(3).(etf.Atom)), - Reason: t.Element(4).(string), - } - for _, p := range t.Element(5).(etf.List) { - connectError.Path = append(connectError.Path, string(p.(etf.Atom))) - } - dc.router.RouteProxyConnectCancel(dc, connectError) - return nil - - case distProtoPROXY_DISCONNECT: - // {104, Node, Proxy, SessionID, Reason} - lib.Log("[%s] PROXY DISCONNECT [from %s]: %#v", dc.nodename, dc.peername, message.control) - proxyDisconnect := node.ProxyDisconnect{ - Node: string(t.Element(2).(etf.Atom)), - Proxy: string(t.Element(3).(etf.Atom)), - SessionID: t.Element(4).(string), - Reason: t.Element(5).(string), - } - dc.router.RouteProxyDisconnect(dc, proxyDisconnect) - return nil - - default: - lib.Log("[%s] CONTROL unknown command [from %s]: %#v", dc.nodename, dc.peername, message.control) - return fmt.Errorf("unknown control command %#v", message.control) - } - } - } - - return fmt.Errorf("unsupported control message %#v", message.control) -} - -func (dc *distConnection) decodeFragment(packet []byte, proxy *proxySession) (*lib.Buffer, error) { - var first, compressed bool - var err error - - sequenceID := binary.BigEndian.Uint64(packet[1:9]) - fragmentID := binary.BigEndian.Uint64(packet[9:17]) - if fragmentID == 0 { - return nil, fmt.Errorf("fragmentID can't be 0") - } - - switch packet[0] { - case protoDistFragment1: - // We should decode atom cache from the first fragment in order - // to get rid the case when we get the first fragment of the packet with - // cached atoms and the next packet is not the part of the fragmented packet, - // but with the ids were cached in the first fragment - _, _, err = dc.decodeDistHeaderAtomCache(packet[17:], proxy) - if err != nil { - return nil, err - } - first = true - case protoDistFragment1Z: - _, _, err = dc.decodeDistHeaderAtomCache(packet[17:], proxy) - if err != nil { - return nil, err - } - first = true - compressed = true - case protoDistFragmentNZ: - compressed = true - } - packet = packet[17:] - - dc.fragmentsMutex.Lock() - defer dc.fragmentsMutex.Unlock() - - fragmented, ok := dc.fragments[sequenceID] - if !ok { - fragmented = &fragmentedPacket{ - buffer: lib.TakeBuffer(), - disordered: lib.TakeBuffer(), - disorderedSlices: make(map[uint64][]byte), - lastUpdate: time.Now(), - } - - // append new packet type - if compressed { - fragmented.buffer.AppendByte(protoDistMessageZ) - } else { - fragmented.buffer.AppendByte(protoDistMessage) - } - dc.fragments[sequenceID] = fragmented - } - - // until we get the first item everything will be treated as disordered - if first { - fragmented.fragmentID = fragmentID + 1 - } - - if fragmented.fragmentID-fragmentID != 1 { - // got the next fragment. disordered - slice := fragmented.disordered.Extend(len(packet)) - copy(slice, packet) - fragmented.disorderedSlices[fragmentID] = slice - } else { - // order is correct. 
just append - fragmented.buffer.Append(packet) - fragmented.fragmentID = fragmentID - } - - // check whether we have disordered slices and try - // to append them if it does fit - if fragmented.fragmentID > 0 && len(fragmented.disorderedSlices) > 0 { - for i := fragmented.fragmentID - 1; i > 0; i-- { - if slice, ok := fragmented.disorderedSlices[i]; ok { - fragmented.buffer.Append(slice) - delete(fragmented.disorderedSlices, i) - fragmented.fragmentID = i - continue - } - break - } - } - - fragmented.lastUpdate = time.Now() - - if fragmented.fragmentID == 1 && len(fragmented.disorderedSlices) == 0 { - // it was the last fragment - delete(dc.fragments, sequenceID) - lib.ReleaseBuffer(fragmented.disordered) - return fragmented.buffer, nil - } - - if dc.checkCleanPending { - return nil, nil - } - - if dc.checkCleanTimer != nil { - dc.checkCleanTimer.Reset(dc.checkCleanTimeout) - return nil, nil - } - - dc.checkCleanTimer = time.AfterFunc(dc.checkCleanTimeout, func() { - dc.fragmentsMutex.Lock() - defer dc.fragmentsMutex.Unlock() - - if len(dc.fragments) == 0 { - dc.checkCleanPending = false - return - } - - valid := time.Now().Add(-dc.checkCleanDeadline) - for sequenceID, fragmented := range dc.fragments { - if fragmented.lastUpdate.Before(valid) { - // dropping due to exceeded deadline - delete(dc.fragments, sequenceID) - } - } - if len(dc.fragments) == 0 { - dc.checkCleanPending = false - return - } - - dc.checkCleanPending = true - dc.checkCleanTimer.Reset(dc.checkCleanTimeout) - }) - - return nil, nil -} - -func (dc *distConnection) decodeDistHeaderAtomCache(packet []byte, proxy *proxySession) ([]etf.Atom, []byte, error) { - var err error - // all the details are here https://erlang.org/doc/apps/erts/erl_ext_dist.html#normal-distribution-header - - // number of atom references are present in package - references := int(packet[0]) - if references == 0 { - return nil, packet[1:], nil - } - - cache := dc.cache.In - if proxy != nil { - cache = proxy.cache.In - } - cached := make([]etf.Atom, references) - flagsLen := references/2 + 1 - if len(packet) < 1+flagsLen { - // malformed - return nil, nil, errMalformed - } - flags := packet[1 : flagsLen+1] - - // The least significant bit in a half byte is flag LongAtoms. - // If it is set, 2 bytes are used for atom lengths instead of 1 byte - // in the distribution header. - headerAtomLength := 1 // if 'LongAtom' is not set - - // extract this bit. 
just increase headerAtomLength if this flag is set - lastByte := flags[len(flags)-1] - shift := uint((references & 0x01) * 4) - headerAtomLength += int((lastByte >> shift) & 0x01) - - // 1 (number of references) + references/2+1 (length of flags) - packet = packet[1+flagsLen:] - - for i := 0; i < references; i++ { - if len(packet) < 1+headerAtomLength { - // malformed - return nil, nil, errMalformed - } - shift = uint((i & 0x01) * 4) - flag := (flags[i/2] >> shift) & 0x0F - isNewReference := flag&0x08 == 0x08 - idxReference := uint16(flag & 0x07) - idxInternal := uint16(packet[0]) - idx := (idxReference << 8) | idxInternal - - if isNewReference { - atomLen := uint16(packet[1]) - if headerAtomLength == 2 { - atomLen = binary.BigEndian.Uint16(packet[1:3]) - } - // extract atom - packet = packet[1+headerAtomLength:] - if len(packet) < int(atomLen) { - // malformed - return nil, nil, errMalformed - } - atom := etf.Atom(packet[:atomLen]) - // store in temporary cache for decoding - cached[i] = atom - - // store in the link's cache - cache.Atoms[idx] = &atom - packet = packet[atomLen:] - continue - } - - c := cache.Atoms[idx] - if c == nil { - packet = packet[1:] - // decode the rest of this cache but set return err = errMissingInCache - err = errMissingInCache - continue - } - cached[i] = *c - packet = packet[1:] - } - - return cached, packet, err -} - -func (dc *distConnection) encodeDistHeaderAtomCache(b *lib.Buffer, - senderAtomCache map[etf.Atom]etf.CacheItem, - encodingAtomCache *etf.EncodingAtomCache) { - - n := encodingAtomCache.Len() - b.AppendByte(byte(n)) // write NumberOfAtomCache - if n == 0 { - return - } - - startPosition := len(b.B) - lenFlags := n/2 + 1 - flags := b.Extend(lenFlags) - flags[lenFlags-1] = 0 // clear last byte to make sure we have valid LongAtom flag - - for i := 0; i < len(encodingAtomCache.L); i++ { - // clean internal name cache - encodingAtomCache.Delete(encodingAtomCache.L[i].Name) - - shift := uint((i & 0x01) * 4) - idxReference := byte(encodingAtomCache.L[i].ID >> 8) // SegmentIndex - idxInternal := byte(encodingAtomCache.L[i].ID & 255) // InternalSegmentIndex - - cachedItem := senderAtomCache[encodingAtomCache.L[i].Name] - if !cachedItem.Encoded { - idxReference |= 8 // set NewCacheEntryFlag - } - - // the 'flags' slice could have been changed if b.B was reallocated while encoding atoms - flags = b.B[startPosition : startPosition+lenFlags] - // clean it up before reuse - if shift == 0 { - flags[i/2] = 0 - } - flags[i/2] |= idxReference << shift - - if cachedItem.Encoded { - b.AppendByte(idxInternal) - continue - } - - if encodingAtomCache.HasLongAtom { - // 1 (InternalSegmentIndex) + 2 (length) + name - allocLen := 1 + 2 + len(encodingAtomCache.L[i].Name) - buf := b.Extend(allocLen) - buf[0] = idxInternal - binary.BigEndian.PutUint16(buf[1:3], uint16(len(encodingAtomCache.L[i].Name))) - copy(buf[3:], encodingAtomCache.L[i].Name) - } else { - // 1 (InternalSegmentIndex) + 1 (length) + name - allocLen := 1 + 1 + len(encodingAtomCache.L[i].Name) - buf := b.Extend(allocLen) - buf[0] = idxInternal - buf[1] = byte(len(encodingAtomCache.L[i].Name)) - copy(buf[2:], encodingAtomCache.L[i].Name) - } - - cachedItem.Encoded = true - senderAtomCache[encodingAtomCache.L[i].Name] = cachedItem - } - - if encodingAtomCache.HasLongAtom { - shift := uint((n & 0x01) * 4) - flags = b.B[startPosition : startPosition+lenFlags] - flags[lenFlags-1] |= 1 << shift // set LongAtom = 1 - } -} - -func (dc *distConnection) sender(sender_id int, send <-chan *sendMessage, options 
node.ProtoOptions, peerFlags node.Flags) { - var lenMessage, lenAtomCache, lenPacket, startDataPosition int - var atomCacheBuffer, packetBuffer *lib.Buffer - var err error - var compressed bool - var cacheEnabled, fragmentationEnabled, compressionEnabled, encryptionEnabled bool - - // cancel connection context if something went wrong - // it will cause closing connection with stopping all - // goroutines around this connection - defer dc.cancelContext() - - // Header atom cache is encoded right after the control/message encoding process - // but should be stored as a first item in the packet. - // Thats why we do reserve some space for it in order to get rid - // of reallocation packetBuffer data - reserveHeaderAtomCache := 8192 - - // atom cache of this sender - senderAtomCache := make(map[etf.Atom]etf.CacheItem) - // atom cache of this encoding - encodingAtomCache := etf.TakeEncodingAtomCache() - defer etf.ReleaseEncodingAtomCache(encodingAtomCache) - - encrypt := func(data []byte, sessionID string, block cipher.Block) *lib.Buffer { - l := len(data) - padding := aes.BlockSize - l%aes.BlockSize - padtext := bytes.Repeat([]byte{byte(padding)}, padding) - data = append(data, padtext...) - l = len(data) - - // take another buffer for encrypted message - xBuffer := lib.TakeBuffer() - // 4 (packet len) + 1 (protoProxyX) + 32 (sessionID) + aes.BlockSize + l - xBuffer.Allocate(4 + 1 + 32 + aes.BlockSize + l) - - binary.BigEndian.PutUint32(xBuffer.B, uint32(xBuffer.Len()-4)) - xBuffer.B[4] = protoProxyX - copy(xBuffer.B[5:], sessionID) - iv := xBuffer.B[4+1+32 : 4+1+32+aes.BlockSize] - if _, err := io.ReadFull(crand.Reader, iv); err != nil { - lib.ReleaseBuffer(xBuffer) - return nil - } - cfb := cipher.NewCFBEncrypter(block, iv) - cfb.XORKeyStream(xBuffer.B[4+1+32+aes.BlockSize:], data) - return xBuffer - } - - message := &sendMessage{} - encodingOptions := etf.EncodeOptions{ - EncodingAtomCache: encodingAtomCache, - AtomMapping: dc.mapping, - NodeName: dc.nodename, - PeerName: dc.peername, - } - - for { - // clean up and get back message struct to the pool - message.packet = nil - message.control = nil - message.payload = nil - message.compression = false - message.proxy = nil - sendMessages.Put(message) - - // waiting for the next message - message = <-send - - if message == nil { - // channel was closed - return - } - - if message.packet != nil { - // transit proxy message - bytesOut, err := dc.flusher.Write(message.packet.B) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.TransitBytesOut, uint64(bytesOut)) - lib.ReleaseBuffer(message.packet) - continue - } - - atomic.AddUint64(&dc.stats.MessagesOut, 1) - - packetBuffer = lib.TakeBuffer() - lenMessage, lenAtomCache, lenPacket = 0, 0, 0 - startDataPosition = reserveHeaderAtomCache - - // do reserve for the header 8K, should be enough - packetBuffer.Allocate(reserveHeaderAtomCache) - - // compression feature is always available for the proxy connection - // check whether compress is enabled for the peer and for this message - compressed = false - compressionEnabled = false - if message.compression { - if message.proxy != nil || peerFlags.EnableCompression { - compressionEnabled = true - } - } - - cacheEnabled = false - // atom cache feature is always available for the proxy connection - if message.proxy != nil || peerFlags.EnableHeaderAtomCache { - cacheEnabled = true - encodingAtomCache.Reset() - } - - // fragmentation feature is always available for the proxy connection - fragmentationEnabled = false - if options.FragmentationUnit 
> 0 { - if message.proxy != nil || peerFlags.EnableFragmentation { - fragmentationEnabled = true - } - } - - // encryption feature is only available for the proxy connection - encryptionEnabled = false - if message.proxy != nil && message.proxy.session.PeerFlags.EnableEncryption { - encryptionEnabled = true - } - - if message.proxy == nil { - // use connection atom cache - encodingOptions.AtomCache = dc.cache.Out - encodingOptions.SenderAtomCache = senderAtomCache - // use connection flags - encodingOptions.FlagBigCreation = peerFlags.EnableBigCreation - encodingOptions.FlagBigPidRef = peerFlags.EnableBigPidRef - - } else { - // use proxy connection atom cache - encodingOptions.AtomCache = message.proxy.cache.Out - if message.proxy.senderCache[sender_id] == nil { - message.proxy.senderCache[sender_id] = make(map[etf.Atom]etf.CacheItem) - } - encodingOptions.SenderAtomCache = message.proxy.senderCache[sender_id] - // these flags are always enabled for the proxy connection - encodingOptions.FlagBigCreation = true - encodingOptions.FlagBigPidRef = true - } - - // We could use gzip writer for the encoder, but we don't know - // the actual size of the control/payload. For small data, gzipping - // is getting extremely inefficient. That's why it is cheaper to - // encode control/payload first and then decide whether to compress it - // according to a threshold value. - - // encode Control - err = etf.Encode(message.control, packetBuffer, encodingOptions) - if err != nil { - lib.Warning("can not encode control message: %s", err) - lib.ReleaseBuffer(packetBuffer) - continue - } - - // encode Message if present - if message.payload != nil { - err = etf.Encode(message.payload, packetBuffer, encodingOptions) - if err != nil { - lib.Warning("can not encode payload message: %s", err) - lib.ReleaseBuffer(packetBuffer) - continue - } - - } - lenMessage = packetBuffer.Len() - reserveHeaderAtomCache - - if compressionEnabled && packetBuffer.Len() > (reserveHeaderAtomCache+message.compressionThreshold) { - var zWriter *gzip.Writer - - //// take another buffer - zBuffer := lib.TakeBuffer() - // allocate extra 4 bytes for the lenMessage (length of unpacked data) - zBuffer.Allocate(reserveHeaderAtomCache + 4) - level := message.compressionLevel - if level == -1 { - level = 0 - } - if w, ok := gzipWriters[level].Get().(*gzip.Writer); ok { - zWriter = w - zWriter.Reset(zBuffer) - } else { - zWriter, _ = gzip.NewWriterLevel(zBuffer, message.compressionLevel) - } - zWriter.Write(packetBuffer.B[reserveHeaderAtomCache:]) - zWriter.Close() - gzipWriters[level].Put(zWriter) - - // swap buffers only if gzipped data less than the original ones - if zBuffer.Len() < packetBuffer.Len() { - binary.BigEndian.PutUint32(zBuffer.B[reserveHeaderAtomCache:], uint32(lenMessage)) - lenMessage = zBuffer.Len() - reserveHeaderAtomCache - packetBuffer, zBuffer = zBuffer, packetBuffer - compressed = true - } - lib.ReleaseBuffer(zBuffer) - } - - // encode Header Atom Cache if its enabled - if cacheEnabled && encodingAtomCache.Len() > 0 { - atomCacheBuffer = lib.TakeBuffer() - atomCacheBuffer.Allocate(1024) - dc.encodeDistHeaderAtomCache(atomCacheBuffer, encodingOptions.SenderAtomCache, encodingAtomCache) - - lenAtomCache = atomCacheBuffer.Len() - 1024 - if lenAtomCache > reserveHeaderAtomCache-1024 { - // we got huge atom cache - atomCacheBuffer.Append(packetBuffer.B[startDataPosition:]) - startDataPosition = 1024 - lib.ReleaseBuffer(packetBuffer) - packetBuffer = atomCacheBuffer - } else { - startDataPosition -= lenAtomCache - 
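// The atom-cache header is encoded after the control/payload terms, yet has to
// precede them on the wire; that is why reserveHeaderAtomCache bytes of headroom
// are kept at the front of packetBuffer and the header is copied in backwards
// here. A minimal, self-contained sketch of the same reserve-and-prepend
// technique (the frame helper and its sizes are illustrative, not part of this file):

package main

import "fmt"

// frame encodes the body first, then writes the variable-length header
// directly in front of it inside the reserved headroom, so the (potentially
// large) body never has to be moved. Assumes len(header) <= headroom.
func frame(header, body []byte) []byte {
	const headroom = 8192 // mirrors reserveHeaderAtomCache above
	buf := make([]byte, headroom, headroom+len(body))
	copy(buf[headroom-len(header):], header)
	buf = append(buf, body...) // body lands at offset headroom
	return buf[headroom-len(header):]
}

func main() {
	fmt.Printf("%q\n", frame([]byte{131, 68}, []byte("control+payload")))
}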
copy(packetBuffer.B[startDataPosition:], atomCacheBuffer.B[1024:]) - lib.ReleaseBuffer(atomCacheBuffer) - } - - } else { - lenAtomCache = 1 - startDataPosition -= lenAtomCache - packetBuffer.B[startDataPosition] = byte(0) - } - - for { - // 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistMessage[Z]) + lenAtomCache - lenPacket = 1 + 1 + lenAtomCache + lenMessage - if !fragmentationEnabled || lenMessage < options.FragmentationUnit { - // send as a single packet - startDataPosition -= 1 - if compressed { - packetBuffer.B[startDataPosition] = protoDistMessageZ // 200 - } else { - packetBuffer.B[startDataPosition] = protoDistMessage // 68 - } - - if message.proxy == nil { - // 4 (packet len) + 1 (protoDist) - startDataPosition -= 4 + 1 - - binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket)) - packetBuffer.B[startDataPosition+4] = protoDist // 131 - - bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition:]) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - break - } - - // proxy message. - if encryptionEnabled == false { - // no encryption - // 4 (packet len) + protoProxy + sessionID - startDataPosition -= 1 + 4 + 32 - l := len(packetBuffer.B[startDataPosition:]) - 4 - binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(l)) - packetBuffer.B[startDataPosition+4] = protoProxy - copy(packetBuffer.B[startDataPosition+5:], message.proxy.session.ID) - bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition:]) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - break - } - - // send encrypted proxy message - xBuffer := encrypt(packetBuffer.B[startDataPosition:], - message.proxy.session.ID, message.proxy.session.Block) - if xBuffer == nil { - // can't encrypt message - return - } - bytesOut, err := dc.flusher.Write(xBuffer.B) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - lib.ReleaseBuffer(xBuffer) - break - } - - // Message should be fragmented - - // https://erlang.org/doc/apps/erts/erl_ext_dist.html#distribution-header-for-fragmented-messages - // "The entire atom cache and control message has to be part of the starting fragment" - - sequenceID := uint64(atomic.AddInt64(&dc.sequenceID, 1)) - numFragments := lenMessage/options.FragmentationUnit + 1 - - // 1 (dist header: 131) + 1 (dist header: protoDistFragment) + 8 (sequenceID) + 8 (fragmentID) + ... 
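// For reference, the 22 bytes reserved in front of every fragment below break
// down as 4 (packet length) + 1 (protoDist, 131) + 1 (fragment type) +
// 8 (sequenceID) + 8 (fragmentID). A hedged sketch of that preamble
// (the putFragmentHeader helper is illustrative only):

import "encoding/binary"

// putFragmentHeader fills the 22-byte fragment preamble in place; b must be
// at least 22 bytes long, and lenPacket counts everything that follows the
// 4-byte length field, as in the code below.
func putFragmentHeader(b []byte, fragType byte, seqID, fragID uint64, lenPacket int) {
	binary.BigEndian.PutUint32(b[0:4], uint32(lenPacket))
	b[4] = 131      // protoDist
	b[5] = fragType // protoDistFragment1[Z] or protoDistFragmentN[Z]
	binary.BigEndian.PutUint64(b[6:14], seqID)
	binary.BigEndian.PutUint64(b[14:22], fragID)
}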
- lenPacket = 1 + 1 + 8 + 8 + lenAtomCache + options.FragmentationUnit - - // 4 (packet len) + 1 (dist header: 131) + 1 (dist header: protoDistFragment[Z]) + 8 (sequenceID) + 8 (fragmentID) - startDataPosition -= 22 - - if compressed { - packetBuffer.B[startDataPosition+5] = protoDistFragment1Z // 201 - } else { - packetBuffer.B[startDataPosition+5] = protoDistFragment1 // 69 - } - - binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID)) - binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments)) - - if message.proxy == nil { - binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket)) - packetBuffer.B[startDataPosition+4] = protoDist // 131 - bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket]) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - } else { - // proxy message - if encryptionEnabled == false { - // send proxy message - // shift left on 32 bytes for the session id - binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition-32:], uint32(lenPacket+32)) - packetBuffer.B[startDataPosition-32+4] = protoProxy // 141 - copy(packetBuffer.B[startDataPosition-32+5:], message.proxy.session.ID) - bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition-32 : startDataPosition+4+lenPacket]) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - - } else { - // send encrypted proxy message - // encryption makes padding (up to aes.BlockSize = 16 bytes) so we should keep the data - tail16 := [16]byte{} - n := copy(tail16[:], packetBuffer.B[startDataPosition+4+lenPacket:]) - xBuffer := encrypt(packetBuffer.B[startDataPosition+5:startDataPosition+4+lenPacket], - message.proxy.session.ID, message.proxy.session.Block) - if xBuffer == nil { - // can't encrypt message - return - } - bytesOut, err := dc.flusher.Write(xBuffer.B) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - - // resore tail - copy(packetBuffer.B[startDataPosition+4+lenPacket:], tail16[:n]) - lib.ReleaseBuffer(xBuffer) - } - } - - startDataPosition += 4 + lenPacket - numFragments-- - - nextFragment: - - if len(packetBuffer.B[startDataPosition:]) > options.FragmentationUnit { - lenPacket = 1 + 1 + 8 + 8 + options.FragmentationUnit - // reuse the previous 22 bytes for the next frame header - startDataPosition -= 22 - - } else { - // the last one - lenPacket = 1 + 1 + 8 + 8 + len(packetBuffer.B[startDataPosition:]) - startDataPosition -= 22 - } - - if compressed { - packetBuffer.B[startDataPosition+5] = protoDistFragmentNZ // 202 - } else { - packetBuffer.B[startDataPosition+5] = protoDistFragmentN // 70 - } - - binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+6:], uint64(sequenceID)) - binary.BigEndian.PutUint64(packetBuffer.B[startDataPosition+14:], uint64(numFragments)) - if message.proxy == nil { - // send fragment - binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition:], uint32(lenPacket)) - packetBuffer.B[startDataPosition+4] = protoDist // 131 - bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition : startDataPosition+4+lenPacket]) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - } else { - // wrap it as a proxy message - if encryptionEnabled == false { - binary.BigEndian.PutUint32(packetBuffer.B[startDataPosition-32:], uint32(lenPacket+32)) - packetBuffer.B[startDataPosition-32+4] = protoProxy // 141 - 
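// The encrypt closure declared at the top of sender pads the cleartext to a
// whole number of AES blocks (PKCS#7-style, matching the padding check in
// decodePacket) and prepends a random IV before CFB encryption. A
// self-contained sketch of that scheme, without the buffer pooling used here:

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"io"
)

// sealCFB returns iv || ciphertext for the padded data.
func sealCFB(block cipher.Block, data []byte) ([]byte, error) {
	padding := aes.BlockSize - len(data)%aes.BlockSize
	data = append(data, bytes.Repeat([]byte{byte(padding)}, padding)...)
	out := make([]byte, aes.BlockSize+len(data))
	iv := out[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return nil, err
	}
	cipher.NewCFBEncrypter(block, iv).XORKeyStream(out[aes.BlockSize:], data)
	return out, nil
}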
copy(packetBuffer.B[startDataPosition-32+5:], message.proxy.session.ID) - bytesOut, err := dc.flusher.Write(packetBuffer.B[startDataPosition-32 : startDataPosition+4+lenPacket]) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - } else { - // send encrypted proxy message - tail16 := [16]byte{} - n := copy(tail16[:], packetBuffer.B[startDataPosition+4+lenPacket:]) - xBuffer := encrypt(packetBuffer.B[startDataPosition+5:startDataPosition+4+lenPacket], - message.proxy.session.ID, message.proxy.session.Block) - if xBuffer == nil { - // can't encrypt message - return - } - bytesOut, err := dc.flusher.Write(xBuffer.B) - if err != nil { - return - } - atomic.AddUint64(&dc.stats.BytesOut, uint64(bytesOut)) - // restore tail - copy(packetBuffer.B[startDataPosition+4+lenPacket:], tail16[:n]) - lib.ReleaseBuffer(xBuffer) - } - } - - startDataPosition += 4 + lenPacket - numFragments-- - if numFragments > 0 { - goto nextFragment - } - - // done - break - } - - lib.ReleaseBuffer(packetBuffer) - - if cacheEnabled == false { - continue - } - - // get updates from the connection AtomCache and update the sender's cache (senderAtomCache) - lastAddedAtom, lastAddedID := encodingOptions.AtomCache.LastAdded() - if lastAddedID < 0 { - continue - } - if _, exist := encodingOptions.SenderAtomCache[lastAddedAtom]; exist { - continue - } - - encodingOptions.AtomCache.RLock() - for _, a := range encodingOptions.AtomCache.ListSince(lastAddedID) { - encodingOptions.SenderAtomCache[a] = etf.CacheItem{ID: lastAddedID, Name: a, Encoded: false} - lastAddedID++ - } - encodingOptions.AtomCache.RUnlock() - - } - -} - -func (dc *distConnection) send(to string, creation uint32, msg *sendMessage) error { - i := atomic.AddInt32(&dc.senders.i, 1) - n := i % dc.senders.n - s := dc.senders.sender[n] - if s == nil { - // connection was closed - return lib.ErrNoRoute - } - dc.proxySessionsMutex.RLock() - ps, isProxy := dc.proxySessionsByPeerName[to] - dc.proxySessionsMutex.RUnlock() - peer_creation := dc.creation - if isProxy { - msg.proxy = &ps - peer_creation = ps.session.Creation - } else { - // it's a direct send, so make sure this peer supports compression - if dc.flags.EnableCompression == false { - msg.compression = false - } - } - - // if this peer is Erlang OTP 22 (and earlier), peer_creation is always 0, so we - // must skip this check. 
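// The creation value acts as an incarnation number: a message addressed to a
// pid of a previous incarnation of the peer must not be delivered to a process
// of the restarted node. A minimal sketch of the guard applied below
// (incarnationOK is an illustrative name; a creation of 0 means the incarnation
// is unknown, as with Erlang/OTP 22 and earlier, and disables the check):

// incarnationOK reports whether a message may be delivered, given the
// creation carried by the target and the peer's current creation.
func incarnationOK(msgCreation, peerCreation uint32) bool {
	if msgCreation == 0 || peerCreation == 0 {
		return true // cannot verify the incarnation; accept
	}
	return msgCreation == peerCreation
}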
- if creation > 0 && peer_creation > 0 && peer_creation != creation { - return lib.ErrProcessIncarnation - } - - // TODO to decide whether to return error if channel is full - //select { - //case s.sendChannel <- msg: - // return nil - //default: - // return ErrOverloadConnection - //} - - s.Lock() - defer s.Unlock() - - s.sendChannel <- msg - return nil -} - -func (dc *distConnection) Stats() node.NetworkStats { - return dc.stats -} - -func proxyFlagsToUint64(pf node.ProxyFlags) uint64 { - var flags uint64 - if pf.EnableLink { - flags |= 1 - } - if pf.EnableMonitor { - flags |= 1 << 1 - } - if pf.EnableRemoteSpawn { - flags |= 1 << 2 - } - if pf.EnableEncryption { - flags |= 1 << 3 - } - return flags -} - -func proxyFlagsFromUint64(f uint64) node.ProxyFlags { - var flags node.ProxyFlags - flags.EnableLink = f&1 > 0 - flags.EnableMonitor = f&(1<<1) > 0 - flags.EnableRemoteSpawn = f&(1<<2) > 0 - flags.EnableEncryption = f&(1<<3) > 0 - return flags -} diff --git a/proto/dist/proto_test.go b/proto/dist/proto_test.go deleted file mode 100644 index f96d8b57..00000000 --- a/proto/dist/proto_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package dist - -import ( - "bytes" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" -) - -func TestDecodeDistHeaderAtomCache(t *testing.T) { - c := &distConnection{} - c.cache = etf.NewAtomCache() - a1 := etf.Atom("atom1") - a2 := etf.Atom("atom2") - c.cache.In.Atoms[1034] = &a1 - c.cache.In.Atoms[5] = &a2 - packet := []byte{ - 131, 68, // start dist header - 5, 4, 137, 9, // 5 atoms and theirs flags - 10, 5, // already cached atom ids - 236, 3, 114, 101, 103, // atom 'reg' - 9, 4, 99, 97, 108, 108, //atom 'call' - 238, 13, 115, 101, 116, 95, 103, 101, 116, 95, 115, 116, 97, 116, 101, // atom 'set_get_state' - 104, 4, 97, 6, 103, 82, 0, 0, 0, 0, 85, 0, 0, 0, 0, 2, 82, 1, 82, 2, // message... 
- 104, 3, 82, 3, 103, 82, 0, 0, 0, 0, 245, 0, 0, 0, 2, 2, - 104, 2, 82, 4, 109, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - } - - cacheExpected := []etf.Atom{"atom1", "atom2", "reg", "call", "set_get_state"} - cacheInExpected := c.cache.In.Atoms - a3 := etf.Atom("reg") - a4 := etf.Atom("call") - a5 := etf.Atom("set_get_state") - cacheInExpected[492] = &a3 - cacheInExpected[9] = &a4 - cacheInExpected[494] = &a5 - - packetExpected := packet[34:] - cache, packet1, _ := c.decodeDistHeaderAtomCache(packet[2:], nil) - - if !bytes.Equal(packet1, packetExpected) { - t.Fatal("incorrect packet") - } - - if !reflect.DeepEqual(c.cache.In.Atoms, cacheInExpected) { - t.Fatal("incorrect cacheIn") - } - - if !reflect.DeepEqual(cache, cacheExpected) { - t.Fatal("incorrect cache", cache) - } - -} - -func TestEncodeDistHeaderAtomCache(t *testing.T) { - - b := lib.TakeBuffer() - defer lib.ReleaseBuffer(b) - - senderAtomCache := make(map[etf.Atom]etf.CacheItem) - encodingAtomCache := etf.TakeEncodingAtomCache() - defer etf.ReleaseEncodingAtomCache(encodingAtomCache) - - senderAtomCache["reg"] = etf.CacheItem{ID: 1000, Encoded: false, Name: "reg"} - senderAtomCache["call"] = etf.CacheItem{ID: 499, Encoded: false, Name: "call"} - senderAtomCache["one_more_atom"] = etf.CacheItem{ID: 199, Encoded: true, Name: "one_more_atom"} - senderAtomCache["yet_another_atom"] = etf.CacheItem{ID: 2, Encoded: false, Name: "yet_another_atom"} - senderAtomCache["extra_atom"] = etf.CacheItem{ID: 10, Encoded: true, Name: "extra_atom"} - senderAtomCache["potato"] = etf.CacheItem{ID: 2017, Encoded: true, Name: "potato"} - - // Encoded field is ignored here - encodingAtomCache.Append(etf.CacheItem{ID: 499, Name: "call"}) - encodingAtomCache.Append(etf.CacheItem{ID: 1000, Name: "reg"}) - encodingAtomCache.Append(etf.CacheItem{ID: 199, Name: "one_more_atom"}) - encodingAtomCache.Append(etf.CacheItem{ID: 2017, Name: "potato"}) - - expected := []byte{ - 4, 185, 112, 0, // 4 atoms and theirs flags - 243, 4, 99, 97, 108, 108, // atom call - 232, 3, 114, 101, 103, // atom reg - 199, // atom one_more_atom, already encoded - 225, // atom potato, already encoded - - } - - l := &distConnection{} - l.encodeDistHeaderAtomCache(b, senderAtomCache, encodingAtomCache) - - if !reflect.DeepEqual(b.B, expected) { - t.Fatal("incorrect value") - } - - b.Reset() - encodingAtomCache.Append(etf.CacheItem{ID: 2, Name: "yet_another_atom"}) - - expected = []byte{ - 5, 49, 112, 8, // 5 atoms and theirs flags - 243, // atom call. already encoded - 232, // atom reg. already encoded - 199, // atom one_more_atom. already encoded - 225, // atom potato. 
already encoded - 2, 16, 121, 101, 116, 95, // atom yet_another_atom - 97, 110, 111, 116, 104, 101, - 114, 95, 97, 116, 111, 109, - } - l.encodeDistHeaderAtomCache(b, senderAtomCache, encodingAtomCache) - - if !reflect.DeepEqual(b.B, expected) { - t.Fatal("incorrect value", b.B) - } -} - -func BenchmarkDecodeDistHeaderAtomCache(b *testing.B) { - link := &distConnection{} - packet := []byte{ - 131, 68, // start dist header - 5, 4, 137, 9, // 5 atoms and theirs flags - 10, 5, // already cached atom ids - 236, 3, 114, 101, 103, // atom 'reg' - 9, 4, 99, 97, 108, 108, //atom 'call' - 238, 13, 115, 101, 116, 95, 103, 101, 116, 95, 115, 116, 97, 116, 101, // atom 'set_get_state' - 104, 4, 97, 6, 103, 82, 0, 0, 0, 0, 85, 0, 0, 0, 0, 2, 82, 1, 82, 2, // message... - 104, 3, 82, 3, 103, 82, 0, 0, 0, 0, 245, 0, 0, 0, 2, 2, - 104, 2, 82, 4, 109, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - link.decodeDistHeaderAtomCache(packet[2:], nil) - } -} - -func BenchmarkEncodeDistHeaderAtomCache(b *testing.B) { - link := &distConnection{} - buf := lib.TakeBuffer() - defer lib.ReleaseBuffer(buf) - - senderAtomCache := make(map[etf.Atom]etf.CacheItem) - encodingAtomCache := etf.TakeEncodingAtomCache() - defer etf.ReleaseEncodingAtomCache(encodingAtomCache) - - senderAtomCache["reg"] = etf.CacheItem{ID: 1000, Encoded: false, Name: "reg"} - senderAtomCache["call"] = etf.CacheItem{ID: 499, Encoded: false, Name: "call"} - senderAtomCache["one_more_atom"] = etf.CacheItem{ID: 199, Encoded: true, Name: "one_more_atom"} - senderAtomCache["yet_another_atom"] = etf.CacheItem{ID: 2, Encoded: false, Name: "yet_another_atom"} - senderAtomCache["extra_atom"] = etf.CacheItem{ID: 10, Encoded: true, Name: "extra_atom"} - senderAtomCache["potato"] = etf.CacheItem{ID: 2017, Encoded: true, Name: "potato"} - - // Encoded field is ignored here - encodingAtomCache.Append(etf.CacheItem{ID: 499, Name: "call"}) - encodingAtomCache.Append(etf.CacheItem{ID: 1000, Name: "reg"}) - encodingAtomCache.Append(etf.CacheItem{ID: 199, Name: "one_more_atom"}) - encodingAtomCache.Append(etf.CacheItem{ID: 2017, Name: "potato"}) - b.ResetTimer() - for i := 0; i < b.N; i++ { - link.encodeDistHeaderAtomCache(buf, senderAtomCache, encodingAtomCache) - } -} - -func TestDecodeFragment(t *testing.T) { - link := &distConnection{} - - link.checkCleanTimeout = 50 * time.Millisecond - link.checkCleanDeadline = 150 * time.Millisecond - link.fragments = make(map[uint64]*fragmentedPacket) - - // decode fragment with fragmentID=0 should return error - fragment0 := []byte{protoDistFragment1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3} - if _, e := link.decodeFragment(fragment0, nil); e == nil { - t.Fatal("should be error here") - } - - fragment1 := []byte{protoDistFragment1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 2, 3} - fragment2 := []byte{protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 2, 4, 5, 6} - fragment3 := []byte{protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1, 7, 8, 9} - - expected := []byte{68, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - - // add first fragment - if x, e := link.decodeFragment(fragment1, nil); x != nil || e != nil { - t.Fatal("should be nil here", x, e) - 
} - // add second one - if x, e := link.decodeFragment(fragment2, nil); x != nil || e != nil { - t.Fatal("should be nil here", e) - } - // add the last one. should return *lib.Buffer with assembled packet - if x, e := link.decodeFragment(fragment3, nil); x == nil || e != nil { - t.Fatal("shouldn't be nil here", e) - } else { - // x should be *lib.Buffer - if !reflect.DeepEqual(expected, x.B) { - t.Fatal("exp:", expected, "got:", x.B) - } - lib.ReleaseBuffer(x) - - // map of the fragments should be empty here - if len(link.fragments) > 0 { - t.Fatal("fragments should be empty") - } - } - - link.checkCleanTimeout = 50 * time.Millisecond - link.checkCleanDeadline = 150 * time.Millisecond - // test lost fragment - // add the first fragment and wait 160ms - if x, e := link.decodeFragment(fragment1, nil); x != nil || e != nil { - t.Fatal("should be nil here", e) - } - if len(link.fragments) == 0 { - t.Fatal("fragments should have a record ") - } - // check and clean process should remove this record - time.Sleep(360 * time.Millisecond) - - // map of the fragments should be empty here - if len(link.fragments) > 0 { - t.Fatal("fragments should be empty") - } - - link.checkCleanTimeout = 0 - link.checkCleanDeadline = 0 - fragments := [][]byte{ - {protoDistFragment1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 9, 0, 1, 2, 3}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 8, 4, 5, 6}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 7, 8, 9}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 6, 10, 11, 12}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 5, 13, 14, 15}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 4, 16, 17, 18}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 3, 19, 20, 21}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 2, 22, 23, 24}, - {protoDistFragmentN, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 25, 26, 27}, - } - expected = []byte{68, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27} - - fragmentsReverse := make([][]byte, len(fragments)) - l := len(fragments) - for i := 0; i < l; i++ { - fragmentsReverse[l-i-1] = fragments[i] - } - - var result *lib.Buffer - var e error - for i := range fragmentsReverse { - if result, e = link.decodeFragment(fragmentsReverse[i], nil); e != nil { - t.Fatal(e) - } - - } - if result == nil { - t.Fatal("got nil result") - } - if !reflect.DeepEqual(expected, result.B) { - t.Fatal("exp:", expected, "got:", result.B[:len(expected)]) - } - // map of the fragments should be empty here - if len(link.fragments) > 0 { - t.Fatal("fragments should be empty") - } - - // reshuffling 100 times - for k := 0; k < 100; k++ { - result = nil - fragmentsShuffle := make([][]byte, l) - rand.Seed(time.Now().UnixNano()) - for i, v := range rand.Perm(l) { - fragmentsShuffle[v] = fragments[i] - } - - for i := range fragmentsShuffle { - if result, e = link.decodeFragment(fragmentsShuffle[i], nil); e != nil { - t.Fatal(e) - } - - } - if result == nil { - t.Fatal("got nil result") - } - if !reflect.DeepEqual(expected, result.B) { - t.Fatal("exp:", expected, "got:", result.B[:len(expected)]) - } - } -} diff --git a/proto/dist/registrar.go b/proto/dist/registrar.go deleted file mode 100644 index 3d4e7d9f..00000000 --- a/proto/dist/registrar.go +++ /dev/null @@ -1,362 +0,0 @@ -package dist - -import ( - "context" - "crypto/tls" - "encoding/binary" - "fmt" - "io" - "net" - 
"strconv" - "strings" - "sync/atomic" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -const ( - DefaultEPMDPort uint16 = 4369 - - epmdAliveReq = 120 - epmdAliveResp = 121 - epmdAliveRespX = 118 - epmdPortPleaseReq = 122 - epmdPortResp = 119 - epmdNamesReq = 110 - - // wont be implemented - // epmdDumpReq = 100 - // epmdKillReq = 107 - // epmdStopReq = 115 - - // Extra data - ergoExtraMagic = 4411 - ergoExtraVersion1 = 1 -) - -// epmd implements registrar interface -type epmdRegistrar struct { - // EPMD server - enableEPMD bool - host string - port uint16 - - // Node - name string - nodePort uint16 - nodeName string - nodeHost string - handshakeVersion node.HandshakeVersion - - running int32 - extra []byte -} - -func CreateRegistrar() node.Registrar { - registrar := &epmdRegistrar{ - port: DefaultEPMDPort, - } - return registrar -} - -func CreateRegistrarWithLocalEPMD(host string, port uint16) node.Registrar { - if port == 0 { - port = DefaultEPMDPort - } - registrar := &epmdRegistrar{ - enableEPMD: true, - host: host, - port: port, - } - return registrar -} - -func CreateRegistrarWithRemoteEPMD(host string, port uint16) node.Registrar { - if port == 0 { - port = DefaultEPMDPort - } - registrar := &epmdRegistrar{ - host: host, - port: port, - } - return registrar -} - -func (e *epmdRegistrar) Register(ctx context.Context, name string, options node.RegisterOptions) error { - if atomic.CompareAndSwapInt32(&e.running, 0, 1) == false { - return fmt.Errorf("registrar is already running") - } - - n := strings.Split(name, "@") - if len(n) != 2 { - return fmt.Errorf("(EMPD) FQDN for node name is required (example: node@hostname)") - } - - e.name = name - e.nodeName = n[0] - e.nodeHost = n[1] - e.nodePort = options.Port - e.handshakeVersion = options.HandshakeVersion - - e.composeExtra(options) - ready := make(chan error) - - go func() { - defer atomic.StoreInt32(&e.running, 0) - buf := make([]byte, 16) - var reconnecting bool - - for { - if ctx.Err() != nil { - // node is stopped - return - } - - // try to start embedded EPMD server - if e.enableEPMD { - startServerEPMD(ctx, e.host, e.port) - } - - // register this node on EPMD server - conn, err := e.registerNode(options) - if err != nil { - if reconnecting == false { - ready <- err - break - } - lib.Warning("EPMD client: can't register node %q (%s). 
Retry in 3 seconds...", name, err) - time.Sleep(3 * time.Second) - continue - } - - go func() { - <-ctx.Done() - conn.Close() - }() - - if reconnecting == false { - ready <- nil - reconnecting = true - } - - for { - _, err := conn.Read(buf) - if err == nil { - continue - } - break - } - lib.Log("[%s] EPMD client: closing connection", name) - } - }() - - defer close(ready) - return <-ready -} - -func (e *epmdRegistrar) Resolve(name string) (node.Route, error) { - var route node.Route - - n := strings.Split(name, "@") - if len(n) != 2 { - return node.Route{}, fmt.Errorf("incorrect FQDN node name (example: node@localhost)") - } - conn, err := net.Dial("tcp", net.JoinHostPort(n[1], fmt.Sprintf("%d", e.port))) - if err != nil { - return node.Route{}, err - } - - defer conn.Close() - - if err := e.sendPortPleaseReq(conn, n[0]); err != nil { - return node.Route{}, err - } - - route.Node = n[0] - route.Host = n[1] - - err = e.readPortResp(&route, conn) - if err != nil { - return node.Route{}, err - } - - return route, nil -} - -func (e *epmdRegistrar) ResolveProxy(name string) (node.ProxyRoute, error) { - var route node.ProxyRoute - return route, lib.ErrProxyNoRoute -} -func (e *epmdRegistrar) RegisterProxy(name string, maxhop int, flags node.ProxyFlags) error { - return lib.ErrUnsupported -} -func (e *epmdRegistrar) UnregisterProxy(name string) error { - return lib.ErrUnsupported -} -func (e *epmdRegistrar) Config() (node.RegistrarConfig, error) { - return node.RegistrarConfig{}, lib.ErrUnsupported -} -func (e *epmdRegistrar) ConfigItem(name string) (etf.Term, error) { - return nil, lib.ErrUnsupported -} - -// just stub -func (e *epmdRegistrar) SetConfigUpdateCallback(func(string, etf.Term) error) error { - return lib.ErrUnsupported -} - -func (e *epmdRegistrar) composeExtra(options node.RegisterOptions) { - buf := make([]byte, 4) - - // 2 bytes: ergoExtraMagic - binary.BigEndian.PutUint16(buf[0:2], uint16(ergoExtraMagic)) - // 1 byte Extra version - buf[2] = ergoExtraVersion1 - // 1 byte flag enabled TLS - if options.EnableTLS { - buf[3] = 1 - } - e.extra = buf - return -} - -func (e *epmdRegistrar) readExtra(route *node.Route, buf []byte) { - if len(buf) < 4 { - return - } - magic := binary.BigEndian.Uint16(buf[0:2]) - if uint16(ergoExtraMagic) != magic { - return - } - - if buf[2] != ergoExtraVersion1 { - return - } - - if buf[3] == 1 { - route.Options.TLS = &tls.Config{} - } - - route.Options.IsErgo = true - return -} - -func (e *epmdRegistrar) registerNode(options node.RegisterOptions) (net.Conn, error) { - // - registrarHost := e.host - if registrarHost == "" { - registrarHost = e.nodeHost - } - dialer := net.Dialer{ - KeepAlive: 15 * time.Second, - } - dsn := net.JoinHostPort(registrarHost, strconv.Itoa(int(e.port))) - conn, err := dialer.Dial("tcp", dsn) - if err != nil { - return nil, err - } - - if err := e.sendAliveReq(conn); err != nil { - conn.Close() - return nil, err - } - - if err := e.readAliveResp(conn); err != nil { - conn.Close() - return nil, err - } - - lib.Log("[%s] EPMD client: node registered", e.name) - return conn, nil -} - -func (e *epmdRegistrar) sendAliveReq(conn net.Conn) error { - buf := make([]byte, 2+14+len(e.nodeName)+len(e.extra)) - binary.BigEndian.PutUint16(buf[0:2], uint16(len(buf)-2)) - buf[2] = byte(epmdAliveReq) - binary.BigEndian.PutUint16(buf[3:5], e.nodePort) - // http://erlang.org/doc/reference_manual/distributed.html (section 13.5) - // 77 — regular public node, 72 — hidden - // We use a regular one - buf[5] = 77 - // Protocol TCP - buf[6] = 0 - // 
HighestVersion - binary.BigEndian.PutUint16(buf[7:9], uint16(HandshakeVersion6)) - // LowestVersion - binary.BigEndian.PutUint16(buf[9:11], uint16(HandshakeVersion5)) - // length Node name - l := len(e.nodeName) - binary.BigEndian.PutUint16(buf[11:13], uint16(l)) - // Node name - offset := (13 + l) - copy(buf[13:offset], e.nodeName) - // Extra data - l = len(e.extra) - binary.BigEndian.PutUint16(buf[offset:offset+2], uint16(l)) - copy(buf[offset+2:offset+2+l], e.extra) - // Send - if _, err := conn.Write(buf); err != nil { - return err - } - return nil -} - -func (e *epmdRegistrar) readAliveResp(conn net.Conn) error { - buf := make([]byte, 16) - if _, err := conn.Read(buf); err != nil { - return err - } - switch buf[0] { - case epmdAliveResp, epmdAliveRespX: - default: - return fmt.Errorf("malformed EPMD response %v", buf) - } - if buf[1] != 0 { - if buf[1] == 1 { - return fmt.Errorf("can not register node with %q, name is taken", e.nodeName) - } - return fmt.Errorf("can not register %q, code: %v", e.nodeName, buf[1]) - } - return nil -} - -func (e *epmdRegistrar) sendPortPleaseReq(conn net.Conn, name string) error { - buflen := uint16(2 + len(name) + 1) - buf := make([]byte, buflen) - binary.BigEndian.PutUint16(buf[0:2], uint16(len(buf)-2)) - buf[2] = byte(epmdPortPleaseReq) - copy(buf[3:buflen], name) - _, err := conn.Write(buf) - return err -} - -func (e *epmdRegistrar) readPortResp(route *node.Route, c net.Conn) error { - - buf := make([]byte, 1024) - n, err := c.Read(buf) - if err != nil && err != io.EOF { - return fmt.Errorf("reading from link - %s", err) - } - buf = buf[:n] - - if buf[0] == epmdPortResp && buf[1] == 0 { - p := binary.BigEndian.Uint16(buf[2:4]) - nameLen := binary.BigEndian.Uint16(buf[10:12]) - route.Port = p - extraStart := 12 + int(nameLen) - // read extra data - buf = buf[extraStart:] - extraLen := binary.BigEndian.Uint16(buf[:2]) - buf = buf[2 : extraLen+2] - e.readExtra(route, buf) - return nil - } else if buf[1] > 0 { - return fmt.Errorf("desired node not found") - } else { - return fmt.Errorf("malformed reply - %#v", buf) - } -} diff --git a/proto/dist/types.go b/proto/dist/types.go deleted file mode 100644 index e526c9ab..00000000 --- a/proto/dist/types.go +++ /dev/null @@ -1,41 +0,0 @@ -package dist - -// Distributed operations codes (http://www.erlang.org/doc/apps/erts/erl_dist_protocol.html) -const ( - distProtoLINK = 1 - distProtoSEND = 2 - distProtoEXIT = 3 - distProtoUNLINK = 4 - distProtoNODE_LINK = 5 - distProtoREG_SEND = 6 - distProtoGROUP_LEADER = 7 - distProtoEXIT2 = 8 - distProtoSEND_TT = 12 - distProtoEXIT_TT = 13 - distProtoREG_SEND_TT = 16 - distProtoEXIT2_TT = 18 - distProtoMONITOR = 19 - distProtoDEMONITOR = 20 - distProtoMONITOR_EXIT = 21 - distProtoSEND_SENDER = 22 - distProtoSEND_SENDER_TT = 23 - distProtoPAYLOAD_EXIT = 24 - distProtoPAYLOAD_EXIT_TT = 25 - distProtoPAYLOAD_EXIT2 = 26 - distProtoPAYLOAD_EXIT2_TT = 27 - distProtoPAYLOAD_MONITOR_P_EXIT = 28 - distProtoSPAWN_REQUEST = 29 - distProtoSPAWN_REQUEST_TT = 30 - distProtoSPAWN_REPLY = 31 - distProtoSPAWN_REPLY_TT = 32 - distProtoALIAS_SEND = 33 - distProtoALIAS_SEND_TT = 34 - distProtoUNLINK_ID = 35 - distProtoUNLINK_ID_ACK = 36 - - // ergo operations codes - distProtoPROXY_CONNECT_REQUEST = 101 - distProtoPROXY_CONNECT_REPLY = 102 - distProtoPROXY_CONNECT_CANCEL = 103 - distProtoPROXY_DISCONNECT = 104 -) diff --git a/tests/001_local/common.go b/tests/001_local/common.go new file mode 100644 index 00000000..8386dba5 --- /dev/null +++ b/tests/001_local/common.go @@ -0,0 +1,32 @@ 
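+// Usage sketch (this is the pattern the tests below follow): the test sends a
+// *testcase to the process under test and blocks in wait() until the process
+// reports the result on the err channel or the timeout fires:
+//
+//	tc := &testcase{name: "TestSomething", err: make(chan error)}
+//	node.Send(pid, tc)
+//	if err := tc.wait(1); err != nil {
+//		t.Fatal(err)
+//	}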
+package local + +import ( + "fmt" + "time" + + "ergo.services/ergo/gen" +) + +var ( + errIncorrect = fmt.Errorf("incorrect") +) + +type initcase struct{} + +type testcase struct { + name string + input any + output any + err chan error +} + +func (t *testcase) wait(timeout int) error { + timer := time.NewTimer(time.Second * time.Duration(timeout)) + defer timer.Stop() + select { + case <-timer.C: + return gen.ErrTimeout + case e := <-t.err: + return e + } +} diff --git a/tests/001_local/t000_node_test.go b/tests/001_local/t000_node_test.go new file mode 100644 index 00000000..245ab080 --- /dev/null +++ b/tests/001_local/t000_node_test.go @@ -0,0 +1,258 @@ +package local + +import ( + "reflect" + "testing" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/node" +) + +func factory_t0() gen.ProcessBehavior { + return &t0{} +} + +type t0 struct { + act.Actor +} + +func TestT0NodeBasic(t *testing.T) { + nodename := gen.Atom("t0node@localhost") + env := map[gen.Env]any{ + gen.Env("A"): 1, + gen.Env("B"): 1.23, + gen.Env("C"): "d", + } + + nopt := gen.NodeOptions{ + Env: env, + } + nopt.Log.DefaultLogger.Disable = true + // use the direct method to start node with no applications + //node, err := ergo.StartNode(nodename, nopt) + node, err := node.Start(nodename, nopt, gen.Version{}) + if err != nil { + t.Fatal(err) + } + if node.Name() != nodename { + t.Fatal("wrong nodename") + } + + if node.IsAlive() == false { + t.Fatal("must be alive") + } + + info, err := node.Info() + if err != nil { + t.Fatal(err) + } + if info.Name != nodename { + t.Fatal("wrong nodename") + } + + if info.ProcessesTotal > 0 { + t.Fatal(errIncorrect) + } + if info.ProcessesRunning > 0 { + t.Fatal(errIncorrect) + } + if info.ProcessesZombee > 0 { + t.Fatal(errIncorrect) + } + if info.RegisteredAliases > 0 { + t.Fatal(errIncorrect) + } + if info.RegisteredNames > 0 { + t.Fatal(errIncorrect) + } + + // check node env + nenv := node.EnvList() + if reflect.DeepEqual(env, nenv) == false { + t.Fatal("unequal env") + } + + // get the value of env variable + if v, exist := node.Env("a"); exist { + if v != nenv["A"] { + t.Fatal(errIncorrect) + } + } else { + t.Fatal(errIncorrect) + } + + // removing env variable + node.SetEnv("a", nil) + if _, exist := node.Env("a"); exist { + t.Fatal(errIncorrect) + } + + // set env variable + v := "v" + node.SetEnv("a", v) + if nv, exist := node.Env("a"); exist { + if nv != v { + t.Fatal(errIncorrect) + } + } else { + t.Fatal(errIncorrect) + } + + // spawn process + pid, err := node.Spawn(factory_t0, gen.ProcessOptions{}) + if err != nil { + t.Fatal(err) + } + + // register associated name with the process + if err := node.RegisterName("test", pid); err != nil { + t.Fatal(err) + } + // register associated name with the process one more time + if err := node.RegisterName("test", pid); err != gen.ErrTaken { + t.Fatal(err) + } + // register associated name with the non existing process + unkpid := gen.PID{} + if err := node.RegisterName("test", unkpid); err != gen.ErrProcessUnknown { + t.Fatal(err) + } + + // check process info + pinfo, err := node.ProcessInfo(pid) + if err != nil { + t.Fatal(err) + } + if pinfo.PID != pid { + t.Fatal(errIncorrect) + } + + if pinfo.Name != "test" { + t.Fatal(errIncorrect) + } + + if pinfo.State != gen.ProcessStateSleep && pinfo.State != gen.ProcessStateRunning { + t.Fatal(errIncorrect) + } + + if pinfo.Parent != node.PID() { + t.Fatal(errIncorrect) + } + if pinfo.Leader != node.PID() { + t.Fatal(errIncorrect) + } + + // check node info 
with 1 running process + info, err = node.Info() + if info.ProcessesTotal != 1 { + t.Fatal(errIncorrect) + } + if pinfo.State == gen.ProcessStateSleep && info.ProcessesRunning != 0 { + t.Fatal(errIncorrect) + } + if pinfo.State == gen.ProcessStateRunning && info.ProcessesRunning != 1 { + t.Fatal(errIncorrect) + } + if info.RegisteredNames != 1 { + t.Fatal(errIncorrect) + } + + // unregister associated name + if p, err := node.UnregisterName("test"); err != nil || p != pid { + t.Fatal(errIncorrect) + } + + // unregister unknown/unregistered name + if _, err := node.UnregisterName("test"); err != gen.ErrNameUnknown { + t.Fatal(errIncorrect) + } + + // check process list + if l, err := node.ProcessList(); err != nil { + t.Fatal(err) + } else { + if reflect.DeepEqual(l, []gen.PID{pid}) == false { + t.Fatal(errIncorrect) + } + } + + // check kill + if err := node.Kill(pid); err != nil { + t.Fatal(err) + } + if l, err := node.ProcessList(); err != nil || len(l) != 0 { + // it can be in zombee or terminated state + if inf, err := node.ProcessInfo(l[0]); err == nil { + if inf.State != gen.ProcessStateZombee && inf.State != gen.ProcessStateTerminated { + t.Fatal(errIncorrect) + } + } else { + if _, err := node.ProcessInfo(pid); err != gen.ErrProcessUnknown { + t.Fatal(errIncorrect) + } + info, err = node.Info() + if info.ProcessesTotal != 0 { + t.Fatal(errIncorrect) + } + } + } + + // spawn a new process with registered name + pid, err = node.SpawnRegister("test2", factory_t0, gen.ProcessOptions{}) + if err != nil { + t.Fatal(err) + } + if info.RegisteredNames != 1 { + t.Fatal(errIncorrect) + } + pinfo, err = node.ProcessInfo(pid) + if pinfo.PID != pid { + t.Fatal(errIncorrect) + } + if pinfo.Name != "test2" { + t.Fatal(errIncorrect) + } + + // check sending message + if err := node.Send(pid, 1); err != nil { + t.Fatal(err) + } + if err := node.Send(gen.Atom("test2"), 1); err != nil { + t.Fatal(err) + } + if err := node.Send(gen.Atom("unknown"), 1); err != gen.ErrProcessUnknown { + t.Fatal(err) + } + + // unregister name that was associated with the process on spawning + if p, err := node.UnregisterName("test2"); err != nil || p != pid { + t.Fatal(errIncorrect) + } + if pinfo, err = node.ProcessInfo(pid); err != nil || pinfo.Name != "" { + t.Fatal(errIncorrect) + } + + // check exit signal (nil can't be used as a reason) + if err := node.SendExit(pid, nil); err != gen.ErrIncorrect { + t.Fatal(err) + } + // check sending exit signal. 
with no checking if this process has been terminated + if err := node.SendExit(pid, gen.TerminateReasonNormal); err != nil { + t.Fatal(errIncorrect) + } + + if err := node.WaitWithTimeout(0); err != gen.ErrTimeout { + t.Fatal(err) + } + node.Stop() + if node.IsAlive() == true { + t.Fatal("still alive") + } + if _, err := node.Info(); err != gen.ErrNodeTerminated { + t.Fatal(errIncorrect) + } + if err := node.WaitWithTimeout(0); err != gen.ErrNodeTerminated { + t.Fatal(err) + } +} diff --git a/tests/001_local/t001_process_test.go b/tests/001_local/t001_process_test.go new file mode 100644 index 00000000..a87761d8 --- /dev/null +++ b/tests/001_local/t001_process_test.go @@ -0,0 +1,406 @@ +package local + +import ( + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t1cases []*testcase +) + +func factory_t1() gen.ProcessBehavior { + return &t1{} +} + +type t1 struct { + act.Actor +} + +func (t *t1) Init(args ...any) error { + return nil +} + +func (t *t1) HandleMessage(from gen.PID, message any) error { + tc := message.(*testcase) + // get method by name + method := reflect.ValueOf(t).MethodByName(tc.name) + // call this method with the provided *testcase + args := []reflect.Value{reflect.ValueOf(tc)} + method.Call(args) + return nil +} + +// +// test methods +// + +func (t *t1) TestNodeInterface(tc *testcase) { + if t.Node() != tc.output.(gen.Node) { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestEnv(tc *testcase) { + if reflect.DeepEqual(t.EnvList(), tc.output) == false { + tc.err <- errIncorrect + return + } + t.SetEnv("k", int(123)) + if v, exist := t.Env("k"); exist == false { + tc.err <- errIncorrect + return + } else { + i, _ := v.(int) + if i != 123 { + tc.err <- errIncorrect + return + } + } + // remove env + t.SetEnv("a", nil) + if _, exist := t.Env("a"); exist { + tc.err <- errIncorrect + return + } + // modification of process' env shouldn't reflect on the node's env + if reflect.DeepEqual(t.EnvList(), t.Node().EnvList()) == true { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestName(tc *testcase) { + if reflect.DeepEqual(t.Name(), tc.output) == false { + tc.err <- errIncorrect + return + } + if err := t.RegisterName("newname"); err != gen.ErrTaken { + tc.err <- errIncorrect + return + } + + if err := t.UnregisterName(); err != nil { + tc.err <- err + return + } + if err := t.UnregisterName(); err != gen.ErrNameUnknown { + tc.err <- errIncorrect + return + } + if err := t.RegisterName("newname"); err != nil { + tc.err <- err + return + } + if t.Name() != "newname" { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestPID(tc *testcase) { + if reflect.DeepEqual(t.PID(), tc.output) == false { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestParentLeader(tc *testcase) { + if reflect.DeepEqual(t.Parent(), tc.output) == false { + tc.err <- errIncorrect + return + } + if reflect.DeepEqual(t.Leader(), tc.output) == false { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestUptime(tc *testcase) { + if t.Uptime() != 0 { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestState(tc *testcase) { + if t.State() != gen.ProcessStateRunning { + tc.err <- errIncorrect + return + } + tc.err <- nil +} + +func (t *t1) TestCompression(tc *testcase) { + // enable/disable + if t.Compression() != false { + tc.err <- errIncorrect + return + } + if err := t.SetCompression(true); err 
!= nil {
+		t.Log().Error("%s", err)
+		tc.err <- err
+		return
+	}
+	if t.Compression() != true {
+		tc.err <- errIncorrect
+		return
+	}
+
+	// level
+	if t.CompressionLevel() != gen.DefaultCompressionLevel {
+		tc.err <- errIncorrect
+		return
+	}
+	if err := t.SetCompressionLevel(100); err != gen.ErrIncorrect {
+		t.Log().Error("SetCompressionLevel (with invalid value): %s", err)
+		tc.err <- errIncorrect
+		return
+	}
+	if err := t.SetCompressionLevel(gen.CompressionBestSize); err != nil {
+		t.Log().Error("SetCompressionLevel: %s", err)
+		tc.err <- err
+		return
+	}
+	if t.CompressionLevel() != gen.CompressionBestSize {
+		t.Log().Error("CompressionLevel")
+		tc.err <- errIncorrect
+		return
+	}
+	// threshold
+	if x := t.CompressionThreshold(); x != gen.DefaultCompressionThreshold {
+		t.Log().Error("CompressionThreshold")
+		tc.err <- errIncorrect
+		return
+	}
+
+	if err := t.SetCompressionThreshold(1); err != gen.ErrIncorrect {
+		t.Log().Error("SetCompressionThreshold (with invalid value): %s", err)
+		tc.err <- errIncorrect
+		return
+	}
+	if err := t.SetCompressionThreshold(gen.DefaultCompressionThreshold + 100); err != nil {
+		t.Log().Error("SetCompressionThreshold: %s", err)
+		tc.err <- errIncorrect
+		return
+	}
+	if x := t.CompressionThreshold(); x != gen.DefaultCompressionThreshold+100 {
+		tc.err <- errIncorrect
+		return
+	}
+	tc.err <- nil
+}
+
+func (t *t1) TestSendPriority(tc *testcase) {
+	if t.SendPriority() != gen.MessagePriorityNormal {
+		tc.err <- errIncorrect
+		return
+	}
+
+	if err := t.SetSendPriority(gen.MessagePriorityMax); err != nil {
+		tc.err <- errIncorrect
+		return
+	}
+	if x := t.SendPriority(); x != gen.MessagePriorityMax {
+		tc.err <- errIncorrect
+		return
+	}
+	if err := t.SetSendPriority(gen.MessagePriority(12345)); err != gen.ErrIncorrect {
+		tc.err <- errIncorrect
+		return
+	}
+
+	tc.err <- nil
+}
+
+func (t *t1) TestAliases(tc *testcase) {
+	if len(t.Aliases()) != 0 {
+		tc.err <- errIncorrect
+		return
+	}
+	a1, err := t.CreateAlias()
+	if err != nil {
+		tc.err <- err
+		return
+	}
+
+	a2, err := t.CreateAlias()
+	if err != nil {
+		tc.err <- err
+		return
+	}
+	aliases := []gen.Alias{a1, a2}
+	if reflect.DeepEqual(aliases, t.Aliases()) == false {
+		tc.err <- errIncorrect
+		return
+	}
+	if err := t.DeleteAlias(a1); err != nil {
+		tc.err <- err
+		return
+	}
+	aliases = []gen.Alias{a2}
+	if reflect.DeepEqual(aliases, t.Aliases()) == false {
+		tc.err <- errIncorrect
+		return
+	}
+	tc.err <- nil
+}
+func (t *t1) TestEvents(tc *testcase) {
+	e1 := gen.Atom("e1")
+	e2 := gen.Atom("e2")
+	if len(t.Events()) != 0 {
+		tc.err <- errIncorrect
+		return
+	}
+	opts := gen.EventOptions{
+		Notify: true,
+		Buffer: 10,
+	}
+	_, err := t.RegisterEvent(e1, opts)
+	if err != nil {
+		tc.err <- err
+		return
+	}
+
+	_, err = t.RegisterEvent(e2, opts)
+	if err != nil {
+		tc.err <- err
+		return
+	}
+	mevents := map[gen.Atom]bool{
+		e1: true,
+		e2: true,
+	}
+	ev := make(map[gen.Atom]bool)
+	for _, e := range t.Events() {
+		ev[e] = true
+	}
+	if reflect.DeepEqual(mevents, ev) == false {
+		tc.err <- errIncorrect
+		return
+	}
+	if err := t.UnregisterEvent(e1); err != nil {
+		tc.err <- err
+		return
+	}
+	events := []gen.Atom{e2}
+	if reflect.DeepEqual(events, t.Events()) == false {
+		tc.err <- errIncorrect
+		return
+	}
+	tc.err <- nil
+}
+
+func (t *t1) TestSpawn(tc *testcase) {
+	factory := func() gen.ProcessBehavior {
+		x := struct {
+			act.Actor
+		}{}
+		return &x
+	}
+	pid, err := t.Spawn(factory, gen.ProcessOptions{})
+	if err != nil {
+		tc.err <- err
+		return
+	}
+	info, err :=
t.Node().ProcessInfo(pid) + if err != nil { + tc.err <- err + return + } + if info.Parent != t.PID() { + tc.err <- errIncorrect + return + } + + pid, err = t.SpawnRegister("reg", factory, gen.ProcessOptions{}) + if err != nil { + tc.err <- err + return + } + info, err = t.Node().ProcessInfo(pid) + if err != nil { + tc.err <- err + return + } + if info.Parent != t.PID() { + tc.err <- errIncorrect + return + } + if info.Name != "reg" { + tc.err <- errIncorrect + return + } + + tc.err <- nil +} + +func TestT1ProcessBasic(t *testing.T) { + nenv := map[gen.Env]any{ + gen.Env("A"): 1, + gen.Env("B"): 1.23, + gen.Env("C"): "d", + } + nopt := gen.NodeOptions{ + Env: nenv, + } + nopt.Log.DefaultLogger.Disable = true + // nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("t1node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + penv := map[gen.Env]any{ + gen.Env("B"): 1.23, + gen.Env("D"): "d", + } + popt := gen.ProcessOptions{ + Env: penv, + } + pid, err := node.SpawnRegister(gen.Atom("a"), factory_t1, popt) + if err != nil { + panic(err) + } + + expenv := nenv + for k, v := range penv { + expenv[k] = v + } + + t1cases = []*testcase{ + {"TestNodeInterface", nil, node, make(chan error)}, + {"TestEnv", nil, expenv, make(chan error)}, + {"TestName", nil, gen.Atom("a"), make(chan error)}, + {"TestPID", nil, pid, make(chan error)}, + {"TestParentLeader", nil, node.PID(), make(chan error)}, + {"TestUptime", nil, nil, make(chan error)}, + {"TestState", nil, nil, make(chan error)}, + {"TestCompression", nil, nil, make(chan error)}, + {"TestSendPriority", nil, nil, make(chan error)}, + {"TestAliases", nil, nil, make(chan error)}, + {"TestEvents", nil, nil, make(chan error)}, + {"TestSpawn", nil, nil, make(chan error)}, + } + for _, tc := range t1cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(10); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t002_actor_test.go b/tests/001_local/t002_actor_test.go new file mode 100644 index 00000000..fe531a04 --- /dev/null +++ b/tests/001_local/t002_actor_test.go @@ -0,0 +1,195 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t2cases []*testcase +) + +func factory_t2() gen.ProcessBehavior { + return &t2{} +} + +type t2 struct { + act.Actor + + testcase *testcase +} + +func (t *t2) Init(args ...any) error { + return nil +} + +func (t *t2) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t2) Terminate(reason error) { + tc := t.testcase + if tc == nil { + return + } + tc.output = reason + tc.err <- nil +} + +// +// test methods +// + +func (t *t2) TestTrapExit(input any) { + defer func() { + t.testcase = nil + }() + + // set/unset trap exit + if t.TrapExit() != false { + t.testcase.err <- errIncorrect + return + } + t.SetTrapExit(true) + if t.TrapExit() != true { + t.testcase.err <- errIncorrect + return + } + t.SetTrapExit(false) + if t.TrapExit() != false { + t.testcase.err <- errIncorrect + return + } + + // spawn a child to check this flag in action + pid, err := 
t.Spawn(factory_t2, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(pid) + + targetTC := &testcase{"TargetTrapExit", nil, nil, make(chan error)} + t.Send(pid, targetTC) + if err := targetTC.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + // spawn the other process for sending gen.TerminateReasonShutdown + alien, err := t.Spawn(factory_t2, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(alien) + + alienTC := &testcase{"Alien", pid, nil, make(chan error)} + t.Send(alien, alienTC) + if err := alienTC.wait(1); err != nil { + t.testcase.err <- err + return + } + + // target must receive gen.MessageExitPID with gen.TerminateReasonShutdown as a reason + if err := targetTC.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + m := targetTC.output.(gen.MessageExitPID) + if m.Reason != gen.TerminateReasonShutdown { + t.testcase.err <- errIncorrect + return + } + if m.PID != alien { + t.testcase.err <- errIncorrect + return + } + + // send "shutdown" by parent. child process should be terminated + // invoking Terminate callback + t.SendExit(pid, gen.TerminateReasonShutdown) + if err := targetTC.wait(1); err != nil { + t.testcase.err <- err + return + } + reason := errors.Unwrap(targetTC.output.(error)) + if reason != gen.TerminateReasonShutdown { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t2) TargetTrapExit(input any) { + if _, ok := input.(initcase); ok { + t.SetTrapExit(true) + t.testcase.err <- nil + return + } + + // got message + t.testcase.output = input + t.testcase.err <- nil +} + +func (t *t2) Alien(input any) { + if _, ok := input.(initcase); ok { + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, gen.TerminateReasonShutdown) + t.testcase.err <- nil + return + } + panic(input) +} + +func TestT2Actor(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t2node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t2, popt) + if err != nil { + panic(err) + } + + t2cases = []*testcase{ + {"TestTrapExit", nil, nil, make(chan error)}, + } + for _, tc := range t2cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t003_actor_send_test.go b/tests/001_local/t003_actor_send_test.go new file mode 100644 index 00000000..73be54fc --- /dev/null +++ b/tests/001_local/t003_actor_send_test.go @@ -0,0 +1,249 @@ +package local + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + t3cases []*testcase +) + +func factory_t3() gen.ProcessBehavior { + return &t3{} +} + +type t3 struct { + act.Actor + + testcase *testcase +} + +func (t *t3) Init(args ...any) error { + return nil +} + +func (t *t3) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +// +// test methods 
+// + +func (t *t3) TestSendPID(input any) { + defer func() { + t.testcase = nil + }() + + pid, err := t.Spawn(factory_t3, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(pid) + + // send the new testcase for the spawned process + newtc := &testcase{"PongPID", 1.234, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + t.Send(pid, newtc.input) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + if reflect.DeepEqual(newtc.input, newtc.output) == false { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil +} + +func (t *t3) TestSendProcessID(input any) { + defer func() { + t.testcase = nil + }() + + pid, err := t.Spawn(factory_t3, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(pid) + + // send the new testcase for the spawned process + newtc := &testcase{"PongProcessID", 1234, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + // as a result newtc.output should have a process name here + t.Send(newtc.output, newtc.input) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + // as a final result newtc.output should have the same value we sent (input value) + if reflect.DeepEqual(newtc.input, newtc.output) == false { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil +} + +func (t *t3) TestSendAlias(input any) { + defer func() { + t.testcase = nil + }() + + pid, err := t.Spawn(factory_t3, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(pid) + + newtc := &testcase{"PongAlias", "hello", nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + // as a result newtc.output should have the alias of this process here + t.Send(newtc.output, newtc.input) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + // as a final result newtc.output should have the same value we sent (input value) + if reflect.DeepEqual(newtc.input, newtc.output) == false { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil +} +func (t *t3) TestSendUnknown(input any) { + defer func() { + t.testcase = nil + }() + + if err := t.Send(gen.Atom("unknown process"), 1); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil +} + +func (t *t3) PongPID(input any) { + if _, init := input.(initcase); init { + t.testcase.err <- nil + return + } + + // received message by pid. check this input with the testcase.input + if reflect.DeepEqual(t.testcase.input, input) == false { + t.testcase.err <- errIncorrect + return + } + + // pong the input + t.testcase.output = input + t.testcase.err <- nil +} + +func (t *t3) PongProcessID(input any) { + if _, init := input.(initcase); init { + name := gen.Atom(lib.RandomString(10)) + if err := t.RegisterName(name); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = name + t.testcase.err <- nil + return + } + // received message by process name. 
check this input with the testcase.input + if reflect.DeepEqual(t.testcase.input, input) == false { + t.testcase.err <- errIncorrect + return + } + + // pong the input + t.testcase.output = input + t.testcase.err <- nil +} + +func (t *t3) PongAlias(input any) { + if _, init := input.(initcase); init { + alias, err := t.CreateAlias() + if err != nil { + t.testcase.err <- err + return + } + t.testcase.output = alias + t.testcase.err <- nil + return + } + + // received message by process alias. check this input with the testcase.input + if reflect.DeepEqual(t.testcase.input, input) == false { + t.testcase.err <- errIncorrect + return + } + t.testcase.output = t.testcase.input + t.testcase.err <- nil +} + +func TestT3ActorSend(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t3node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t3, popt) + if err != nil { + panic(err) + } + + t3cases = []*testcase{ + {"TestSendPID", nil, nil, make(chan error)}, + {"TestSendProcessID", nil, nil, make(chan error)}, + {"TestSendAlias", nil, nil, make(chan error)}, + {"TestSendUnknown", nil, nil, make(chan error)}, + } + for _, tc := range t3cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t004_actor_call_test.go b/tests/001_local/t004_actor_call_test.go new file mode 100644 index 00000000..74a0d025 --- /dev/null +++ b/tests/001_local/t004_actor_call_test.go @@ -0,0 +1,340 @@ +package local + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + t4cases []*testcase +) + +func factory_t4() gen.ProcessBehavior { + return &t4{} +} + +type forward struct { + from gen.PID + ref gen.Ref + request any +} + +type t4 struct { + act.Actor + + testcase *testcase +} + +func (t *t4) Init(args ...any) error { + return nil +} + +func (t *t4) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t4) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + if pid, ok := t.testcase.output.(gen.PID); ok { + if err := t.Send(pid, forward{from, ref, request}); err != nil { + return nil, err + } + // return nil to skip sending response. 
and stop this process
+		return nil, gen.TerminateReasonNormal
+	}
+	// send response and terminate this process normally
+	return request, gen.TerminateReasonNormal
+}
+
+// test cases
+
+func (t *t4) TestCallPID(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	pid, err := t.Spawn(factory_t4, gen.ProcessOptions{})
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+
+	// send new testcase for the spawned process
+	newtc := &testcase{"PongPID", 1234, nil, make(chan error)}
+	t.Send(pid, newtc)
+	if err := newtc.wait(1); err != nil {
+		t.testcase.err <- err
+		return
+	}
+	output, err := t.Call(pid, newtc.input)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	if reflect.DeepEqual(newtc.input, output) == false {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// must be stopped here
+	if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func (t *t4) TestCallProcessID(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	pid, err := t.Spawn(factory_t4, gen.ProcessOptions{})
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+
+	// send new testcase for the spawned process
+	newtc := &testcase{"PongProcessID", 1.234, nil, make(chan error)}
+	t.Send(pid, newtc)
+	if err := newtc.wait(1); err != nil {
+		t.testcase.err <- err
+		return
+	}
+
+	// we must have process name in the output field
+	if _, ok := newtc.output.(gen.Atom); ok == false {
+		t.testcase.err <- errIncorrect
+	}
+	output, err := t.Call(newtc.output, newtc.input)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	if reflect.DeepEqual(newtc.input, output) == false {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// must be stopped here
+	if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func (t *t4) TestCallAlias(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	pid, err := t.Spawn(factory_t4, gen.ProcessOptions{})
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+
+	// send new testcase for the spawned process
+	newtc := &testcase{"PongAlias", "hello", nil, make(chan error)}
+	t.Send(pid, newtc)
+	if err := newtc.wait(1); err != nil {
+		t.testcase.err <- err
+		return
+	}
+
+	// we must have process alias in the output field
+	if _, ok := newtc.output.(gen.Alias); ok == false {
+		t.testcase.err <- errIncorrect
+	}
+	output, err := t.Call(newtc.output, newtc.input)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	if reflect.DeepEqual(newtc.input, output) == false {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// must be stopped here
+	if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func (t *t4) TestCallForward(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+	pid, err := t.Spawn(factory_t4, gen.ProcessOptions{})
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+
+	// send new testcase for the spawned process
+	newtc := &testcase{"ForwardCall", 12.34, nil, make(chan error)}
+	t.Send(pid, newtc)
+	if err := newtc.wait(1); err != nil {
+		t.testcase.err <- err
+		return
+	}
+	output, err := t.Call(pid, newtc.input)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	if reflect.DeepEqual(newtc.input, output) == false {
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// must be stopped here
+	if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown {
+ t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t4) TestCallUnknown(input any) { + defer func() { + t.testcase = nil + }() + if _, err := t.Call(gen.Atom("unknown process"), 1); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil +} + +func (t *t4) PongPID(input any) { + if _, init := input.(initcase); init { + t.testcase.err <- nil + return + } + t.testcase.err <- errIncorrect +} + +func (t *t4) PongProcessID(input any) { + if _, init := input.(initcase); init { + name := gen.Atom(lib.RandomString(10)) + if err := t.RegisterName(name); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = name + t.testcase.err <- nil + return + } + t.testcase.err <- errIncorrect +} + +func (t *t4) PongAlias(input any) { + if _, init := input.(initcase); init { + alias, err := t.CreateAlias() + if err != nil { + t.testcase.err <- err + return + } + t.testcase.output = alias + t.testcase.err <- nil + return + } + t.testcase.err <- errIncorrect + +} + +func (t *t4) ForwardCall(input any) { + if _, init := input.(initcase); init == false { + t.testcase.err <- errIncorrect + return + } + pid, err := t.Spawn(factory_t4, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"PongForwardCall", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + t.testcase.output = pid + t.testcase.err <- nil +} + +func (t *t4) PongForwardCall(input any) { + if _, init := input.(initcase); init { + t.testcase.err <- nil + return + } + + fwd := input.(forward) + if err := t.SendResponse(fwd.from, fwd.ref, fwd.request); err != nil { + t.testcase.err <- err + return + } + + return +} + +func TestT4ActorCall(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t4node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t4, popt) + if err != nil { + panic(err) + } + + t4cases = []*testcase{ + {"TestCallPID", nil, nil, make(chan error)}, + {"TestCallProcessID", nil, nil, make(chan error)}, + {"TestCallAlias", nil, nil, make(chan error)}, + {"TestCallForward", nil, nil, make(chan error)}, + {"TestCallUnknown", nil, nil, make(chan error)}, + } + for _, tc := range t4cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t005_actor_monitor_test.go b/tests/001_local/t005_actor_monitor_test.go new file mode 100644 index 00000000..b882a23a --- /dev/null +++ b/tests/001_local/t005_actor_monitor_test.go @@ -0,0 +1,767 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + t5cases []*testcase +) + +func factory_t5() gen.ProcessBehavior { + return &t5{} +} + +type t5 struct { + act.Actor + + testcase *testcase +} + +func (t *t5) Init(args ...any) error { + return nil +} + +func (t *t5) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } 
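+	// dispatch: invoke the test method named by the current case via reflection,
+	// passing the raw message (the first message is replaced with initcase{})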
+ method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t5) Terminate(reason error) { + tc := t.testcase + if tc == nil { + return + } + tc.output = reason + tc.err <- nil +} + +// test methods + +func (t *t5) TestMonitorPID(input any) { + defer func() { + t.testcase = nil + }() + + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.TerminateReasonPanic} { + + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringPID", reason, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Node().Kill(pid) + } + + t.testcase.err <- nil +} + +func (t *t5) TestMonitorProcessID(input any) { + defer func() { + t.testcase = nil + }() + + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.TerminateReasonPanic, gen.ErrUnregistered} { + + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringProcessID", reason, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Node().Kill(pid) + } + + t.testcase.err <- nil +} + +func (t *t5) TestMonitorAlias(input any) { + defer func() { + t.testcase = nil + }() + + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.TerminateReasonPanic, gen.ErrUnregistered} { + + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringAlias", reason, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Node().Kill(pid) + } + + t.testcase.err <- nil +} + +func (t *t5) TestMonitorEvent(input any) { + defer func() { + t.testcase = nil + }() + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.TerminateReasonPanic, gen.ErrUnregistered} { + + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringEvent", reason, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Node().Kill(pid) + } + + t.testcase.err <- nil +} + +func (t *t5) TestMonitorUnknown(input any) { + defer func() { + t.testcase = nil + }() + + // unknown PID + pid := t.PID() + pid.ID += 10000 + if err := t.MonitorPID(pid); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + // unknown ProcessID + pname := gen.ProcessID{Name: "test", Node: t.Node().Name()} + if err := t.MonitorProcessID(pname); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + // unknown Alias + alias := gen.Alias{Node: t.Node().Name()} + if err := t.MonitorAlias(alias); err != gen.ErrAliasUnknown { + t.testcase.err <- errIncorrect + return + } + + // unknown Event + event := gen.Event{Name: "test"} + if _, err := t.MonitorEvent(event); err != gen.ErrEventUnknown { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil +} + +// test implementations for monitoring by PID + +func (t *t5) MonitoringPID(input any) { + if _, ok 
:= input.(initcase); ok { + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + t.testcase.output = pid + newtc := &testcase{"TargetPID", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsPID) != 0 { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + if err := t.MonitorPID(pid); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsPID) != 1 || info.MonitorsPID[0] != pid { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + reason := t.testcase.input.(error) + switch reason { + case gen.TerminateReasonPanic: + // send it as a regular message. it will cause a panic inside TargetPID + t.Send(pid, "custom panic") + return + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + default: + t.SendExit(pid, reason) + } + + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // Terminate callback should be invoked + if errors.Unwrap(newtc.output.(error)) != reason { + t.testcase.err <- errIncorrect + return + } + + if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + // wait for down message + return + } + + // must receive down message + if m, ok := input.(gen.MessageDownPID); ok { + pid := t.testcase.output.(gen.PID) + if m.PID != pid { + t.testcase.err <- errIncorrect + return + } + + reason := t.testcase.input.(error) + if m.Reason != reason { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + return + } + + t.testcase.err <- errIncorrect +} + +// test implementations for monitoring by ProcessID + +func (t *t5) MonitoringProcessID(input any) { + if _, ok := input.(initcase); ok { + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetProcessID", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + name, ok := newtc.output.(gen.Atom) + if ok == false { + t.testcase.err <- errIncorrect + return + } + + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsProcessID) != 0 { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + pname := gen.ProcessID{Name: name, Node: t.Node().Name()} + t.testcase.output = pname + if err := t.MonitorProcessID(pname); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsProcessID) != 1 || info.MonitorsProcessID[0] != pname { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := t.testcase.input.(error) + switch reason { + case gen.TerminateReasonPanic: + // send it as a regular message.
it will cause a panic inside TargetProcessID + t.Send(pid, "custom panic") + return + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + case gen.ErrUnregistered: + t.Send(pid, 1) + default: + t.SendExit(pid, reason) + } + + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // Terminate callback should be invoked + if errors.Unwrap(newtc.output.(error)) != reason { + t.testcase.err <- errIncorrect + return + } + + if reason == gen.ErrUnregistered { + // must be alive + if _, err := t.Node().ProcessInfo(pid); err != nil { + t.testcase.err <- errIncorrect + return + } + } else { + if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + } + + // wait for down message + return + } + + // must receive down message + if m, ok := input.(gen.MessageDownProcessID); ok { + processid := t.testcase.output.(gen.ProcessID) + if m.ProcessID != processid { + t.testcase.err <- errIncorrect + return + } + + reason := t.testcase.input.(error) + if m.Reason != reason { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + return + } + + t.testcase.err <- errIncorrect +} + +// test implementations for monitoring by Alias + +func (t *t5) MonitoringAlias(input any) { + if _, ok := input.(initcase); ok { + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetAlias", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + alias, ok := newtc.output.(gen.Alias) + if ok == false { + t.testcase.err <- errIncorrect + return + } + t.testcase.output = alias + + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsAlias) != 0 { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + if err := t.MonitorAlias(alias); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsAlias) != 1 || info.MonitorsAlias[0] != alias { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + reason := t.testcase.input.(error) + switch reason { + case gen.TerminateReasonPanic: + // send it as a regular message.
it will cause a panic inside TargetAlias + t.Send(pid, "custom panic") + return + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + case gen.ErrUnregistered: + t.Send(pid, alias) + default: + t.SendExit(pid, reason) + } + + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // Terminate callback should be invoked + if errors.Unwrap(newtc.output.(error)) != reason { + t.testcase.err <- errIncorrect + return + } + + if reason == gen.ErrUnregistered { + // must be alive + if _, err := t.Node().ProcessInfo(pid); err != nil { + t.testcase.err <- errIncorrect + return + } + } else { + if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + } + + // wait for down message + return + } + + // must receive down message + if m, ok := input.(gen.MessageDownAlias); ok { + if m.Alias != t.testcase.output.(gen.Alias) { + t.testcase.err <- errIncorrect + return + } + + reason := t.testcase.input.(error) + if m.Reason != reason { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + t.testcase.err <- errIncorrect +} + +// test implementations for monitoring event + +func (t *t5) MonitoringEvent(input any) { + if _, ok := input.(initcase); ok { + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetEvent", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + name, ok := newtc.output.(gen.Atom) + if ok == false { + t.testcase.err <- errIncorrect + return + } + + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsEvent) != 0 { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + event := gen.Event{Name: name, Node: t.Node().Name()} + t.testcase.output = event + + if _, err := t.MonitorEvent(event); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + if info, err := t.Node().ProcessInfo(t.PID()); err == nil { + if len(info.MonitorsEvent) != 1 || info.MonitorsEvent[0] != event { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + reason := t.testcase.input.(error) + switch reason { + case gen.TerminateReasonPanic: + // send it as a regular message.
it will cause a panic inside TargetEvent + t.Send(pid, "custom panic") + return + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + case gen.ErrUnregistered: + t.Send(pid, event) + default: + t.SendExit(pid, reason) + } + + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // Terminate callback should be invoked + if errors.Unwrap(newtc.output.(error)) != reason { + t.testcase.err <- errIncorrect + return + } + + if reason == gen.ErrUnregistered { + // must be alive + if _, err := t.Node().ProcessInfo(pid); err != nil { + t.testcase.err <- errIncorrect + return + } + } else { + if _, err := t.Node().ProcessInfo(pid); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + } + + // wait for down message + return + } + + // must receive down message + if m, ok := input.(gen.MessageDownEvent); ok { + if m.Event != t.testcase.output.(gen.Event) { + t.testcase.err <- errIncorrect + return + } + + reason := t.testcase.input.(error) + if m.Reason != reason { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + return + } + + t.testcase.err <- errIncorrect +} + +func (t *t5) TargetPID(input any) { + if _, ok := input.(initcase); ok { + t.testcase.err <- nil + return + } + t.testcase = nil + panic(input) +} + +func (t *t5) TargetProcessID(input any) { + if _, ok := input.(initcase); ok { + name := gen.Atom(lib.RandomString(10)) + if err := t.RegisterName(name); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = name + t.testcase.err <- nil + return + } + + if _, ok := input.(int); ok { + if err := t.UnregisterName(); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = fmt.Errorf("bla: %w", gen.ErrUnregistered) + t.testcase.err <- nil + return + } + t.testcase = nil + panic(input) +} + +func (t *t5) TargetAlias(input any) { + if _, ok := input.(initcase); ok { + if alias, err := t.CreateAlias(); err != nil { + t.testcase.err <- err + return + } else { + t.testcase.output = alias + } + t.testcase.err <- nil + return + } + + if alias, ok := input.(gen.Alias); ok { + if err := t.DeleteAlias(alias); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = fmt.Errorf("%w", gen.ErrUnregistered) + t.testcase.err <- nil + return + } + t.testcase = nil + panic(input) +} + +func (t *t5) TargetEvent(input any) { + if _, ok := input.(initcase); ok { + name := gen.Atom(lib.RandomString(10)) + + opts := gen.EventOptions{ + Notify: false, + Buffer: 10, + } + if _, err := t.RegisterEvent(name, opts); err != nil { + t.testcase.err <- err + return + } else { + t.testcase.output = name + } + t.testcase.err <- nil + return + } + + if event, ok := input.(gen.Event); ok { + if err := t.UnregisterEvent(event.Name); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = fmt.Errorf("%w", gen.ErrUnregistered) + t.testcase.err <- nil + return + } + t.testcase = nil + panic(input) +} + +func TestT5ActorMonitor(t *testing.T) { + nopt := gen.NodeOptions{} + + // disable logging to suppress panic messages + nopt.Log.DefaultLogger.Disable = true + + node, err := ergo.StartNode("t5node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t5, popt) + if err != nil { + panic(err) + } + + t5cases = []*testcase{ + {"TestMonitorPID", nil, nil, make(chan error)}, + {"TestMonitorProcessID", nil, nil, make(chan error)}, + {"TestMonitorAlias", nil, nil, make(chan error)}, + {"TestMonitorEvent", nil, nil, make(chan
error)}, + {"TestMonitorUnknown", nil, nil, make(chan error)}, + } + for _, tc := range t5cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t006_actor_link_test.go b/tests/001_local/t006_actor_link_test.go new file mode 100644 index 00000000..bc74a1dc --- /dev/null +++ b/tests/001_local/t006_actor_link_test.go @@ -0,0 +1,683 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + t6cases []*testcase +) + +func factory_t6() gen.ProcessBehavior { + return &t6{} +} + +type t6 struct { + act.Actor + + testcase *testcase +} + +func (t *t6) Init(args ...any) error { + return nil +} + +func (t *t6) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t6) Terminate(reason error) { + tc := t.testcase + if tc == nil { + return + } + tc.output = reason + + // we shouldn't be blocked by the channel, so we use select + select { + case tc.err <- nil: + default: + } +} + +// test methods + +func (t *t6) TestLinkPID(input any) { + defer func() { + t.testcase = nil + }() + + for _, trapexit := range []bool{false, true} { + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown} { + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + newtc := &testcase{"LinkingPID", trapexit, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, reason) + + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + // check newtc.output + if trapexit { + exit := newtc.output.(gen.MessageExitPID) + if exit.Reason != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + e := errors.Unwrap(newtc.output.(error)) + if e != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } + + t.Node().Kill(pid) + } + } + t.testcase.err <- nil +} + +func (t *t6) TestLinkProcessID(input any) { + defer func() { + t.testcase = nil + }() + for _, trapexit := range []bool{false, true} { + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.ErrUnregistered} { + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + newtc := &testcase{"LinkingProcessID", trapexit, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, reason) + + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + // check newtc.output + if trapexit { + exit := newtc.output.(gen.MessageExitProcessID) + if exit.Reason != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + e := errors.Unwrap(newtc.output.(error)) + if e != reason { 
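+ // mismatch: the child's Terminate reason should unwrap to the exit we sent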
+ t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } + + t.Node().Kill(pid) + } + } + t.testcase.err <- nil +} + +func (t *t6) TestLinkAlias(input any) { + defer func() { + t.testcase = nil + }() + for _, trapexit := range []bool{false, true} { + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.ErrUnregistered} { + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + newtc := &testcase{"LinkingAlias", trapexit, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, reason) + + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + // check newtc.output + if trapexit { + exit := newtc.output.(gen.MessageExitAlias) + if exit.Reason != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + e := errors.Unwrap(newtc.output.(error)) + if e != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } + + t.Node().Kill(pid) + } + } + t.testcase.err <- nil +} + +func (t *t6) TestLinkEvent(input any) { + defer func() { + t.testcase = nil + }() + for _, trapexit := range []bool{false, true} { + for _, reason := range []error{gen.TerminateReasonKill, errors.New("custom"), gen.TerminateReasonShutdown, gen.ErrUnregistered} { + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + newtc := &testcase{"LinkingEvent", trapexit, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, reason) + + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + // check newtc.output + if trapexit { + exit := newtc.output.(gen.MessageExitEvent) + if exit.Reason != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } else { + e := errors.Unwrap(newtc.output.(error)) + if e != reason { + t.Node().Kill(pid) + t.testcase.err <- errIncorrect + return + } + } + + t.Node().Kill(pid) + } + } + t.testcase.err <- nil +} + +func (t *t6) TestLinkUnknown(input any) { + defer func() { + t.testcase = nil + }() + // unknown PID + pid := t.PID() + pid.ID += 10000 + if err := t.LinkPID(pid); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + // unknown ProcessID + pname := gen.ProcessID{Name: "test", Node: t.Node().Name()} + if err := t.LinkProcessID(pname); err != gen.ErrProcessUnknown { + t.testcase.err <- errIncorrect + return + } + + // unknown Alias + alias := gen.Alias{Node: t.Node().Name()} + if err := t.LinkAlias(alias); err != gen.ErrAliasUnknown { + t.testcase.err <- errIncorrect + return + } + + // unknown Event + event := gen.Event{Name: "test"} + if _, err := t.LinkEvent(event); err != gen.ErrEventUnknown { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t6) LinkingPID(input any) { + + if _, ok := input.(initcase); ok { + t.SetTrapExit(t.testcase.input.(bool)) + + // start child + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetPID", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // link with it + if err := t.LinkPID(pid); err 
!= nil { + t.testcase.err <- err + return + } + if info, e := t.Info(); e != nil || len(info.LinksPID) != 1 { + t.testcase.err <- errIncorrect + return + } else { + if info.LinksPID[0] != pid { + t.testcase.err <- errIncorrect + return + } + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting for the exit reason to forward to the child + return + } + + switch m := input.(type) { + case error: + // got exit reason for the child + if m == gen.TerminateReasonKill { + pid := t.testcase.input.(gen.PID) + t.Node().Kill(pid) + return + } else { + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + } + case gen.MessageExitPID: + // if trap exit is true we receive gen.MessageExitPID + t.testcase.output = m + t.testcase.err <- nil + return + + } + panic(input) +} + +func (t *t6) LinkingProcessID(input any) { + + if _, ok := input.(initcase); ok { + t.SetTrapExit(t.testcase.input.(bool)) + + // start child + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetProcessID", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + name, ok := newtc.output.(gen.Atom) + if ok == false { + t.testcase.err <- errIncorrect + return + } + + // link with it + pname := gen.ProcessID{Name: name, Node: t.Node().Name()} + if err := t.LinkProcessID(pname); err != nil { + t.testcase.err <- err + return + } + if info, e := t.Info(); e != nil || len(info.LinksProcessID) != 1 { + t.testcase.err <- errIncorrect + return + } else { + if info.LinksProcessID[0] != pname { + t.testcase.err <- errIncorrect + return + } + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting for the exit reason to forward to the child + return + } + + switch m := input.(type) { + case error: + pid := t.testcase.input.(gen.PID) + switch m { + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + case gen.ErrUnregistered: + t.Send(pid, 1) + return + default: + t.SendExit(pid, m) + return + } + + case gen.MessageExitProcessID: + // if trap exit is true we receive gen.MessageExitProcessID + t.testcase.output = m + t.testcase.err <- nil + return + + } + panic(input) +} + +func (t *t6) LinkingAlias(input any) { + + if _, ok := input.(initcase); ok { + t.SetTrapExit(t.testcase.input.(bool)) + + // start child + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetAlias", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + alias, ok := newtc.output.(gen.Alias) + if ok == false { + t.testcase.err <- errIncorrect + return + } + + // link with it + if err := t.LinkAlias(alias); err != nil { + t.testcase.err <- err + return + } + if info, e := t.Info(); e != nil || len(info.LinksAlias) != 1 { + t.testcase.err <- errIncorrect + return + } else { + if info.LinksAlias[0] != alias { + t.testcase.err <- errIncorrect + return + } + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting for the exit reason to forward to the child + return + } + + switch m := input.(type) { + case error: + pid := t.testcase.input.(gen.PID) + switch m { + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + case gen.ErrUnregistered: + t.Send(pid, 1) + return + default: + t.SendExit(pid, m) + return + } + + case gen.MessageExitAlias: + // if trap exit is true we receive gen.MessageExitAlias + t.testcase.output = m +
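// hand the captured exit message back to the test method for the reason check +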
t.testcase.err <- nil + return + + } + panic(input) +} + +func (t *t6) LinkingEvent(input any) { + + if _, ok := input.(initcase); ok { + t.SetTrapExit(t.testcase.input.(bool)) + + // start child + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"TargetEvent", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.testcase.err <- err + return + } + name, ok := newtc.output.(gen.Atom) + if ok == false { + t.testcase.err <- errIncorrect + return + } + event := gen.Event{Name: name, Node: t.Node().Name()} + + // link with it + if _, err := t.LinkEvent(event); err != nil { + t.testcase.err <- err + return + } + if info, e := t.Info(); e != nil || len(info.LinksEvent) != 1 { + t.testcase.err <- errIncorrect + return + } else { + if info.LinksEvent[0] != event { + t.testcase.err <- errIncorrect + return + } + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting for the exit reason to forward to the child + return + } + + switch m := input.(type) { + case error: + pid := t.testcase.input.(gen.PID) + switch m { + case gen.TerminateReasonKill: + t.Node().Kill(pid) + return + case gen.ErrUnregistered: + t.Send(pid, 1) + return + default: + t.SendExit(pid, m) + return + } + + case gen.MessageExitEvent: + // if trap exit is true we receive gen.MessageExitEvent + t.testcase.output = m + t.testcase.err <- nil + return + + } + panic(input) +} + +func (t *t6) TargetPID(input any) { + if _, ok := input.(initcase); ok { + t.testcase.err <- nil + return + } + panic(input) +} + +func (t *t6) TargetProcessID(input any) { + if _, ok := input.(initcase); ok { + name := gen.Atom(lib.RandomString(10)) + if err := t.RegisterName(name); err != nil { + t.testcase.err <- err + return + } + t.testcase.output = name + t.testcase.err <- nil + return + } + + if _, ok := input.(int); ok { + if err := t.UnregisterName(); err != nil { + panic(err) + } + return + } + panic(input) +} + +func (t *t6) TargetAlias(input any) { + if _, ok := input.(initcase); ok { + if alias, err := t.CreateAlias(); err != nil { + t.testcase.err <- err + return + } else { + t.testcase.output = alias + } + t.testcase.err <- nil + return + } + + if _, ok := input.(int); ok { + alias := t.testcase.output.(gen.Alias) + if err := t.DeleteAlias(alias); err != nil { + t.testcase.err <- err + return + } + return + } + panic(input) +} + +func (t *t6) TargetEvent(input any) { + if _, ok := input.(initcase); ok { + name := gen.Atom(lib.RandomString(10)) + opts := gen.EventOptions{ + Notify: false, + Buffer: 10, + } + if _, err := t.RegisterEvent(name, opts); err != nil { + t.testcase.err <- err + return + } else { + t.testcase.output = name + } + t.testcase.err <- nil + return + } + + if _, ok := input.(int); ok { + name := t.testcase.output.(gen.Atom) + if err := t.UnregisterEvent(name); err != nil { + t.testcase.err <- err + return + } + return + } + panic(input) +} + +func TestT6ActorLink(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t6node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t6, popt) + if err != nil { + panic(err) + } + + t6cases = []*testcase{ + {"TestLinkPID", nil, nil, make(chan error)}, + {"TestLinkProcessID", nil, nil, make(chan error)}, + {"TestLinkAlias", nil, nil, make(chan error)}, + {"TestLinkEvent", nil, nil, make(chan error)}, + {"TestLinkUnknown", nil, nil,
make(chan error)}, + } + for _, tc := range t6cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t007_actor_event_test.go b/tests/001_local/t007_actor_event_test.go new file mode 100644 index 00000000..1ba957e9 --- /dev/null +++ b/tests/001_local/t007_actor_event_test.go @@ -0,0 +1,364 @@ +package local + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + t7cases []*testcase +) + +func factory_t7() gen.ProcessBehavior { + return &t7{} +} + +type t7 struct { + act.Actor + + testcase *testcase +} + +func (t *t7) Init(args ...any) error { + return nil +} + +func (t *t7) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} +func (t *t7) HandleEvent(message gen.MessageEvent) error { + var event gen.Event + switch m := t.testcase.input.(type) { + case gen.MessageEvent: + event = m.Event + case gen.Event: + event = m + default: + t.testcase.err <- errIncorrect + return nil + } + if message.Event != event { + t.testcase.err <- errIncorrect + return nil + } + + if message.Timestamp == 0 { + t.testcase.err <- errIncorrect + return nil + } + + t.testcase.output = message + t.testcase.err <- nil + return nil +} + +func (t *t7) Terminate(reason error) { + tc := t.testcase + if tc == nil { + return + } + tc.output = reason + + // we shouldn't be blocked by the channel, so we use select + select { + case tc.err <- nil: + default: + } +} + +func (t *t7) TestEvent(input any) { + defer func() { + t.testcase = nil + }() + + producerPID, err := t.Spawn(factory_t7, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + producerTC := &testcase{"ProducerNotify", nil, nil, make(chan error)} + t.Send(producerPID, producerTC) + if err := producerTC.wait(1); err != nil { + t.Node().Kill(producerPID) + t.testcase.err <- err + return + } + defer t.Node().Kill(producerPID) + + consumer1PID, err := t.Spawn(factory_t7, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(consumer1PID) + + // producerTC.output has the name of created event + name := producerTC.output.(gen.Atom) + event := gen.Event{Name: name, Node: t.Node().Name()} + consumer1TC := &testcase{"Consumer1", event, nil, make(chan error)} + t.Send(consumer1PID, consumer1TC) + if err := consumer1TC.wait(1); err != nil { + t.testcase.err <- err + return + } + + // producer should receive gen.MessageEventStart + if err := producerTC.wait(1); err != nil { + t.Node().Kill(producerPID) + t.testcase.err <- err + return + } + + t.Send(producerPID, int8(123)) + // producer sending event. 
check it + if err := producerTC.wait(1); err != nil { + t.testcase.err <- err + return + } + + // consumer1 should receive this event + if err := consumer1TC.wait(1); err != nil { + t.testcase.err <- err + return + } + + mev := consumer1TC.output.(gen.MessageEvent) + if mev.Event != event { + t.testcase.err <- errIncorrect + return + } + + if mev.Message.(int8) != int8(123) { + t.testcase.err <- errIncorrect + return + } + + // start yet another consumer + consumer2PID, err := t.Spawn(factory_t7, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(consumer2PID) + consumer2TC := &testcase{"Consumer2", mev, nil, make(chan error)} + t.Send(consumer2PID, consumer2TC) + if err := consumer2TC.wait(1); err != nil { + t.testcase.err <- err + return + } + + t.Send(producerPID, int16(1234)) + // producer sending event. check it + if err := producerTC.wait(1); err != nil { + t.testcase.err <- err + return + } + + // consumer1 should receive this event + if err := consumer1TC.wait(1); err != nil { + t.testcase.err <- err + return + } + + mev = consumer1TC.output.(gen.MessageEvent) + if mev.Event != event { + t.testcase.err <- errIncorrect + return + } + if mev.Message.(int16) != int16(1234) { + t.testcase.err <- errIncorrect + return + } + // consumer2 should also receive this event + if err := consumer2TC.wait(1); err != nil { + t.testcase.err <- err + return + } + + mev = consumer2TC.output.(gen.MessageEvent) + if mev.Event != event { + t.testcase.err <- errIncorrect + return + } + if mev.Message.(int16) != int16(1234) { + t.testcase.err <- errIncorrect + return + } + + // send both consumer int value so they should Unlink/Demonitor this event + t.Send(consumer1PID, 1) + if err := consumer1TC.wait(1); err != nil { + t.testcase.err <- err + return + } + t.Send(consumer2PID, 1) + if err := consumer2TC.wait(1); err != nil { + t.testcase.err <- err + return + } + + // ... 
and the producer should receive a MessageEventStop message since there are no + // consumers for this event anymore + if err := producerTC.wait(1); err != nil { + t.testcase.err <- err + return + } + t.testcase.err <- nil +} + +func (t *t7) ProducerNotify(input any) { + if _, ok := input.(initcase); ok { + name := gen.Atom(lib.RandomString(10)) + opts := gen.EventOptions{ + Notify: true, + Buffer: 10, + } + if token, err := t.RegisterEvent(name, opts); err != nil { + t.testcase.err <- err + return + } else { + t.testcase.input = token + t.testcase.output = name + t.testcase.err <- nil + return + } + } + + switch m := input.(type) { + case gen.MessageEventStart: + name := t.testcase.output.(gen.Atom) + if m.Name != name { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + + case gen.MessageEventStop: + name := t.testcase.output.(gen.Atom) + if m.Name != name { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + case int8: + token := t.testcase.input.(gen.Ref) + name := t.testcase.output.(gen.Atom) + if err := t.SendEvent(name, token, m); err != nil { + t.testcase.err <- err + return + } + t.testcase.err <- nil + case int16: + token := t.testcase.input.(gen.Ref) + name := t.testcase.output.(gen.Atom) + if err := t.SendEvent(name, token, m); err != nil { + t.testcase.err <- err + return + } + t.testcase.err <- nil + } + + // wait for messages +} + +func (t *t7) Consumer1(input any) { + if _, ok := input.(initcase); ok { + event := t.testcase.input.(gen.Event) + lastevents, err := t.LinkEvent(event) + if err != nil { + t.testcase.err <- errIncorrect + return + } + if len(lastevents) != 0 { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + + // waiting for the event + return + } + + switch input.(type) { + case int: + event := t.testcase.input.(gen.Event) + t.UnlinkEvent(event) + t.testcase.err <- nil + return + } + panic(input) +} + +func (t *t7) Consumer2(input any) { + if _, ok := input.(initcase); ok { + mev := t.testcase.input.(gen.MessageEvent) + lastevents, err := t.MonitorEvent(mev.Event) + if err != nil || len(lastevents) != 1 { + t.testcase.err <- errIncorrect + return + } + + // it must hold the last MessageEvent value + if lastevents[0] != mev { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- nil + + // waiting for the event + return + } + + switch input.(type) { + case int: + mev := t.testcase.input.(gen.MessageEvent) + t.DemonitorEvent(mev.Event) + t.testcase.err <- nil + return + } + panic(input) +} + +func TestT7ActorEvent(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t7node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t7, popt) + if err != nil { + panic(err) + } + + t7cases = []*testcase{ + {"TestEvent", nil, nil, make(chan error)}, + } + for _, tc := range t7cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t008_actor_split_test.go b/tests/001_local/t008_actor_split_test.go new file mode 100644 index 00000000..69824118 --- /dev/null +++ b/tests/001_local/t008_actor_split_test.go @@ -0,0 +1,196 @@ +package local + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +var ( + t8cases []*testcase +) + +func factory_t8() gen.ProcessBehavior { + return &t8{} +} + +type t8 struct { + act.Actor +
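+ // testcase tracks the currently running test case and its result channel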
+ testcase *testcase +} + +func (t *t8) Init(args ...any) error { + return nil +} + +func (t *t8) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t8) HandleMessageName(name gen.Atom, from gen.PID, message any) error { + t.testcase.output = name + t.testcase.err <- nil + return nil +} + +func (t *t8) HandleMessageAlias(alias gen.Alias, from gen.PID, message any) error { + t.testcase.output = alias + t.testcase.err <- nil + return nil +} + +func (t *t8) HandleCall(from gen.PID, ref gen.Ref, message any) (any, error) { + return t.PID(), nil +} + +func (t *t8) HandleCallName(name gen.Atom, from gen.PID, ref gen.Ref, message any) (any, error) { + return name, nil +} + +func (t *t8) HandleCallAlias(alias gen.Alias, from gen.PID, ref gen.Ref, message any) (any, error) { + return alias, nil +} + +// +// test methods +// + +func (t *t8) TestSplitHandle(input any) { + defer func() { + t.testcase = nil + }() + + // set/unset split + if t.SplitHandle() != false { + t.testcase.err <- errIncorrect + return + } + t.SetSplitHandle(true) + if t.SplitHandle() != true { + t.testcase.err <- errIncorrect + return + } + t.SetSplitHandle(false) + if t.SplitHandle() != false { + t.testcase.err <- errIncorrect + return + } + + for _, split := range []bool{false, true} { + name := gen.Atom(lib.RandomString(10)) + pid, err := t.SpawnRegister(name, factory_t8, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + defer t.Node().Kill(pid) + + targetTC := &testcase{"TargetSplit", split, nil, make(chan error)} + t.Send(pid, targetTC) + if err := targetTC.wait(1); err != nil { + t.testcase.err <- err + return + } + + target := []any{pid, name, targetTC.output.(gen.Alias)} + for _, tar := range target { + if err := t.Send(tar, "test"); err != nil { + t.testcase.err <- err + return + } + + if err := targetTC.wait(1); err != nil { + t.testcase.err <- err + return + } + + x := tar + if split == false { + // must be HandleMessage(...) 
invoked + x = pid + } + if reflect.DeepEqual(x, targetTC.output) == false { + t.testcase.err <- errIncorrect + return + } + res, err := t.Call(tar, "test") + if err != nil { + t.testcase.err <- err + return + } + + if reflect.DeepEqual(x, res) == false { + t.testcase.err <- errIncorrect + return + } + } + } + + t.testcase.err <- nil +} + +func (t *t8) TargetSplit(input any) { + if _, ok := input.(initcase); ok { + alias, err := t.CreateAlias() + if err != nil { + t.testcase.err <- err + return + } + split := t.testcase.input.(bool) + t.SetSplitHandle(split) + + t.testcase.output = alias + t.testcase.err <- nil + return + } + + t.testcase.output = t.PID() + t.testcase.err <- nil +} + +func TestT8ActorSplit(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t8node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t8, popt) + if err != nil { + panic(err) + } + + t8cases = []*testcase{ + {"TestSplitHandle", nil, nil, make(chan error)}, + } + for _, tc := range t8cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t009_supervisor_sofo_test.go b/tests/001_local/t009_supervisor_sofo_test.go new file mode 100644 index 00000000..9d7f04cc --- /dev/null +++ b/tests/001_local/t009_supervisor_sofo_test.go @@ -0,0 +1,486 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t9casesSOFO []*testcase +) + +func factory_t9_sofo() gen.ProcessBehavior { + return &t9_sofo{} +} + +type t9_sofo struct { + act.Actor + + testcase *testcase +} + +func (t *t9_sofo) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t9_sofo) TestBasic(input any) { + defer func() { + t.testcase = nil + }() + + // start supervisor + pid, err := t.Spawn(factory_t9_sup_sofo_basic, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + } + defer t.Node().Kill(pid) + + childtc := &testcase{"TestBasicSOFO", nil, nil, make(chan error)} + t.Send(pid, childtc) + if err := childtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + t.Send(pid, 12345) + if err := childtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + t.testcase.err <- nil +} + +func (t *t9_sofo) TestStrategy(input any) { + defer func() { + t.testcase = nil + }() + + // start supervisor with the given restart strategy + pid, err := t.Spawn(factory_t9_sup_sofo, gen.ProcessOptions{}, t.testcase.input) + if err != nil { + t.testcase.err <- err + } + + childtc := &testcase{"TestSOFO", 5, nil, make(chan error)} + t.Send(pid, childtc) + if err := childtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + reasons := []error{gen.TerminateReasonNormal, gen.TerminateReasonShutdown, gen.TerminateReasonKill} + for _, reason := range reasons { + t.Send(pid, reason) + if err := childtc.wait(1); err != nil { + t.testcase.err <- err + return + } + } + + // send exit signal 
to the supervisor process + exit := errors.New("my exit") + t.SendExit(pid, exit) + if err := childtc.wait(1); err != exit { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + t.testcase.err <- nil +} + +// SOFO supervisors + +// t9_sup_sofo_basic +func factory_t9_sup_sofo_basic() gen.ProcessBehavior { + return &t9_sup_sofo_basic{} +} + +type t9_sup_sofo_basic struct { + act.Supervisor + + testcase *testcase +} + +func (t *t9_sup_sofo_basic) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + spec.Type = act.SupervisorTypeSimpleOneForOne + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_sofo", + Factory: factory_t9_child_sofo, + }, + } + return spec, nil +} + +func (t *t9_sup_sofo_basic) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t9_sup_sofo_basic) TestBasicSOFO(input any) { + if _, ok := input.(initcase); ok { + // start child + if err := t.StartChild("child_sofo"); err != nil { + t.testcase.err <- err + return + } + // check children + children := t.Children() + if len(children) != 1 { + t.testcase.err <- errIncorrect + return + } + + // start child with unknown spec + if err := t.StartChild("child_sofo1"); err != act.ErrSupervisorChildUnknown { + t.testcase.err <- errIncorrect + return + } + + // check children. must be the same + children1 := t.Children() + if reflect.DeepEqual(children, children1) == false { + t.testcase.err <- errIncorrect + return + } + + // add new child spec + newChildSpec := act.SupervisorChildSpec{ + Name: "child_sofo1", + Factory: factory_t9_child_sofo, + } + if err := t.AddChild(newChildSpec); err != nil { + t.testcase.err <- err + return + } + + // check children. 
must be the same + children2 := t.Children() + if reflect.DeepEqual(children, children2) == false { + t.testcase.err <- errIncorrect + return + } + + // start child with new child spec + if err := t.StartChild("child_sofo1"); err != nil { + t.testcase.err <- err + return + } + + children3 := t.Children() + if len(children3) != 2 { + t.testcase.err <- errIncorrect + return + } + + if reflect.DeepEqual(children3[0], children[0]) == false { + t.testcase.err <- errIncorrect + return + } + + // disable unknown child spec + if err := t.DisableChild("child_sofo111"); err != act.ErrSupervisorChildUnknown { + t.testcase.err <- errIncorrect + return + } + + // disable known child spec + if err := t.DisableChild("child_sofo1"); err != nil { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + // try to start disabled child spec + if err := t.StartChild("child_sofo1"); err != act.ErrSupervisorChildDisabled { + t.testcase.err <- err + return + } + t.testcase.err <- nil +} + +// +// t9_sup_sofo +// + +func factory_t9_sup_sofo() gen.ProcessBehavior { + return &t9_sup_sofo{} +} + +type t9_child_stop struct { + pid gen.PID + reason error +} +type t9_sup_sofo struct { + act.Supervisor + + strategy act.SupervisorStrategy + testcase *testcase + waitStop []t9_child_stop + waitStart []gen.Atom +} + +func (t *t9_sup_sofo) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + spec.Type = act.SupervisorTypeSimpleOneForOne + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_sofo", + Factory: factory_t9_child_sofo, + }, + } + spec.Restart.Strategy = args[0].(act.SupervisorStrategy) + spec.EnableHandleChild = true + t.strategy = spec.Restart.Strategy + return spec, nil +} + +func (t *t9_sup_sofo) HandleChildStart(name gen.Atom, pid gen.PID) error { + + // shouldn't start before stopping + if len(t.waitStop) > 0 { + t.testcase.err <- errIncorrect + return nil + } + waitName := t.waitStart[0] + if waitName != name { + t.testcase.err <- errIncorrect + return nil + } + + t.waitStart = t.waitStart[1:] + if len(t.waitStart) > 0 { + return nil + } + + // done + t.testcase.err <- nil + return nil +} + +func (t *t9_sup_sofo) HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error { + if len(t.waitStop) == 0 { + // do nothing + return nil + } + + stop := t.waitStop[0] + if stop.pid != pid { + t.testcase.err <- errIncorrect + return nil + } + if stop.reason != reason { + t.testcase.err <- errIncorrect + return nil + } + t.waitStop = t.waitStop[1:] + if len(t.waitStart) > 0 { + return nil + } + if len(t.waitStop) > 0 { + return nil + } + + t.testcase.err <- nil + return nil +} + +func (t *t9_sup_sofo) Terminate(reason error) { + select { + case t.testcase.err <- reason: + default: + } +} + +func (t *t9_sup_sofo) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t9_sup_sofo) TestSOFO(input any) { + if _, ok := input.(initcase); ok { + n := t.testcase.input.(int) + childrentc := &testcase{"TestChildrenSOFO", nil, nil, make(chan error, 5)} + // start n child processes + for i := 0; i < n; i++ { + if err := t.StartChild("child_sofo", childrentc); err != nil { + 
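// starting a child failed; report it and abort the test +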
t.testcase.err <- err + return + } + if err := childrentc.wait(1); err != nil { + t.testcase.err <- err + return + } + } + + t.waitStart = []gen.Atom{"child_sofo", "child_sofo", "child_sofo", "child_sofo", "child_sofo"} + + // check the number of children + children := t.Children() + if len(children) != n { + t.testcase.err <- errIncorrect + return + } + + return + } + + // terminate child process with a given reason + reason := input.(error) + children := t.Children() + if len(children) == 0 { + t.testcase.err <- errIncorrect + return + } + if err := t.SendExit(children[0].PID, reason); err != nil { + t.testcase.err <- err + return + } + + stop := t9_child_stop{ + pid: children[0].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + switch t.strategy { + case act.SupervisorStrategyTransient: + if reason == gen.TerminateReasonKill { + t.waitStart = append(t.waitStart, children[0].Spec) + } + case act.SupervisorStrategyTemporary: + break + case act.SupervisorStrategyPermanent: + t.waitStart = append(t.waitStart, children[0].Spec) + } + +} + +// +// t9_child_sofo +// + +func factory_t9_child_sofo() gen.ProcessBehavior { + return &t9_child_sofo{} +} + +type t9_child_sofo struct { + act.Actor + + testcase *testcase +} + +func (t *t9_child_sofo) Init(args ...any) error { + if len(args) > 0 { + t.testcase = args[0].(*testcase) + t.testcase.err <- nil + } + return nil +} + +func (t *t9_child_sofo) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t9_child_sofo) TestChildrenSOFO(input any) { + if _, ok := input.(initcase); ok { + t.testcase.err <- nil + } + panic(input) +} + +// tests +func TestT9SupervisorSOFO(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := ergo.StartNode("t9nodeSOFO@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t9_sofo, popt) + if err != nil { + panic(err) + } + + t9casesSOFO = []*testcase{ + {"TestBasic", nil, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyTransient, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyTemporary, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyPermanent, nil, make(chan error)}, + } + for _, tc := range t9casesSOFO { + name := tc.name + if tc.input != nil { + name = fmt.Sprintf("%s:%s", tc.name, tc.input) + } + t.Run(name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t010_supervisor_ofo_test.go b/tests/001_local/t010_supervisor_ofo_test.go new file mode 100644 index 00000000..fcd10063 --- /dev/null +++ b/tests/001_local/t010_supervisor_ofo_test.go @@ -0,0 +1,719 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t10casesOFO []*testcase +) + +func factory_t10_ofo() gen.ProcessBehavior { + return &t10_ofo{} +} + +type t10_ofo struct { + act.Actor + + testcase *testcase +} + +func (t *t10_ofo) HandleMessage(from gen.PID, message any) error { + if 
t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t10_ofo) TestBasic(input any) { + defer func() { + t.testcase = nil + }() + + // start supervisor + pid, err := t.Spawn(factory_t10_sup_ofo_basic, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + } + defer t.Node().Kill(pid) + + childtc := &testcase{"TestBasicOFO", nil, nil, make(chan error)} + t.Send(pid, childtc) + if err := childtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // to test starting disabled spec. send random message + t.Send(pid, 12345) + if err := childtc.wait(1); err != nil { + t.testcase.err <- err + return + } + + t.testcase.err <- nil +} + +func (t *t10_ofo) TestStrategy(input any) { + defer func() { + t.testcase = nil + }() + + // start supervisor with the given restart strategy + pid, err := t.Spawn(factory_t10_sup_ofo, gen.ProcessOptions{}, t.testcase.input) + if err != nil { + t.testcase.err <- err + } + + childtc := &testcase{"TestOFO", nil, nil, make(chan error)} + t.Send(pid, childtc) + if err := childtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + reasons := []error{gen.TerminateReasonNormal, gen.TerminateReasonShutdown, gen.TerminateReasonKill} + for _, reason := range reasons { + t.Send(pid, reason) + if err := childtc.wait(1); err != nil { + t.testcase.err <- err + return + } + } + + // send exit signal to the supervisor process + exit := errors.New("my exit") + t.SendExit(pid, exit) + if err := childtc.wait(1); err != exit { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + t.testcase.err <- nil +} + +func (t *t10_ofo) TestSignificant(input any) { + defer func() { + t.testcase = nil + }() + + strategy := t.testcase.input.(act.SupervisorStrategy) + // start supervisor with the given restart strategy + pid, err := t.Spawn(factory_t10_sup_ofo_significant, gen.ProcessOptions{}, strategy) + if err != nil { + t.testcase.err <- err + } + childtc := &testcase{"TestOFOSignificant", nil, nil, make(chan error)} + t.Send(pid, childtc) + if err := childtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + switch strategy { + case act.SupervisorStrategyTransient: + reason := errors.New("custom reason1") + t.Send(pid, reason) + // supervisor will use this reason for SendExit to the significant child process. 
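+ // the waitStop/waitStart queues let HandleChildTerminate and HandleChildStart verify the stop/start ordering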
+ // due to the abnormal reason and the transient strategy, the child process will be restarted + // and 'wait' should return nil + if err := childtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason = gen.TerminateReasonNormal + // this reason causes the significant child process to terminate with the normal reason, + // which makes the supervisor shut down with the same reason + t.Send(pid, reason) + if err := childtc.wait(1); err != gen.TerminateReasonNormal { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.testcase.err <- nil + + case act.SupervisorStrategyTemporary: + reason := errors.New("custom reason2") + // any reason will cause supervisor termination with the reason of + // significant child termination + t.Send(pid, reason) + if err := childtc.wait(1); err != reason { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.testcase.err <- nil + + case act.SupervisorStrategyPermanent: + reasons := []error{ + gen.TerminateReasonNormal, + gen.TerminateReasonShutdown, + errors.New("custom reason3"), + } + for _, reason := range reasons { + t.Send(pid, reason) + if err := childtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + } + t.testcase.err <- nil + + } +} + +// t10_sup_ofo_basic +func factory_t10_sup_ofo_basic() gen.ProcessBehavior { + return &t10_sup_ofo_basic{} +} + +type t10_sup_ofo_basic struct { + act.Supervisor + + testcase *testcase +} + +func (t *t10_sup_ofo_basic) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + spec.Type = act.SupervisorTypeOneForOne + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_ofo", + Factory: factory_t10_child_ofo, + }, + { + Name: "child_ofo1", + Factory: factory_t10_child_ofo, + }, + { + Name: "child_ofo2", + Factory: factory_t10_child_ofo, + }, + } + return spec, nil +} + +func (t *t10_sup_ofo_basic) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t10_sup_ofo_basic) TestBasicOFO(input any) { + if _, ok := input.(initcase); ok { + // check children + children := t.Children() + if len(children) != 3 { + t.testcase.err <- errIncorrect + return + } + + // all children should have a registered name (the same as the spec one) + for _, c := range children { + if c.Spec != c.Name { + t.testcase.err <- errIncorrect + return + } + } + + // start child + if err := t.StartChild("child_ofo"); err != act.ErrSupervisorChildRunning { + t.testcase.err <- err + return + } + // check children. must be the same + if reflect.DeepEqual(children, t.Children()) == false { + t.testcase.err <- errIncorrect + return + } + + // start child with unknown spec + if err := t.StartChild("child_ofo111"); err != act.ErrSupervisorChildUnknown { + t.testcase.err <- errIncorrect + return + } + + // check children.
must be the same + if reflect.DeepEqual(children, t.Children()) == false { + t.testcase.err <- errIncorrect + return + } + + // add new child spec + newChildSpec := act.SupervisorChildSpec{ + Name: "child_ofo3", + Factory: factory_t10_child_ofo, + } + if err := t.AddChild(newChildSpec); err != nil { + t.testcase.err <- err + return + } + + children = t.Children() + if len(children) != 4 { + t.testcase.err <- errIncorrect + return + } + // it should have registered name + if children[3].Spec != children[3].Name { + t.testcase.err <- errIncorrect + return + } + + if err := t.StartChild("child_ofo3"); err != act.ErrSupervisorChildRunning { + t.testcase.err <- err + return + } + // check children. must be the same + if reflect.DeepEqual(children, t.Children()) == false { + t.testcase.err <- errIncorrect + return + } + + // disable unknown child spec + if err := t.DisableChild("child_ofo111"); err != act.ErrSupervisorChildUnknown { + t.testcase.err <- errIncorrect + return + } + + // disable known child spec + if err := t.DisableChild("child_ofo"); err != nil { + t.testcase.err <- errIncorrect + return + } + + children[0].Disabled = true + + // we do not wait the child termination. just align the expected values + children[0].PID = gen.PID{} + children1 := t.Children() + children1[0].PID = gen.PID{} + // check children. must be the same + if reflect.DeepEqual(children, children1) == false { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + // try to start disabled child spec + if err := t.StartChild("child_ofo"); err != act.ErrSupervisorChildDisabled { + t.testcase.err <- err + return + } + t.testcase.err <- nil +} + +// t10_sup_ofo +func factory_t10_sup_ofo() gen.ProcessBehavior { + return &t10_sup_ofo{} +} + +type t10_child_stop struct { + pid gen.PID + reason error +} + +type t10_sup_ofo struct { + act.Supervisor + + strategy act.SupervisorStrategy + testcase *testcase + waitStop []t10_child_stop + waitStart []gen.Atom +} + +func (t *t10_sup_ofo) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + spec.Type = act.SupervisorTypeOneForOne + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_ofo10", + Factory: factory_t10_child_ofo, + }, + { + Name: "child_ofo11", + Factory: factory_t10_child_ofo, + }, + { + Name: "child_ofo12", + Factory: factory_t10_child_ofo, + }, + } + spec.Restart.Strategy = args[0].(act.SupervisorStrategy) + spec.EnableHandleChild = true + spec.DisableAutoShutdown = true + t.strategy = spec.Restart.Strategy + + t.waitStart = []gen.Atom{"child_ofo10", "child_ofo11", "child_ofo12"} + return spec, nil +} + +func (t *t10_sup_ofo) HandleChildStart(name gen.Atom, pid gen.PID) error { + // shouldn't start before stopping + if len(t.waitStop) > 0 { + t.testcase.err <- errIncorrect + return nil + } + waitName := t.waitStart[0] + if waitName != name { + t.testcase.err <- errIncorrect + return nil + } + + t.waitStart = t.waitStart[1:] + if len(t.waitStart) > 0 { + return nil + } + + // done + if t.testcase != nil { + t.testcase.err <- nil + } + return nil +} + +func (t *t10_sup_ofo) HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error { + if len(t.waitStop) == 0 { + // do nothing + return nil + } + + stop := t.waitStop[0] + if stop.pid != pid { + t.testcase.err <- errIncorrect + return nil + } + if stop.reason != reason { + t.testcase.err <- errIncorrect + return nil + } + t.waitStop = t.waitStop[1:] + if len(t.waitStart) > 0 { + return nil + } + if len(t.waitStop) > 0 { + return nil + } + 
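+ // all expected terminations and restarts have been observed; report success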
+ t.testcase.err <- nil + return nil +} + +func (t *t10_sup_ofo) Terminate(reason error) { + //var empty gen.PID + select { + case t.testcase.err <- reason: + default: + } +} + +func (t *t10_sup_ofo) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t10_sup_ofo) TestOFO(input any) { + if _, ok := input.(initcase); ok { + // check children + children := t.Children() + if len(children) != 3 { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + // terminate child process with a given reason + reason := input.(error) + children := t.Children() + if len(children) == 0 { + t.testcase.err <- errIncorrect + return + } + empty := gen.PID{} + for _, c := range children { + if c.PID == empty { + continue + } + if err := t.SendExit(c.PID, reason); err != nil { + t.testcase.err <- err + return + } + + stop := t10_child_stop{ + pid: c.PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + switch t.strategy { + case act.SupervisorStrategyTransient: + if reason == gen.TerminateReasonKill { + t.waitStart = append(t.waitStart, c.Spec) + } + case act.SupervisorStrategyTemporary: + break + case act.SupervisorStrategyPermanent: + t.waitStart = append(t.waitStart, c.Spec) + } + return + } + panic("shouldn't be here") +} + +// t10_sup_ofo_significant +func factory_t10_sup_ofo_significant() gen.ProcessBehavior { + return &t10_sup_ofo_significant{} +} + +type t10_sup_ofo_significant struct { + act.Supervisor + + strategy act.SupervisorStrategy + testcase *testcase + waitStop []t10_child_stop + waitStart []gen.Atom +} + +func (t *t10_sup_ofo_significant) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + spec.Type = act.SupervisorTypeOneForOne + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_ofo100", + Factory: factory_t10_child_ofo, + }, + { + Name: "child_ofo110", + Significant: true, + Factory: factory_t10_child_ofo, + }, + } + spec.Restart.Strategy = args[0].(act.SupervisorStrategy) + spec.EnableHandleChild = true + t.strategy = spec.Restart.Strategy + t.waitStart = []gen.Atom{"child_ofo100", "child_ofo110"} + return spec, nil +} + +func (t *t10_sup_ofo_significant) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t10_sup_ofo_significant) HandleChildStart(name gen.Atom, pid gen.PID) error { + // shouldn't start before stopping + if len(t.waitStop) > 0 { + t.testcase.err <- errIncorrect + return nil + } + waitName := t.waitStart[0] + if waitName != name { + t.testcase.err <- errIncorrect + return nil + } + + t.waitStart = t.waitStart[1:] + if len(t.waitStart) > 0 { + return nil + } + + // done + if t.testcase != nil { + t.testcase.err <- nil + } + return nil +} + +func (t *t10_sup_ofo_significant) HandleChildTerminate(name gen.Atom, pid 
gen.PID, reason error) error { + if len(t.waitStop) == 0 { + // do nothing + return nil + } + + stop := t.waitStop[0] + if stop.pid != pid { + t.testcase.err <- errIncorrect + return nil + } + if stop.reason != reason { + t.testcase.err <- errIncorrect + return nil + } + t.waitStop = t.waitStop[1:] + if len(t.waitStart) > 0 { + return nil + } + if len(t.waitStop) > 0 { + return nil + } + + t.testcase.err <- nil + return nil +} + +func (t *t10_sup_ofo_significant) TestOFOSignificant(input any) { + if _, ok := input.(initcase); ok { + // check children + children := t.Children() + if len(children) != 2 { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + // terminate significant child with a given reason + reason := input.(error) + children := t.Children() + c := children[1] + if c.Significant != true { + t.Log().Error("it is not a significant spec") + t.testcase.err <- errIncorrect + return + } + if err := t.SendExit(c.PID, reason); err != nil { + t.testcase.err <- err + return + } + + stop := t10_child_stop{ + pid: c.PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + switch t.strategy { + case act.SupervisorStrategyTransient: + if reason != gen.TerminateReasonNormal && reason != gen.TerminateReasonShutdown { + t.waitStart = append(t.waitStart, c.Spec) + } + case act.SupervisorStrategyTemporary: + break + case act.SupervisorStrategyPermanent: + t.waitStart = append(t.waitStart, c.Spec) + } +} + +func (t *t10_sup_ofo_significant) Terminate(reason error) { + select { + case t.testcase.err <- reason: + default: + } +} + +// +// t10_child_ofo +// + +func factory_t10_child_ofo() gen.ProcessBehavior { + return &t10_child_ofo{} +} + +type t10_child_ofo struct { + act.Actor +} + +// tests +func TestT10SupervisorOFO(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("t10nodeOFO@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t10_ofo, popt) + if err != nil { + panic(err) + } + + t10casesOFO = []*testcase{ + {"TestBasic", nil, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyTransient, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyTemporary, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyPermanent, nil, make(chan error)}, + {"TestSignificant", act.SupervisorStrategyTransient, nil, make(chan error)}, + {"TestSignificant", act.SupervisorStrategyTemporary, nil, make(chan error)}, + {"TestSignificant", act.SupervisorStrategyPermanent, nil, make(chan error)}, + //TODO {"TestAutoShutdown", nil, nil, make(chan error)}, + //TODO {"TestRestartIntensity", nil, nil, make(chan error)}, + } + for _, tc := range t10casesOFO { + name := tc.name + if tc.input != nil { + name = fmt.Sprintf("%s:%s", tc.name, tc.input) + } + t.Run(name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t011_supervisor_arfo_test.go b/tests/001_local/t011_supervisor_arfo_test.go new file mode 100644 index 00000000..99f943c7 --- /dev/null +++ b/tests/001_local/t011_supervisor_arfo_test.go @@ -0,0 +1,1083 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t11casesARFO []*testcase +) + +func factory_t11_arfo() gen.ProcessBehavior { + return 
&t11_arfo{}
+}
+
+type t11_arfo struct {
+    act.Actor
+
+    supType  act.SupervisorType
+    testcase *testcase
+}
+
+func (t *t11_arfo) Init(args ...any) error {
+    supType := args[0].(act.SupervisorType)
+    t.supType = supType
+    return nil
+}
+
+func (t *t11_arfo) HandleMessage(from gen.PID, message any) error {
+    if t.testcase == nil {
+        t.testcase = message.(*testcase)
+        message = initcase{}
+    }
+    // get method by name
+    method := reflect.ValueOf(t).MethodByName(t.testcase.name)
+    if method.IsValid() == false {
+        t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name)
+        t.testcase = nil
+        return nil
+    }
+    method.Call([]reflect.Value{reflect.ValueOf(message)})
+    return nil
+}
+
+func (t *t11_arfo) TestBasic(input any) {
+    defer func() {
+        t.testcase = nil
+    }()
+
+    // start supervisor
+    pid, err := t.Spawn(factory_t11_sup_arfo_basic, gen.ProcessOptions{}, t.supType)
+    if err != nil {
+        t.testcase.err <- err
+        return
+    }
+    defer t.Node().Kill(pid)
+
+    childtc := &testcase{"TestBasicARFO", nil, nil, make(chan error)}
+    t.Send(pid, childtc)
+    if err := childtc.wait(1); err != nil {
+        t.testcase.err <- err
+        return
+    }
+
+    // to test starting a disabled spec, send a random message
+    t.Send(pid, 12345)
+    if err := childtc.wait(1); err != nil {
+        t.testcase.err <- err
+        return
+    }
+
+    t.testcase.err <- nil
+}
+
+func (t *t11_arfo) TestStrategy(input any) {
+    var childtc *testcase
+
+    exit := errors.New("my exit")
+    defer func() {
+        t.testcase = nil
+    }()
+
+    // start supervisor with the given supervision type, restart strategy,
+    // and ask the supervisor to send the termination reason to the child process
+    pid, err := t.Spawn(factory_t11_sup_arfo, gen.ProcessOptions{},
+        t.supType, t.testcase.input) // args
+    if err != nil {
+        t.testcase.err <- err
+        return
+    }
+
+    if t.supType == act.SupervisorTypeAllForOne {
+        childtc = &testcase{"TestAFO", nil, nil, make(chan error)}
+    } else {
+        childtc = &testcase{"TestRFO", nil, nil, make(chan error)}
+    }
+
+    t.Send(pid, childtc)
+    if err := childtc.wait(1); err != nil {
+        t.Node().Kill(pid)
+        t.testcase.err <- err
+        return
+    }
+
+    // test stopping/restarting child processes
+    // depending on the termination reason and restart strategy
+    reasons := []error{gen.TerminateReasonNormal, gen.TerminateReasonShutdown, gen.TerminateReasonKill}
+    for _, reason := range reasons {
+        t.Send(pid, reason)
+        if err := childtc.wait(1); err != nil {
+            t.testcase.err <- err
+            return
+        }
+    }
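+
+    // the supervisor was spawned with DisableAutoShutdown, so it stays alive after
+    // the children stop; its Terminate callback forwards the exit reason to childtc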
+    // send exit signal to the supervisor process
+    t.SendExit(pid, exit)
+    if err := childtc.wait(1); err != exit {
+        t.Node().Kill(pid)
+        t.testcase.err <- err
+        return
+    }
+
+    // another test... sending kill as a termination reason
+    // to the 1st, 2nd and 3rd child process
+
+    if t.supType == act.SupervisorTypeAllForOne {
+        childtc = &testcase{"TestAFO", nil, nil, make(chan error)}
+    } else {
+        childtc = &testcase{"TestRFO", nil, nil, make(chan error)}
+    }
+
+    pid1, err := t.Spawn(factory_t11_sup_arfo, gen.ProcessOptions{},
+        t.supType, t.testcase.input) // args
+    if err != nil {
+        t.testcase.err <- err
+        return
+    }
+
+    t.Send(pid1, childtc)
+    if err := childtc.wait(1); err != nil {
+        t.Node().Kill(pid1)
+        t.testcase.err <- err
+        return
+    }
+    // test restarting children (it will be gen.TerminateReasonKill for every child process)
+    // depending on the termination reason and restart strategy
+    // n - child index in the spec
+    for n := 0; n < 3; n++ {
+        t.Send(pid1, n)
+        if err := childtc.wait(1); err != nil {
+            t.testcase.err <- err
+            return
+        }
+    }
+
+    t.SendExit(pid1, exit)
+    if err := childtc.wait(1); err != exit {
+        t.Node().Kill(pid1)
+        t.testcase.err <- err
+        return
+    }
+    t.testcase.err <- nil
+}
+
+func (t *t11_arfo) TestSignificant(input any) {
+    defer func() {
+        t.testcase = nil
+    }()
+
+    strategy := t.testcase.input.(act.SupervisorStrategy)
+    // start supervisor with the given restart strategy
+    pid, err := t.Spawn(factory_t11_sup_arfo_significant, gen.ProcessOptions{},
+        t.supType, t.testcase.input) // args
+    if err != nil {
+        t.testcase.err <- err
+        return
+    }
+    childtc := &testcase{"TestARFOSignificant", nil, nil, make(chan error)}
+    t.Send(pid, childtc)
+    if err := childtc.wait(1); err != nil {
+        t.Node().Kill(pid)
+        t.testcase.err <- err
+        return
+    }
+
+    switch strategy {
+    case act.SupervisorStrategyTransient:
+        reason := errors.New("custom reason1")
+        t.Send(pid, reason)
+        // supervisor will use this reason for SendExit to the significant child process.
+        // due to the abnormal reason and the transient strategy the child process
+        // will be restarted, and 'wait' should return nil
+        if err := childtc.wait(1); err != nil {
+            t.Node().Kill(pid)
+            t.testcase.err <- err
+            return
+        }
+        reason = gen.TerminateReasonNormal
+        // a normal reason terminates the significant child process normally,
+        // which makes the supervisor shut down with the same reason
+        t.Send(pid, reason)
+        if err := childtc.wait(1); err != gen.TerminateReasonNormal {
+            t.Node().Kill(pid)
+            t.testcase.err <- err
+            return
+        }
+        t.testcase.err <- nil
+
+    case act.SupervisorStrategyTemporary:
+        reason := errors.New("custom reason2")
+        // any reason will cause supervisor termination with the reason of
+        // the significant child termination
+        t.Send(pid, reason)
+        if err := childtc.wait(1); err != reason {
+            t.Node().Kill(pid)
+            t.testcase.err <- err
+            return
+        }
+        t.testcase.err <- nil
+
+    case act.SupervisorStrategyPermanent:
+        reasons := []error{
+            gen.TerminateReasonNormal,
+            gen.TerminateReasonShutdown,
+            errors.New("custom reason3"),
+        }
+        for _, reason := range reasons {
+            t.Send(pid, reason)
+            if err := childtc.wait(1); err != nil {
+                t.Node().Kill(pid)
+                t.testcase.err <- err
+                return
+            }
+        }
+        t.testcase.err <- nil
+    }
+}
+
+// t11_sup_arfo_basic
+func factory_t11_sup_arfo_basic() gen.ProcessBehavior {
+    return &t11_sup_arfo_basic{}
+}
+
+type t11_sup_arfo_basic struct {
+    act.Supervisor
+
+    testcase *testcase
+}
+
+func (t *t11_sup_arfo_basic) Init(args ...any) (act.SupervisorSpec, error) {
+    var spec act.SupervisorSpec
+    spec.Type = args[0].(act.SupervisorType)
+    spec.Children = []act.SupervisorChildSpec{
+        {
+            Name:    "child_arfo",
+            Factory: factory_t11_child_arfo,
+        },
+        {
+            Name:    "child_arfo1",
+            Factory: factory_t11_child_arfo,
+        },
+        {
+            Name:    "child_arfo2",
+            Factory: factory_t11_child_arfo,
+        },
+    }
+    return spec, nil
+}
+
+func (t *t11_sup_arfo_basic) HandleMessage(from gen.PID, message any) error {
+    if t.testcase == nil {
+        t.testcase = message.(*testcase)
+        message = initcase{}
+    }
+    // get method by name
+    method := reflect.ValueOf(t).MethodByName(t.testcase.name)
+    if method.IsValid() == false {
+        t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name)
+        t.testcase = nil
+        return nil
+    }
+    method.Call([]reflect.Value{reflect.ValueOf(message)})
+    return nil
+}
+
+func (t *t11_sup_arfo_basic) TestBasicARFO(input any) {
+    if _, ok := input.(initcase); ok {
+        // check children
+        children := t.Children()
+        if len(children) != 3 {
+            t.testcase.err <- errIncorrect
+            return
+        }
+
+        // all children should have a registered name (the same as the spec name)
+        for _, c := range children {
+            if c.Spec != c.Name {
+                t.testcase.err <- errIncorrect
+                return
+            }
+        }
+
+        // start child
+        if err := t.StartChild("child_arfo"); err != act.ErrSupervisorChildRunning {
+            t.testcase.err <- err
+            return
+        }
+        // check children. must be the same
+        if reflect.DeepEqual(children, t.Children()) == false {
+            t.testcase.err <- errIncorrect
+            return
+        }
+
+        // start child with unknown spec
+        if err := t.StartChild("child_arfo111"); err != act.ErrSupervisorChildUnknown {
+            t.testcase.err <- errIncorrect
+            return
+        }
+
+        // check children.
must be the same + if reflect.DeepEqual(children, t.Children()) == false { + t.testcase.err <- errIncorrect + return + } + + // add new child spec + newChildSpec := act.SupervisorChildSpec{ + Name: "child_arfo3", + Factory: factory_t11_child_arfo, + } + if err := t.AddChild(newChildSpec); err != nil { + t.testcase.err <- err + return + } + + children = t.Children() + if len(children) != 4 { + t.testcase.err <- errIncorrect + return + } + // it should have registered name + if children[3].Spec != children[3].Name { + t.testcase.err <- errIncorrect + return + } + + if err := t.StartChild("child_arfo3"); err != act.ErrSupervisorChildRunning { + t.testcase.err <- err + return + } + // check children. must be the same + if reflect.DeepEqual(children, t.Children()) == false { + t.testcase.err <- errIncorrect + return + } + + // disable unknown child spec + if err := t.DisableChild("child_arfo111"); err != act.ErrSupervisorChildUnknown { + t.testcase.err <- errIncorrect + return + } + + // disable known child spec + if err := t.DisableChild("child_arfo"); err != nil { + t.testcase.err <- errIncorrect + return + } + children[0].Disabled = true + + // we do not wait the child termination. just align the expected values + children[0].PID = gen.PID{} + children1 := t.Children() + children1[0].PID = gen.PID{} + + // check children. must be the same + if reflect.DeepEqual(children, children1) == false { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + // try to start disabled child spec + if err := t.StartChild("child_arfo"); err != act.ErrSupervisorChildDisabled { + t.testcase.err <- err + return + } + t.testcase.err <- nil +} + +// t11_sup_arfo +func factory_t11_sup_arfo() gen.ProcessBehavior { + return &t11_sup_arfo{} +} + +type t11_child_stop struct { + pid gen.PID + reason error +} + +type t11_sup_arfo struct { + act.Supervisor + + supType act.SupervisorType + strategy act.SupervisorStrategy + + testcase *testcase + waitStop []t11_child_stop + waitStart []gen.Atom +} + +func (t *t11_sup_arfo) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + + t.supType = args[0].(act.SupervisorType) + spec.Type = t.supType + t.strategy = args[1].(act.SupervisorStrategy) + spec.Restart.Strategy = t.strategy + spec.Restart.KeepOrder = true + + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_arfo10", + Factory: factory_t11_child_arfo, + }, + { + Name: "child_arfo11", + Factory: factory_t11_child_arfo, + }, + { + Name: "child_arfo12", + Factory: factory_t11_child_arfo, + }, + } + spec.EnableHandleChild = true + spec.DisableAutoShutdown = true + + t.waitStart = []gen.Atom{"child_arfo10", "child_arfo11", "child_arfo12"} + return spec, nil +} + +func (t *t11_sup_arfo) HandleChildStart(name gen.Atom, pid gen.PID) error { + // shouldn't start before stopping + if len(t.waitStop) > 0 { + t.testcase.err <- errIncorrect + return nil + } + waitName := t.waitStart[0] + if waitName != name { + t.testcase.err <- errIncorrect + return nil + } + + t.waitStart = t.waitStart[1:] + if len(t.waitStart) > 0 { + return nil + } + + // done + if t.testcase != nil { + t.testcase.err <- nil + } + return nil +} + +func (t *t11_sup_arfo) HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error { + if len(t.waitStop) == 0 { + // do nothing + return nil + } + + stop := t.waitStop[0] + if stop.pid != pid { + t.testcase.err <- errIncorrect + return nil + } + if stop.reason != reason { + t.testcase.err <- errIncorrect + return nil + } + t.waitStop = 
t.waitStop[1:] + if len(t.waitStart) > 0 { + return nil + } + if len(t.waitStop) > 0 { + return nil + } + + t.testcase.err <- nil + return nil +} + +func (t *t11_sup_arfo) Terminate(reason error) { + //var empty gen.PID + select { + case t.testcase.err <- reason: + default: + } +} + +func (t *t11_sup_arfo) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t11_sup_arfo) TestAFO(input any) { + if _, ok := input.(initcase); ok { + // check children + children := t.Children() + if len(children) != 3 { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + switch v := input.(type) { + case error: + // terminate child process with a given reason + empty := gen.PID{} + reason := v + children := t.Children() + if len(children) == 0 { + t.testcase.err <- errIncorrect + return + } + + n := 0 + switch reason { + case gen.TerminateReasonShutdown: + n = 1 + case gen.TerminateReasonKill: + n = 2 + } + if err := t.SendExit(children[n].PID, reason); err != nil { + t.testcase.err <- err + return + } + stop := t11_child_stop{ + pid: children[n].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + for i, c := range children { + k := len(children) - 1 - i + switch t.strategy { + case act.SupervisorStrategyTransient: + if reason == gen.TerminateReasonKill { + t.waitStart = append(t.waitStart, c.Spec) + if children[k].PID == children[n].PID { + // already added to wait list + continue + } + + if children[k].PID == empty { + // already terminated + continue + } + stop = t11_child_stop{ + pid: children[k].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + continue + } + return + case act.SupervisorStrategyTemporary: + return + case act.SupervisorStrategyPermanent: + t.waitStart = append(t.waitStart, c.Spec) + if children[k].PID == children[n].PID { + // already added to wait list + continue + } + stop = t11_child_stop{ + pid: children[k].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + } + } + return + case int: + children := t.Children() + if len(children) == 0 { + t.testcase.err <- errIncorrect + return + } + if err := t.SendExit(children[v].PID, gen.TerminateReasonKill); err != nil { + t.testcase.err <- err + return + } + stop := t11_child_stop{ + pid: children[v].PID, + reason: gen.TerminateReasonKill, + } + t.waitStop = append(t.waitStop, stop) + if t.strategy == act.SupervisorStrategyTemporary { + return + } + for i := range children { + t.waitStart = append(t.waitStart, children[i].Spec) + + k := len(children) - 1 - i + if children[k].PID == children[v].PID { + // already appended + continue + } + stop = t11_child_stop{ + pid: children[k].PID, + reason: gen.TerminateReasonKill, + } + t.waitStop = append(t.waitStop, stop) + } + return + } + panic("shouldn't be here") +} + +func (t *t11_sup_arfo) TestRFO(input any) { + if _, ok := input.(initcase); ok { + // check children + children := t.Children() + if len(children) != 3 { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + switch v := input.(type) { + case error: + // terminate child process with a given reason + empty := gen.PID{} + reason := 
v + children := t.Children() + if len(children) == 0 { + t.testcase.err <- errIncorrect + return + } + + n := 0 + switch reason { + case gen.TerminateReasonShutdown: + n = 1 + case gen.TerminateReasonKill: + n = 2 + } + if err := t.SendExit(children[n].PID, reason); err != nil { + t.testcase.err <- err + return + } + stop := t11_child_stop{ + pid: children[n].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + ch := children[n:] + for i, c := range ch { + k := len(ch) - 1 - i + switch t.strategy { + case act.SupervisorStrategyTransient: + if reason == gen.TerminateReasonKill { + t.waitStart = append(t.waitStart, c.Spec) + if ch[k].PID == children[n].PID { + // already added to wait list + continue + } + + if ch[k].PID == empty { + // already terminated + continue + } + stop = t11_child_stop{ + pid: ch[k].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + continue + } + return + case act.SupervisorStrategyTemporary: + return + case act.SupervisorStrategyPermanent: + t.waitStart = append(t.waitStart, c.Spec) + if ch[k].PID == children[n].PID { + // already added to wait list + continue + } + stop = t11_child_stop{ + pid: ch[k].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + } + } + return + case int: + children := t.Children() + if len(children) == 0 { + t.testcase.err <- errIncorrect + return + } + if err := t.SendExit(children[v].PID, gen.TerminateReasonKill); err != nil { + t.testcase.err <- err + return + } + stop := t11_child_stop{ + pid: children[v].PID, + reason: gen.TerminateReasonKill, + } + t.waitStop = append(t.waitStop, stop) + if t.strategy == act.SupervisorStrategyTemporary { + return + } + ch := children[v:] + for i := range ch { + k := len(ch) - 1 - i + t.waitStart = append(t.waitStart, ch[i].Spec) + + if ch[k].PID == children[v].PID { + // already appended + continue + } + stop = t11_child_stop{ + pid: ch[k].PID, + reason: gen.TerminateReasonKill, + } + t.waitStop = append(t.waitStop, stop) + } + return + } + panic("shouldn't be here") +} + +// t11_sup_arfo_significant +func factory_t11_sup_arfo_significant() gen.ProcessBehavior { + return &t11_sup_arfo_significant{} +} + +type t11_sup_arfo_significant struct { + act.Supervisor + + supType act.SupervisorType + strategy act.SupervisorStrategy + + testcase *testcase + waitStop []t11_child_stop + waitStart []gen.Atom +} + +func (t *t11_sup_arfo_significant) Init(args ...any) (act.SupervisorSpec, error) { + var spec act.SupervisorSpec + t.supType = args[0].(act.SupervisorType) + spec.Type = t.supType + t.strategy = args[1].(act.SupervisorStrategy) + spec.Restart.Strategy = t.strategy + spec.Restart.KeepOrder = true + + spec.Children = []act.SupervisorChildSpec{ + { + Name: "child_arfo100", + Factory: factory_t11_child_arfo, + }, + { + Name: "child_arfo110", + Significant: true, + Factory: factory_t11_child_arfo, + }, + { + Name: "child_arfo120", + Factory: factory_t11_child_arfo, + }, + } + spec.EnableHandleChild = true + t.waitStart = []gen.Atom{"child_arfo100", "child_arfo110", "child_arfo120"} + return spec, nil +} + +func (t *t11_sup_arfo_significant) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return 
nil +} + +func (t *t11_sup_arfo_significant) HandleChildStart(name gen.Atom, pid gen.PID) error { + // shouldn't start before stopping + if len(t.waitStop) > 0 { + t.testcase.err <- errIncorrect + return nil + } + waitName := t.waitStart[0] + if waitName != name { + t.testcase.err <- errIncorrect + return nil + } + + t.waitStart = t.waitStart[1:] + if len(t.waitStart) > 0 { + return nil + } + + // done + if t.testcase != nil { + t.testcase.err <- nil + } + return nil +} + +func (t *t11_sup_arfo_significant) HandleChildTerminate(name gen.Atom, pid gen.PID, reason error) error { + if len(t.waitStop) == 0 { + // do nothing + return nil + } + + stop := t.waitStop[0] + if stop.pid != pid { + t.testcase.err <- errIncorrect + return nil + } + if stop.reason != reason { + t.testcase.err <- errIncorrect + return nil + } + t.waitStop = t.waitStop[1:] + if len(t.waitStart) > 0 { + return nil + } + if len(t.waitStop) > 0 { + return nil + } + + t.testcase.err <- nil + return nil +} + +func (t *t11_sup_arfo_significant) TestARFOSignificant(input any) { + if _, ok := input.(initcase); ok { + // check children + children := t.Children() + if len(children) != 3 { + t.testcase.err <- errIncorrect + return + } + + t.testcase.err <- nil + return + } + + // terminate significant child with a given reason + reason := input.(error) + children := t.Children() + c := children[1] + if c.Significant != true { + t.Log().Error("it is not a significant spec") + t.testcase.err <- errIncorrect + return + } + if err := t.SendExit(c.PID, reason); err != nil { + t.testcase.err <- err + return + } + + stop := t11_child_stop{ + pid: c.PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + switch t.strategy { + case act.SupervisorStrategyTransient: + if reason != gen.TerminateReasonNormal && reason != gen.TerminateReasonShutdown { + if t.supType == act.SupervisorTypeAllForOne { + stop = t11_child_stop{ + pid: children[2].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + stop = t11_child_stop{ + pid: children[0].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + t.waitStart = append(t.waitStart, children[0].Spec) + t.waitStart = append(t.waitStart, children[1].Spec) + t.waitStart = append(t.waitStart, children[2].Spec) + + } else { + stop = t11_child_stop{ + pid: children[2].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + t.waitStart = append(t.waitStart, children[1].Spec) + t.waitStart = append(t.waitStart, children[2].Spec) + + } + } + + case act.SupervisorStrategyTemporary: + stop = t11_child_stop{ + pid: children[2].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + stop = t11_child_stop{ + pid: children[0].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + break + + case act.SupervisorStrategyPermanent: + if t.supType == act.SupervisorTypeAllForOne { + stop = t11_child_stop{ + pid: children[2].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + stop = t11_child_stop{ + pid: children[0].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + t.waitStart = append(t.waitStart, children[0].Spec) + t.waitStart = append(t.waitStart, children[1].Spec) + t.waitStart = append(t.waitStart, children[2].Spec) + } else { + stop = t11_child_stop{ + pid: children[2].PID, + reason: reason, + } + t.waitStop = append(t.waitStop, stop) + + t.waitStart = append(t.waitStart, children[1].Spec) + t.waitStart = append(t.waitStart, children[2].Spec) + + } + } +} + +func (t *t11_sup_arfo_significant) 
Terminate(reason error) { + select { + case t.testcase.err <- reason: + default: + } +} + +// +// t11_child_arfo +// + +func factory_t11_child_arfo() gen.ProcessBehavior { + return &t11_child_arfo{} +} + +type t11_child_arfo struct { + act.Actor +} + +// tests +func TestT11SupervisorAFO(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("t11nodeAFO@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t11_arfo, popt, act.SupervisorTypeAllForOne) + if err != nil { + panic(err) + } + + t11casesARFO = []*testcase{ + &testcase{"TestBasic", nil, nil, make(chan error)}, + &testcase{"TestStrategy", act.SupervisorStrategyTransient, nil, make(chan error)}, + &testcase{"TestStrategy", act.SupervisorStrategyTemporary, nil, make(chan error)}, + &testcase{"TestStrategy", act.SupervisorStrategyPermanent, nil, make(chan error)}, + &testcase{"TestSignificant", act.SupervisorStrategyTransient, nil, make(chan error)}, + &testcase{"TestSignificant", act.SupervisorStrategyTemporary, nil, make(chan error)}, + &testcase{"TestSignificant", act.SupervisorStrategyPermanent, nil, make(chan error)}, + + //TODO &testcase{"TestAutoShutdown", nil, nil, make(chan error)}, + //TODO &testcase{"TestRestartIntensity", nil, nil, make(chan error)}, + } + for _, tc := range t11casesARFO { + name := tc.name + if tc.input != nil { + name = fmt.Sprintf("%s:%s", tc.name, tc.input) + } + t.Run(name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} + +func TestT11SupervisorRFO(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("t11nodeRFO@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t11_arfo, popt, act.SupervisorTypeRestForOne) + if err != nil { + panic(err) + } + + t11casesARFO = []*testcase{ + {"TestBasic", nil, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyTransient, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyTemporary, nil, make(chan error)}, + {"TestStrategy", act.SupervisorStrategyPermanent, nil, make(chan error)}, + {"TestSignificant", act.SupervisorStrategyTransient, nil, make(chan error)}, + {"TestSignificant", act.SupervisorStrategyTemporary, nil, make(chan error)}, + {"TestSignificant", act.SupervisorStrategyPermanent, nil, make(chan error)}, + + //TODO {"TestAutoShutdown", nil, nil, make(chan error)}, + //TODO {"TestRestartIntensity", nil, nil, make(chan error)}, + } + for _, tc := range t11casesARFO { + name := tc.name + if tc.input != nil { + name = fmt.Sprintf("%s:%s", tc.name, tc.input) + } + t.Run(name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t012_application_test.go b/tests/001_local/t012_application_test.go new file mode 100644 index 00000000..ead95445 --- /dev/null +++ b/tests/001_local/t012_application_test.go @@ -0,0 +1,550 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/node" +) + +func createTestApp() gen.ApplicationBehavior { + return &testApp{} +} + +type testApp struct{} + +func (ta *testApp) Load(node gen.Node, args ...any) 
(gen.ApplicationSpec, error) {
+    return gen.ApplicationSpec{
+        Name: "test_app",
+        Group: []gen.ApplicationMemberSpec{
+            {
+                Name:    "test_member",
+                Factory: factory_t12,
+            },
+        },
+        Env: appEnv,
+    }, nil
+}
+
+func (ta *testApp) Start(mode gen.ApplicationMode) {}
+func (ta *testApp) Terminate(reason error)         {}
+
+func createTestAppDep() gen.ApplicationBehavior {
+    return &testAppDep{}
+}
+
+type testAppDep struct{}
+
+func (tad *testAppDep) Load(node gen.Node, args ...any) (gen.ApplicationSpec, error) {
+    spec := gen.ApplicationSpec{
+        Name: "test_app_dep",
+        Group: []gen.ApplicationMemberSpec{
+            {
+                Factory: factory_t12,
+            },
+        },
+        Env: appEnv,
+    }
+
+    spec.Depends.Applications = []gen.Atom{"test_app"}
+    return spec, nil
+}
+func (tad *testAppDep) Start(mode gen.ApplicationMode) {}
+func (tad *testAppDep) Terminate(reason error)         {}
+
+func createTestAppMode(testcase *testcase, mode gen.ApplicationMode) gen.ApplicationBehavior {
+    return &testAppMode{
+        testcase: testcase,
+        mode:     mode,
+    }
+}
+
+type testAppMode struct {
+    testcase *testcase
+    mode     gen.ApplicationMode
+}
+
+func (tam *testAppMode) Load(node gen.Node, args ...any) (gen.ApplicationSpec, error) {
+    spec := gen.ApplicationSpec{
+        Name: "test_app_mode",
+        Group: []gen.ApplicationMemberSpec{
+            {
+                Factory: factory_t12,
+            },
+            {
+                Factory: factory_t12,
+            },
+            {
+                Factory: factory_t12,
+            },
+        },
+        Env:  appEnv,
+        Mode: tam.mode,
+    }
+    return spec, nil
+}
+func (tam *testAppMode) Start(mode gen.ApplicationMode) {}
+func (tam *testAppMode) Terminate(reason error) {
+    select {
+    case tam.testcase.err <- reason:
+    default:
+    }
+}
+
+var (
+    appEnv = map[gen.Env]any{
+        "TEST":  12345,
+        "VALUE": "09887",
+    }
+    t12cases []*testcase
+)
+
+func factory_t12() gen.ProcessBehavior {
+    return &t12{}
+}
+
+type t12 struct {
+    act.Actor
+
+    testcase *testcase
+}
+
+func (t *t12) HandleMessage(from gen.PID, message any) error {
+    if t.testcase == nil {
+        t.testcase = message.(*testcase)
+        message = initcase{}
+    }
+    // get method by name
+    method := reflect.ValueOf(t).MethodByName(t.testcase.name)
+    if method.IsValid() == false {
+        t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name)
+        t.testcase = nil
+        return nil
+    }
+    method.Call([]reflect.Value{reflect.ValueOf(message)})
+    return nil
+}
+
+func (t *t12) TestBasic(input any) {
+    defer func() {
+        t.testcase = nil
+    }()
+
+    nopt := gen.NodeOptions{}
+    nopt.Applications = append(nopt.Applications, createTestApp())
+    nopt.Log.DefaultLogger.Disable = true
+    node, err := node.Start("t12nodeBasic@localhost", nopt, gen.Version{})
+    if err != nil {
+        t.Log().Error("unable to start new node: %s", err)
+        t.testcase.err <- err
+        return
+    }
+    defer node.Stop()
+
+    // check the autostarted app
+    if apps := node.ApplicationsRunning(); len(apps) != 1 || apps[0] != "test_app" {
+        t.Log().Error("incorrect application list: %s", apps)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    list, err := node.ProcessList()
+    if err != nil {
+        t.Log().Error("process list failed: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    // must be 1 process
+    if len(list) != 1 {
+        t.Log().Error("incorrect process list (exp 1 member): %s", list)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    info, err := node.ProcessInfo(list[0])
+    if err != nil {
+        t.Log().Error("process info failed: %s", err)
+        t.testcase.err <- err
+        return
+    }
+    // check the app name of the process
+    if info.Application != "test_app" {
+        t.Log().Error("process app name incorrect: %s", info.Application)
+        t.testcase.err <- errIncorrect
+        return
+    }
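+    // the member was declared in the spec with the name "test_member",
+    // so the spawned process must be registered under that name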
+    if info.Name != "test_member" {
+        t.Log().Error("incorrect member process name: %s", info.Name)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    docase := &testcase{"Do", nil, nil, make(chan error)}
+    node.Send(list[0], docase)
+    if err := docase.wait(1); err != nil {
+        t.Log().Error("unable to get process env: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    if reflect.DeepEqual(docase.output, appEnv) == false {
+        t.Log().Error("incorrect env vars: %#v exp %#v", docase.output, appEnv)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    // ---- new member start tests
+    // ask to spawn a new proc
+    node.Send(list[0], 1)
+    if err := docase.wait(1); err != nil {
+        t.Log().Error("unable to get process env: %s", err)
+        t.testcase.err <- err
+        return
+    }
+    newpid, ok := docase.output.(gen.PID)
+    if ok == false {
+        t.Log().Error("incorrect pid value: %#v", docase.output)
+        t.testcase.err <- errIncorrect
+        return
+    }
+    newinfo, err := node.ProcessInfo(newpid)
+    if err != nil {
+        t.Log().Error("new process info failed: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    // check the app name of the new process
+    if newinfo.Application != "test_app" {
+        t.Log().Error("new process app name incorrect: %s", newinfo.Application)
+        t.testcase.err <- errIncorrect
+        return
+    }
+    if newinfo.Name != "" {
+        t.Log().Error("incorrect new member process name: %s", newinfo.Name)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    newdocase := &testcase{"Do", nil, nil, make(chan error)}
+    node.Send(newpid, newdocase)
+    if err := newdocase.wait(1); err != nil {
+        t.Log().Error("unable to get new process env: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    if reflect.DeepEqual(newdocase.output, appEnv) == false {
+        t.Log().Error("incorrect env vars: %#v exp %#v", newdocase.output, appEnv)
+        t.testcase.err <- errIncorrect
+        return
+    }
+    // ---- new member stop tests
+
+    // try to unload the running app. must return an error
+    if err := node.ApplicationUnload("test_app"); err != gen.ErrApplicationRunning {
+        t.Log().Error("expecting gen.ErrApplicationRunning, got: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    // stop app and check
+    if err := node.ApplicationStop("test_app"); err != nil {
+        t.Log().Error("unable to stop application: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    if apps := node.ApplicationsRunning(); len(apps) != 0 {
+        t.Log().Error("incorrect application list: %s", apps)
+        t.testcase.err <- errIncorrect
+        return
+    }
+    // check the app. it must still be loaded
+    if apps := node.Applications(); len(apps) != 1 || apps[0] != "test_app" {
+        t.Log().Error("incorrect application list: %s", apps)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    // check unload
+    if err := node.ApplicationUnload("test_app"); err != nil {
+        t.Log().Error("unable to unload application: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    if l := node.Applications(); len(l) != 0 {
+        t.Log().Error("incorrect application list: %v", l)
+        t.testcase.err <- errIncorrect
+        return
+    }
+
+    // load the app with a dependency
+    appdep, err := node.ApplicationLoad(createTestAppDep())
+    if err != nil {
+        t.Log().Error("unable to load test_app_dep: %s", err)
+        t.testcase.err <- err
+        return
+    }
+
+    // try to start it (with the dep app not loaded). must fail
+    if err := node.ApplicationStart(appdep, gen.ApplicationOptions{}); err != gen.ErrApplicationDepends {
+        t.Log().Error("starting test_app_dep must fail while test_app is unloaded. got: %s", err)
+        t.testcase.err <- errIncorrect
+        return
+    }
got: %s", err) + t.testcase.err <- errIncorrect + } + + // load dep app + if _, err := node.ApplicationLoad(createTestApp()); err != nil { + t.Log().Error("unable to load test_app again: %s", err) + t.testcase.err <- err + } + + // now it must be started + if err := node.ApplicationStart(appdep, gen.ApplicationOptions{}); err != nil { + t.Log().Error("unable to start test_app_dep. got: %s", err) + t.testcase.err <- err + } + + // check application list (running) + if l := node.ApplicationsRunning(); len(l) != 2 { + t.Log().Error("incorrect application list: %v", l) + t.testcase.err <- errIncorrect + } + + t.testcase.err <- nil +} + +func (t *t12) TestApplicationMode(input any) { + defer func() { + t.testcase = nil + }() + + mode := t.testcase.input.(string) + switch mode { + case "Temporary": + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := node.Start("t12nodeAppTemporary@localhost", nopt, gen.Version{}) + if err != nil { + t.Log().Error("unable to start new node: %s", node) + t.testcase.err <- err + return + } + defer node.Stop() + + appcase := &testcase{"appcase", nil, nil, make(chan error)} + app := createTestAppMode(appcase, gen.ApplicationModeTemporary) + appName, err := node.ApplicationLoad(app) + if err != nil { + t.Log().Error("unable to load app: %s", err) + t.testcase.err <- err + return + } + + for _, reason := range []error{gen.TerminateReasonNormal, gen.TerminateReasonShutdown, gen.TerminateReasonKill, errors.New("whatever")} { + + if err := node.ApplicationStart(appName, gen.ApplicationOptions{}); err != nil { + t.Log().Error("unable to start %s. got: %s", appName, err) + t.testcase.err <- err + return + } + + appInfo, err := node.ApplicationInfo(appName) + if err != nil { + t.Log().Error("unable to get app info %s. got: %s", appName, err) + t.testcase.err <- err + return + } + + for _, pid := range appInfo.Group { + if err := node.SendExit(pid, reason); err != nil { + panic(err) + } + } + // app termination reason must be gen.TerminateReasonNormal. always. + if err := appcase.wait(1); err != gen.TerminateReasonNormal { + t.Log().Error("application must be terminated with reason 'normal'. got: %s", err) + if err == nil { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- err + return + } + + } + case "Transient": + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := node.Start("t12nodeAppTransient@localhost", nopt, gen.Version{}) + if err != nil { + t.Log().Error("unable to start new node: %s", node) + t.testcase.err <- err + return + } + defer node.Stop() + + appcase := &testcase{"appcase", nil, nil, make(chan error)} + app := createTestAppMode(appcase, gen.ApplicationModeTransient) + appName, err := node.ApplicationLoad(app) + if err != nil { + t.Log().Error("unable to load app: %s", err) + t.testcase.err <- err + return + } + + for _, reason := range []error{gen.TerminateReasonNormal, gen.TerminateReasonShutdown, gen.TerminateReasonKill, errors.New("whatever")} { + + if err := node.ApplicationStart(appName, gen.ApplicationOptions{}); err != nil { + t.Log().Error("unable to start %s. got: %s", appName, err) + t.testcase.err <- err + return + } + + appInfo, err := node.ApplicationInfo(appName) + if err != nil { + t.Log().Error("unable to get app info %s. 
got: %s", appName, err) + t.testcase.err <- err + return + } + + expectingAppTermReason := gen.TerminateReasonNormal + for _, pid := range appInfo.Group { + if err := node.SendExit(pid, reason); err != nil { + panic(err) + } + if reason == gen.TerminateReasonNormal || reason == gen.TerminateReasonShutdown { + continue + } + expectingAppTermReason = reason + break + } + if err := appcase.wait(1); err != expectingAppTermReason { + t.Log().Error("application must be terminated with reason '%s'. got: %s", expectingAppTermReason, err) + if err == nil { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- err + return + } + + } + + case "Permanent": + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + node, err := node.Start("t12nodeAppPermanent@localhost", nopt, gen.Version{}) + if err != nil { + t.Log().Error("unable to start new node: %s", node) + t.testcase.err <- err + return + } + defer node.Stop() + + appcase := &testcase{"appcase", nil, nil, make(chan error)} + app := createTestAppMode(appcase, gen.ApplicationModePermanent) + appName, err := node.ApplicationLoad(app) + if err != nil { + t.Log().Error("unable to load app: %s", err) + t.testcase.err <- err + return + } + + for _, reason := range []error{gen.TerminateReasonNormal, gen.TerminateReasonShutdown, gen.TerminateReasonKill, errors.New("whatever")} { + + if err := node.ApplicationStart(appName, gen.ApplicationOptions{}); err != nil { + t.Log().Error("unable to start %s. got: %s", appName, err) + t.testcase.err <- err + return + } + + appInfo, err := node.ApplicationInfo(appName) + if err != nil { + t.Log().Error("unable to get app info %s. got: %s", appName, err) + t.testcase.err <- err + return + } + + // must be enough to terminate just one member to terminate whole app + if err := node.SendExit(appInfo.Group[1], reason); err != nil { + panic(err) + } + if err := appcase.wait(1); err != reason { + t.Log().Error("application must be terminated with reason '%s'. 
got: %s", reason, err) + if err == nil { + t.testcase.err <- errIncorrect + return + } + t.testcase.err <- err + return + } + + } + + default: + } + + t.testcase.err <- nil +} + +func (t *t12) Do(input any) { + if _, ok := input.(initcase); ok { + env := t.EnvList() + t.testcase.output = env + t.testcase.err <- nil + return + } + + pid, err := t.Spawn(factory_t12, gen.ProcessOptions{}) + if err != nil { + t.Log().Error("unable to spawn new process by request: %s", err) + t.testcase.err <- err + return + } + t.testcase.output = pid + t.testcase.err <- nil +} + +func TestT12Application(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := node.Start("t12node@localhost", nopt, gen.Version{}) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t12, popt) + if err != nil { + panic(err) + } + + t12cases = []*testcase{ + {"TestBasic", nil, nil, make(chan error)}, + {"TestApplicationMode", "Temporary", nil, make(chan error)}, + {"TestApplicationMode", "Transient", nil, make(chan error)}, + {"TestApplicationMode", "Permanent", nil, make(chan error)}, + } + for _, tc := range t12cases { + name := tc.name + if tc.input != nil { + name = fmt.Sprintf("%s:%s", tc.name, tc.input) + } + t.Run(name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t013_meta_process_test.go b/tests/001_local/t013_meta_process_test.go new file mode 100644 index 00000000..e8c03562 --- /dev/null +++ b/tests/001_local/t013_meta_process_test.go @@ -0,0 +1,416 @@ +package local + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t13cases []*testcase +) + +func createTestMeta() gen.MetaBehavior { + return &testMeta{ + stop: make(chan struct{}), + } +} + +type testMeta struct { + gen.MetaProcess + testcase *testcase + stop chan struct{} +} + +func (tm *testMeta) Init(meta gen.MetaProcess) error { + tm.MetaProcess = meta + return nil +} + +func (tm *testMeta) Start() error { + <-tm.stop + return nil +} + +func (tm *testMeta) HandleMessage(from gen.PID, message any) error { + tm.Log().Info("got message %s", message) + switch x := message.(type) { + case *testcase: + tm.testcase = x + x.err <- nil + case gen.PID: + if err := tm.Send(x, "forward"); err == nil { + tm.Log().Info("sent 'forward' to %s", x) + } else { + tm.Log().Error("unable to send 'forward' to %s", x) + } + } + return nil +} + +func (tm *testMeta) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + if _, ok := request.(bool); ok { + // spawn request + id, err := tm.Spawn(createTestMeta(), gen.MetaOptions{}) + if err != nil { + tm.Log().Error("unable to start child meta %s", err) + return false, nil + } + return id, nil + } + tm.Log().Info("got request %s from %s", request, from) + return request, nil +} + +func (tm *testMeta) Terminate(reason error) { + tm.Log().Info("terminate with reason %s", reason) + close(tm.stop) + if tm.testcase != nil { + select { + case tm.testcase.err <- reason: + default: + } + } +} + +func (tm *testMeta) HandleInspect(from gen.PID, item ...string) map[string]string { + tm.Log().Info("got inspect request from %s", from) + result := map[string]string{ + from.String(): "ok", + } + return result +} + +func factory_t13() gen.ProcessBehavior { + return &t13{} +} + +type t13 struct { + act.Actor + + 
testcase *testcase +} + +func (t *t13) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t13) TestBasic(input any) { + defer func() { + t.testcase = nil + }() + + meta := createTestMeta() + id, err := t.SpawnMeta(meta, gen.MetaOptions{}) + if err != nil { + t.testcase.err <- err + return + } + t.Log().Info("started meta %s", id) + + // test call + req := "ping-pong" + t.Log().Info("making call to %s", id) + resp, err := t.Call(id, req) + if err != nil { + t.Log().Error("test call failed: %s", err) + select { + case t.testcase.err <- err: + default: + } + return + } + + if reflect.DeepEqual(req, resp) == false { + t.Log().Error("test call failed. incorrect response") + t.testcase.err <- errIncorrect + return + } + + // test send + x := &testcase{"x", nil, nil, make(chan error)} + t.Log().Info("send message to %s", id) + if err := t.Send(id, x); err != nil { + t.Log().Error("unable to send message to %s: %s", id, err) + t.testcase.err <- errIncorrect + return + } + if err := x.wait(1); err != nil { + t.Log().Error("seems meta process hasnt recv this message") + t.testcase.err <- err + return + } + + exp := map[string]string{ + t.PID().String(): "ok", + } + // test inspect + insp, err := t.InspectMeta(id) + if err != nil { + t.Log().Error("inspect error: %s", err) + t.testcase.err <- err + return + + } + if reflect.DeepEqual(insp, exp) == false { + t.Log().Error("inspect failed. incorrect response") + t.testcase.err <- errIncorrect + return + } + + // test spawn child meta + v, err := t.Call(id, true) + if err != nil { + t.Log().Error("test call failed. 
incorrect response") + t.testcase.err <- errIncorrect + return + } + if _, ok := v.(gen.Alias); ok == false { + t.testcase.err <- errIncorrect + return + } + + // test exit-signal + xterm := errors.New("test meta exit") + t.SendExitMeta(id, xterm) + if err := x.wait(1); err != xterm { + t.Log().Error("seems meta process hasnt recv exit signal") + t.testcase.err <- err + return + } + + t.testcase.err <- nil + t.Log().Info("TestBasic done") +} + +func (t *t13) TestLinkMonitor(input any) { + defer func() { + t.testcase = nil + }() + + pid, err := t.Spawn(factory_t13, gen.ProcessOptions{}) + if err != nil { + t.Log().Error("unable to start new process: %s", err) + t.testcase.err <- err + return + } + metatc := &testcase{"LinkMonitorMeta", nil, nil, make(chan error)} + t.Send(pid, metatc) + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // ask to start meta + t.Send(pid, "start") + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + id := metatc.output.(gen.Alias) + + // ask meta to send message to the pid + t.Send(id, pid) + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // must be "forward" + if metatc.output.(string) != "forward" { + t.Log().Error("incorrect value") + t.testcase.err <- errIncorrect + return + } + + // ask to link with meta + metatc.input = id + t.Send(pid, "link") + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // send exit to the meta + idterm := errors.New("term meta") + t.SendExitMeta(id, idterm) + + // waiting for the gen.MessageExitAlias + if err := metatc.wait(1); err != nil { + t.Log().Error("waiting for gen.MessageExitAlias failed: %s", err) + t.testcase.err <- err + return + } + expexit := gen.MessageExitAlias{ + Alias: id, + Reason: idterm, + } + if reflect.DeepEqual(metatc.output, expexit) == false { + t.Log().Error("incorrect exit message. got %#v", metatc.output) + t.testcase.err <- errIncorrect + return + } + + // ask to start meta + t.Send(pid, "start") + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + id = metatc.output.(gen.Alias) + + // ask to create monitor with meta + metatc.input = id + t.Send(pid, "monitor") + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + + // send exit to the meta + t.SendExitMeta(id, idterm) + + // waiting for the gen.MessageDownAlias + if err := metatc.wait(1); err != nil { + t.Log().Error("waiting for gen.MessageDownAlias failed: %s", err) + t.testcase.err <- err + return + } + expdown := gen.MessageDownAlias{ + Alias: id, + Reason: idterm, + } + if reflect.DeepEqual(metatc.output, expdown) == false { + t.Log().Error("incorrect down message. got %#v", metatc.output) + t.testcase.err <- errIncorrect + return + } + + // case: terminate parent process + + // ask to start meta + t.Send(pid, "start") + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + id = metatc.output.(gen.Alias) + // send this case to the meta process + t.Send(id, metatc) + if err := metatc.wait(1); err != nil { + t.testcase.err <- err + return + } + + pidterm := errors.New("blabla") + t.SendExit(pid, pidterm) + // meta should be terminated with the same reason + if err := metatc.wait(1); err != pidterm { + t.Log().Error("incorrect value. 
expected: %s", pidterm) + t.testcase.err <- err + return + } + + t.testcase.err <- nil + + t.Log().Info("TestLinkMonitor done") +} + +func (t *t13) LinkMonitorMeta(input any) { + switch x := input.(type) { + case initcase: + t.SetTrapExit(true) + t.testcase.err <- nil + return + case string: + switch x { + case "start": + id, err := t.SpawnMeta(createTestMeta(), gen.MetaOptions{}) + if err != nil { + t.Log().Error("unable to start meta: %s", err) + t.testcase.err <- err + return + } + t.Log().Info("started meta %s", id) + t.testcase.output = id + t.testcase.err <- nil + return + case "forward": + t.testcase.output = x + t.testcase.err <- nil + return + case "link": + if err := t.Link(t.testcase.input); err != nil { + t.Log().Error("unable to link with %v: %s", t.testcase.input, err) + t.testcase.err <- err + return + } + t.testcase.err <- nil + t.Log().Info("created link with %s", t.testcase.input) + return + case "monitor": + if err := t.Monitor(t.testcase.input); err != nil { + t.Log().Error("unable to monitor %v: %s", t.testcase.input, err) + t.testcase.err <- err + return + } + t.testcase.err <- nil + t.Log().Info("created monitor with %s", t.testcase.input) + return + } + case gen.MessageExitAlias, gen.MessageDownAlias: + t.testcase.output = x + t.testcase.err <- nil + return + } + t.Log().Error("got unknown message %#v", input) + panic("shouldnt be here") +} + +func TestT13Meta(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("t13node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t13, popt) + if err != nil { + panic(err) + } + + t13cases = []*testcase{ + {"TestBasic", nil, nil, make(chan error)}, + {"TestLinkMonitor", nil, nil, make(chan error)}, + } + for _, tc := range t13cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t014_pool_test.go b/tests/001_local/t014_pool_test.go new file mode 100644 index 00000000..8bf10963 --- /dev/null +++ b/tests/001_local/t014_pool_test.go @@ -0,0 +1,323 @@ +package local + +import ( + "fmt" + "reflect" + "sync/atomic" + "testing" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/node" +) + +var ( + t14cases []*testcase +) + +func factory_t14() gen.ProcessBehavior { + return &t14{} +} + +type t14 struct { + act.Actor + + testcase *testcase +} + +func (t *t14) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func factory_t14pool() gen.ProcessBehavior { + return &t14pool{} +} + +type t14pool struct { + act.Pool + tc *testcase +} + +func (p *t14pool) Init(args ...any) (act.PoolOptions, error) { + var options act.PoolOptions + options.WorkerFactory = factory_t14worker + options.PoolSize = 5 + return options, nil +} + +func (p *t14pool) HandleMessage(from gen.PID, message any) error { + p.Log().Info("pool process got message from %s: %s", from, message) + select { + case p.tc.err <- nil: + default: + } + return nil +} + +func (p *t14pool) 
HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) {
+	p.Log().Info("pool process got request from %s", from)
+	switch v := request.(type) {
+	case *testcase:
+		p.tc = v
+	case int:
+		if v > 0 {
+			p.Log().Info("start %d new workers", v)
+			n, err := p.AddWorkers(v)
+			if err != nil {
+				p.Log().Error("unable to add %d workers: %s", v, err)
+				return 0, nil
+			}
+			p.Log().Info("total num of workers now: %d", n)
+			return n, nil
+		}
+
+		p.Log().Info("stop %d workers", v)
+		n, err := p.RemoveWorkers(-v)
+		if err != nil {
+			p.Log().Error("unable to remove %d workers: %s", v, err)
+			return 0, nil
+		}
+		p.Log().Info("total num of workers now: %d", n)
+		return n, nil
+	}
+	return true, nil
+}
+
+func (p *t14pool) Terminate(reason error) {
+	p.Log().Info("pool process terminated: %s", reason)
+}
+
+func factory_t14worker() gen.ProcessBehavior {
+	return &t14worker{}
+}
+
+type t14worker struct {
+	act.Actor
+	id int32
+	tc *testcase
+}
+
+var poolworkerid int32
+
+func (w *t14worker) Init(args ...any) error {
+	w.Log().Info("started worker")
+	w.id = atomic.AddInt32(&poolworkerid, 1)
+	return nil
+}
+
+func (w *t14worker) HandleMessage(from gen.PID, message any) error {
+	w.Log().Info("worker process got message from %s", from)
+	w.tc.output = w.id
+	select {
+	case w.tc.err <- nil:
+	default:
+	}
+	return nil
+}
+
+func (w *t14worker) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) {
+	w.Log().Info("worker process got request from %s", from)
+	w.tc = request.(*testcase)
+	return w.id, nil
+}
+
+func (w *t14worker) Terminate(reason error) {
+	w.Log().Info("worker process id=%d terminated: %s", w.id, reason)
+}
+
+func (t *t14) TestBasic(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	poolpid, err := t.Spawn(factory_t14pool, gen.ProcessOptions{})
+	if err != nil {
+		t.Log().Error("unable to spawn pool process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	pris := []gen.MessagePriority{gen.MessagePriorityHigh, gen.MessagePriorityMax}
+	tc := &testcase{"", nil, nil, make(chan error)}
+	// test call request to the pool process. must be handled by itself due to high priority
+	for _, p := range pris {
+		t.Log().Info("making a call with priority %s to the pool process", p)
+		v, err := t.CallWithPriority(poolpid, tc, p)
+		if err != nil {
+			t.Log().Error("call pool process failed: %s", err)
+			t.testcase.err <- err
+			return
+		}
+		if res, ok := v.(bool); ok == false || res != true {
+			t.Log().Error("incorrect result: %v (exp: true)", res)
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	// test sending a message to the pool process. must be handled by itself due to high priority
+	for _, p := range pris {
+		t.Log().Info("send with priority %s to the pool process", p)
+		err := t.SendWithPriority(poolpid, "hi", p)
+		if err != nil {
+			t.Log().Error("sending to the pool process failed: %s", err)
+			t.testcase.err <- err
+			return
+		}
+		if err := tc.wait(1); err != nil {
+			t.Log().Error("got error: %s", err)
+			t.testcase.err <- err
+			return
+		}
+	}
+
+	// test call forwarding to the worker processes
+	for i := int32(0); i < 10; i++ {
+		expid := i%5 + 1
+		t.Log().Info("making a call to the pool process. must be forwarded. i=%d , exp=%d", i, expid)
+		v, err := t.Call(poolpid, tc)
+		if err != nil {
+			t.Log().Error("call worker process failed: %s", err)
+			t.testcase.err <- err
+			return
+		}
+		// check worker id
+		if id, ok := v.(int32); ok == false || id != expid {
+			t.Log().Error("incorrect worker id: %d (exp: %d)", id, expid)
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	// test send forwarding to the worker processes
+	for i := int32(0); i < 10; i++ {
+		expid := i%5 + 1
+		t.Log().Info("sending a message to the pool process. must be forwarded. i=%d , exp=%d", i, expid)
+		err := t.Send(poolpid, tc)
+		if err != nil {
+			t.Log().Error("sending to worker process failed: %s", err)
+			t.testcase.err <- err
+			return
+		}
+		if err := tc.wait(1); err != nil {
+			t.Log().Error("got error: %s", err)
+			t.testcase.err <- err
+			return
+		}
+		// check worker id
+		if id, ok := tc.output.(int32); ok == false || id != expid {
+			t.Log().Error("incorrect worker id: %d (exp: %d)", id, expid)
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	// ask pool process to add 3 workers
+	v, err := t.CallWithPriority(poolpid, 3, gen.MessagePriorityHigh)
+	if err != nil {
+		t.Log().Error("call pool process failed: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	if res, ok := v.(int64); ok == false || res != 8 {
+		t.Log().Error("incorrect result: %v (exp: 8)", v)
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// test call forwarding with updated pool of workers
+	for i := int32(0); i < 16; i++ {
+		expid := i%8 + 1
+		t.Log().Info("making a call to the pool process. must be forwarded. i=%d , exp=%d", i, expid)
+		v, err := t.Call(poolpid, tc)
+		if err != nil {
+			t.Log().Error("call worker process failed: %s", err)
+			t.testcase.err <- err
+			return
+		}
+		// check worker id
+		if id, ok := v.(int32); ok == false || id != expid {
+			t.Log().Error("incorrect worker id: %d (exp: %d)", id, expid)
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	// ask pool process to remove 5 workers
+	v, err = t.CallWithPriority(poolpid, -5, gen.MessagePriorityHigh)
+	if err != nil {
+		t.Log().Error("call pool process failed: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	if res, ok := v.(int64); ok == false || res != 3 {
+		t.Log().Error("incorrect result: %v (exp: 3)", v)
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// test call forwarding with updated pool of workers
+	for i := int32(0); i < 10; i++ {
+		expid := i%3 + 6
+		t.Log().Info("making a call to the pool process. must be forwarded. 
i=%d , exp=%d", i, expid) + v, err := t.Call(poolpid, tc) + if err != nil { + t.Log().Error("call worker process failed: %s", err) + t.testcase.err <- err + return + } + // check worker id + if id, ok := v.(int32); ok == false || id != expid { + t.Log().Error("incorrect worker id: %d (exp: %d)", id, expid) + t.testcase.err <- errIncorrect + return + } + } + t.testcase.err <- nil +} + +func TestT14Pool(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := node.Start("t14node@localhost", nopt, gen.Version{}) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t14, popt) + if err != nil { + panic(err) + } + + t14cases = []*testcase{ + {"TestBasic", nil, nil, make(chan error)}, + } + for _, tc := range t14cases { + name := tc.name + if tc.input != nil { + name = fmt.Sprintf("%s:%s", tc.name, tc.input) + } + t.Run(name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(30); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t015_web_test.go b/tests/001_local/t015_web_test.go new file mode 100644 index 00000000..b40ae5e3 --- /dev/null +++ b/tests/001_local/t015_web_test.go @@ -0,0 +1,260 @@ +package local + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "testing" + + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/meta" + "ergo.services/ergo/node" +) + +var ( + t15cases []*testcase +) + +func factory_t15() gen.ProcessBehavior { + return &t15{} +} + +type t15 struct { + act.Actor + + testcase *testcase +} + +func factory_t15web() gen.ProcessBehavior { + return &t15web{} +} + +type t15web struct { + act.Actor + + tc *testcase +} + +func factory_t15worker() gen.ProcessBehavior { + return &t15worker{} +} + +type t15worker struct { + act.WebWorker +} + +func (t *t15worker) HandleGet(from gen.PID, writer http.ResponseWriter, request *http.Request) error { + writer.WriteHeader(http.StatusAccepted) + return nil +} + +func (t *t15web) Init(args ...any) error { + + mux := http.NewServeMux() + + // root + handler1 := meta.CreateWebHandler(meta.WebHandlerOptions{}) // returns http.StatusNoContent + if _, err := t.SpawnMeta(handler1, gen.MetaOptions{}); err != nil { + return err + } + mux.Handle("/", handler1) + + // /test + opt := meta.WebHandlerOptions{ + Worker: "webworker", // must forward request to this process + } + handler2 := meta.CreateWebHandler(opt) // returns http.StatusAccepted + if _, err := t.SpawnMeta(handler2, gen.MetaOptions{}); err != nil { + return err + } + mux.Handle("/test", handler2) + + // do not start. 
check the case with no meta process
+	handler3 := meta.CreateWebHandler(meta.WebHandlerOptions{})
+	mux.Handle("/nometaprocess", handler3) // returns http.StatusServiceUnavailable
+
+	// create and spawn web server meta process
+	serverOptions := meta.WebServerOptions{
+		Port:    12121,
+		Host:    "localhost",
+		Handler: mux,
+	}
+
+	webserver, err := meta.CreateWebServer(serverOptions)
+	if err != nil {
+		return err
+	}
+	if _, err := t.SpawnMeta(webserver, gen.MetaOptions{}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *t15web) HandleMessage(from gen.PID, message any) error {
+	switch m := message.(type) {
+	case meta.MessageWebRequest:
+		defer m.Done()
+		t.Log().Info("got http request from %s", from)
+		m.Response.WriteHeader(http.StatusNoContent)
+	default:
+		t.Log().Info("got unknown message from %s: %#v", from, message)
+	}
+
+	return nil
+}
+
+func (t *t15) HandleMessage(from gen.PID, message any) error {
+	if t.testcase == nil {
+		t.testcase = message.(*testcase)
+		message = initcase{}
+	}
+	// get method by name
+	method := reflect.ValueOf(t).MethodByName(t.testcase.name)
+	if method.IsValid() == false {
+		t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name)
+		t.testcase = nil
+		return nil
+	}
+	method.Call([]reflect.Value{reflect.ValueOf(message)})
+	return nil
+}
+
+func (t *t15) TestBasic(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	// start web-process
+	webpid, err := t.Spawn(factory_t15web, gen.ProcessOptions{})
+	if err != nil {
+		t.Log().Error("unable to spawn web process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	// start webworker-process
+	webworkerpid, err := t.SpawnRegister("webworker", factory_t15worker, gen.ProcessOptions{})
+	if err != nil {
+		t.Log().Error("unable to spawn handler process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	// must be handled by web-process and return http.StatusNoContent
+	url := "http://localhost:12121/"
+	t.Log().Info("making request to %q. must be handled by %s (web)", url, webpid)
+	r, err := http.Get(url)
+	if err != nil {
+		t.Log().Error("unable to make web request / : %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	if r.StatusCode != http.StatusNoContent {
+		t.Log().Error("incorrect status code for /: %d (exp: %d)", r.StatusCode, http.StatusNoContent)
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// must be handled by handler-process and return http.StatusAccepted
+	url = "http://localhost:12121/test"
+	t.Log().Info("making GET request to %q. must be handled by %s (webworker)", url, webworkerpid)
+	r, err = http.Get(url)
+	if err != nil {
+		t.Log().Error("unable to make web request /test : %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	if r.StatusCode != http.StatusAccepted {
+		t.Log().Error("incorrect status code for /test: %d (exp: %d)", r.StatusCode, http.StatusAccepted)
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	t.Log().Info("making POST request to %q. must be handled by %s (webworker)", url, webworkerpid)
+	r, err = http.Post(url, "", bytes.NewBuffer([]byte{1, 2, 3}))
+	if err != nil {
+		t.Log().Error("unable to make web request /test : %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	if r.StatusCode != http.StatusNotImplemented {
+		t.Log().Error("incorrect status code for /test: %d (exp: %d)", r.StatusCode, http.StatusNotImplemented)
+		t.testcase.err <- errIncorrect
+		return
+	}
+	// must be handled by meta-process itself and return http.StatusServiceUnavailable
+	url = "http://localhost:12121/nometaprocess"
+	t.Log().Info("making request to %q. must be handled by meta-process itself", url)
+	r, err = http.Get(url)
+	if err != nil {
+		t.Log().Error("unable to make web request /nometaprocess : %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	if r.StatusCode != http.StatusServiceUnavailable {
+		t.Log().Error("incorrect status code for /nometaprocess: %d (exp: %d)", r.StatusCode, http.StatusServiceUnavailable)
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// kill webworkerpid process and make request to the handler url.
+	// must be http.StatusBadGateway
+	t.Node().Kill(webworkerpid)
+
+	url = "http://localhost:12121/test"
+	t.Log().Info("making request to %q. must be handled by meta-process (worker-process was killed)", url)
+	r, err = http.Get(url)
+	if err != nil {
+		t.Log().Error("unable to make web request /test : %s", err)
+		t.testcase.err <- err
+		return
+	}
+
+	if r.StatusCode != http.StatusBadGateway {
+		t.Log().Error("incorrect status code for /test: %d (exp: %d)", r.StatusCode, http.StatusBadGateway)
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func TestT15Web(t *testing.T) {
+	nopt := gen.NodeOptions{}
+	nopt.Log.DefaultLogger.Disable = true
+	// nopt.Log.Level = gen.LogLevelTrace
+	node, err := node.Start("t15Webnode@localhost", nopt, gen.Version{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	popt := gen.ProcessOptions{}
+	pid, err := node.Spawn(factory_t15, popt)
+	if err != nil {
+		panic(err)
+	}
+
+	t15cases = []*testcase{
+		{"TestBasic", nil, nil, make(chan error)},
+	}
+	for _, tc := range t15cases {
+		name := tc.name
+		if tc.input != nil {
+			name = fmt.Sprintf("%s:%s", tc.name, tc.input)
+		}
+		t.Run(name, func(t *testing.T) {
+			node.Send(pid, tc)
+			if err := tc.wait(30); err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+
+	node.Stop()
+}
diff --git a/tests/001_local/t016_tcp_test.go b/tests/001_local/t016_tcp_test.go
new file mode 100644
index 00000000..6b318da9
--- /dev/null
+++ b/tests/001_local/t016_tcp_test.go
@@ -0,0 +1,473 @@
+package local
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"reflect"
+	"testing"
+	"time"
+
+	"ergo.services/ergo"
+	"ergo.services/ergo/act"
+	"ergo.services/ergo/gen"
+	"ergo.services/ergo/meta"
+)
+
+var (
+	t16cases []*testcase
+)
+
+func factory_tcp() gen.ProcessBehavior {
+	return &tcp{}
+}
+
+type tcp struct {
+	act.Actor
+	proc bool
+	tc   *testcase
+
+	addr net.Addr
+}
+
+func (tt *tcp) Init(args ...any) error {
+	tt.tc = args[0].(*testcase)
+	tt.proc = args[1].(bool)
+	port := args[2].(uint16)
+	if tt.proc {
+		return nil
+	}
+
+	// TCP server with no Process
+	opt := meta.TCPServerOptions{
+		Port: port,
+	}
+	if metatcp, err := meta.CreateTCPServer(opt); err != nil {
+		tt.Log().Error("unable to create tcp meta-process: %s", err)
+		return nil
+	} else {
+		if id, err := tt.SpawnMeta(metatcp, gen.MetaOptions{}); err != nil {
+			tt.Log().Error("unable to spawn tcp meta-process: %s", err)
+			metatcp.Terminate(err) // to stop TCP-listener
+			return nil
+		} else {
+			tt.Log().Info("meta-process %s with tcp-server on %d started", id, port)
+		}
+	}
+
+	// TCP server with Process
+	opt = meta.TCPServerOptions{
+		Port:        port + 1,
+		ProcessPool: []gen.Atom{"handler1", "handler2"},
+	}
+	if metatcp, err := meta.CreateTCPServer(opt); err != nil {
+		tt.Log().Error("unable to create tcp meta-process: %s", err)
+		return nil
+	} else {
+		if id, err := tt.SpawnMeta(metatcp, gen.MetaOptions{}); err != nil {
+			tt.Log().Error("unable to spawn tcp meta-process: %s", err)
+			metatcp.Terminate(err) // to stop TCP-listener
+			return nil
+		} else {
+			tt.Log().Info("meta-process %s with tcp-server on %d started", id, port+1)
+		}
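+		// Note: the two meta-processes above set up the two TCP server modes
+		// exercised by TestServer below. Without ProcessPool, every
+		// meta.MessageTCP* event is delivered to the parent process itself;
+		// with ProcessPool, incoming connections are handed to the named
+		// processes ("handler1", "handler2") in turn. A minimal client-side
+		// sketch (plain net.Dial, exactly what the test uses):
+		//
+		//	conn, _ := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port+1))
+		//	conn.Write([]byte("hi")) // served by "handler1" or "handler2"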
+	}
+	return nil
+}
+
+func (tt *tcp) HandleMessage(from gen.PID, message any) error {
+	switch m := message.(type) {
+	case meta.MessageTCPConnect:
+		tt.Log().Info("got new connection %s with %s", m.ID, m.RemoteAddr.String())
+		tt.addr = m.RemoteAddr
+		tt.tc.err <- nil
+	case meta.MessageTCPDisconnect:
+		tt.Log().Info("connection %s (%s) has terminated", m.ID, tt.addr.String())
+		tt.tc.err <- nil
+	case meta.MessageTCP:
+		tt.Log().Info("server got tcp packet from %s: %q ", m.ID, string(m.Data))
+		if tt.proc {
+			m.Data = []byte(tt.Name())
+		} else {
+			m.Data = []byte("noproc")
+		}
+		if err := tt.SendAlias(m.ID, m); err != nil {
+			tt.Log().Error("unable to send to %s: %s", m.ID, err)
+		}
+		tt.Log().Info("server sent reply")
+		tt.tc.err <- nil
+	default:
+		tt.Log().Info("got unknown message from %s: %#v", from, message)
+	}
+	return nil
+}
+
+func factory_t16() gen.ProcessBehavior {
+	return &t16{}
+}
+
+type t16 struct {
+	act.Actor
+
+	testcase *testcase
+}
+
+func (t *t16) HandleMessage(from gen.PID, message any) error {
+	if t.testcase == nil {
+		t.testcase = message.(*testcase)
+		message = initcase{}
+	}
+	// get method by name
+	method := reflect.ValueOf(t).MethodByName(t.testcase.name)
+	if method.IsValid() == false {
+		t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name)
+		t.testcase = nil
+		return nil
+	}
+	method.Call([]reflect.Value{reflect.ValueOf(message)})
+	return nil
+}
+
+func (t *t16) TestServer(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	port := uint16(17171)
+	tc_proc_handler1 := &testcase{"", nil, nil, make(chan error)}
+	handler1_pid, err := t.SpawnRegister("handler1", factory_tcp, gen.ProcessOptions{}, tc_proc_handler1, true, port)
+	if err != nil {
+		t.Log().Error("unable to spawn 'handler1' process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	t.Log().Info("spawned handler1 process: %s", handler1_pid)
+
+	tc_proc_handler2 := &testcase{"", nil, nil, make(chan error)}
+	handler2_pid, err := t.SpawnRegister("handler2", factory_tcp, gen.ProcessOptions{}, tc_proc_handler2, true, port)
+	if err != nil {
+		t.Log().Error("unable to spawn 'handler2' process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	t.Log().Info("spawned handler2 process: %s", handler2_pid)
+
+	tc_noproc := &testcase{"", nil, nil, make(chan error)}
+	pid, err := t.Spawn(factory_tcp, gen.ProcessOptions{}, tc_noproc, false, port)
+	if err != nil {
+		t.Log().Error("unable to spawn process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	t.Log().Info("spawned process with tcp servers: %s", pid)
+
+	// making a tcp connection that must be handled by handler1
+	addr1 := fmt.Sprintf("127.0.0.1:%d", port+1)
+	conn_handler1, err := net.Dial("tcp", addr1)
+	if err != nil {
+		t.Log().Error("unable to connect to %s :%s", addr1, err)
+		t.testcase.err <- err
+		return
+	}
+	if err := tc_proc_handler1.wait(1); err != nil {
+		t.Log().Error("handler 1 has no connection")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	conn_handler1.Close()
+	if err := tc_proc_handler1.wait(1); err != nil {
+		t.Log().Error("handler 1 didn't receive meta.MessageTCPDisconnect")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// second try. must be handled by handler2
+	conn_handler2, err := net.Dial("tcp", addr1)
+	if err != nil {
+		t.Log().Error("unable to connect to %s :%s", addr1, err)
+		t.testcase.err <- err
+		return
+	}
+	if err := tc_proc_handler2.wait(1); err != nil {
+		t.Log().Error("handler 2 has no connection")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	conn_handler2.Close()
+	if err := tc_proc_handler2.wait(1); err != nil {
+		t.Log().Error("handler 2 didn't receive meta.MessageTCPDisconnect")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// making a tcp connection to 17171. must be handled by the process (that started the tcp-server) itself
+	addr2 := fmt.Sprintf("127.0.0.1:%d", port)
+	conn, err := net.Dial("tcp", addr2)
+	if err != nil {
+		t.Log().Error("unable to connect to %s :%s", addr2, err)
+		t.testcase.err <- err
+		return
+	}
+	if err := tc_noproc.wait(1); err != nil {
+		t.Log().Error("handler noproc has no connection")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	// test send/recv
+
+	if _, err := conn.Write([]byte("hi")); err != nil {
+		conn.Close()
+		t.Log().Error("noproc unable to write: %s", err)
+		t.testcase.err <- errIncorrect
+		return
+	}
+	if err := tc_noproc.wait(1); err != nil {
+		t.Log().Error("handler noproc didn't receive meta.MessageTCP")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	conn.SetReadDeadline(time.Now().Add(300 * time.Millisecond))
+	buf := make([]byte, 10)
+	if n, err := conn.Read(buf); err != nil {
+		t.Log().Error("unable to read from tcp socket: %s", err)
+		t.testcase.err <- err
+		return
+	} else {
+		data := buf[:n]
+		if reflect.DeepEqual(data, []byte("noproc")) == false {
+			t.Log().Error("recv incorrect data: %v (exp: noproc)", string(data))
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	conn.Close()
+	if err := tc_noproc.wait(1); err != nil {
+		t.Log().Error("handler noproc didn't receive meta.MessageTCPDisconnect")
+		t.testcase.err <- errIncorrect
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func factory_tcpclient() gen.ProcessBehavior {
+	return &tcpclient{}
+}
+
+type tcpclient struct {
+	act.Actor
+
+	id   gen.Alias
+	tc   *testcase
+	addr net.Addr
+	port uint16
+}
+
+func (c *tcpclient) Init(args ...any) error {
+	c.tc = args[0].(*testcase)
+	c.port = args[1].(uint16)
+	return nil
+}
+
+func (c *tcpclient) HandleMessage(from gen.PID, message any) error {
+	switch m := message.(type) {
+	case int:
+		switch m {
+		case 1:
+			// create tcp conn
+			opt := meta.TCPConnectionOptions{
+				Port: c.port,
+			}
+			client, err := meta.CreateTCPConnection(opt)
+			if err != nil {
+				c.Log().Error("unable to create tcp client: %s", err)
+				c.tc.err <- err
+				return nil
+			}
+
+			id, err := c.SpawnMeta(client, gen.MetaOptions{})
+			if err != nil {
+				c.Log().Error("unable to spawn tcp client meta process: %s", err)
+				c.tc.err <- err
+				return nil
+			}
+			c.id = id
+			c.tc.err <- nil
+			return nil
+
+		case 2: // send a message
+			msg := meta.MessageTCP{
+				Data: []byte("hi"),
+			}
+			if err := c.SendAlias(c.id, msg); err != nil {
+				c.Log().Error("unable to send data: %s", err)
+				c.tc.err <- err
+				return nil
+			}
+			c.tc.err <- nil
+			return nil
+
+		case 3: // send exit to terminate conn
+			if err := c.SendExitMeta(c.id, errors.New("whatever")); err != nil {
+				c.Log().Error("unable to send exit signal: %s", err)
+				c.tc.err <- err
+				return nil
+			}
+			c.tc.err <- nil
+			return nil
+		}
+		panic("unknown cmd")
+
+	case meta.MessageTCPConnect:
+		c.Log().Info("client. 
connection %s with %s", m.ID, m.RemoteAddr.String()) + c.addr = m.RemoteAddr + c.tc.output = m + c.tc.err <- nil + case meta.MessageTCPDisconnect: + c.tc.output = m + c.Log().Info("client connection %s (%s) has terminated", m.ID, c.addr.String()) + c.tc.err <- nil + case meta.MessageTCP: + c.tc.output = m + c.Log().Info("client got tcp packet from %s: %s ", m.ID, string(m.Data)) + c.tc.err <- nil + default: + c.Log().Info("got unknown message from %s: %#v", from, message) + } + return nil +} + +func (t *t16) TestClient(input any) { + defer func() { + t.testcase = nil + }() + + port := uint16(19191) + tc_server := &testcase{"", nil, nil, make(chan error)} + serverpid, err := t.Spawn(factory_tcp, gen.ProcessOptions{}, tc_server, false, port) + if err != nil { + t.Log().Error("unable to spawn process: %s", err) + t.testcase.err <- err + return + } + t.Log().Info("spawned process with tcp servers: %s", serverpid) + + tc_client := &testcase{"", nil, nil, make(chan error)} + clientpid, err := t.Spawn(factory_tcpclient, gen.ProcessOptions{}, tc_client, port) + if err != nil { + t.Log().Error("unable to spawn process: %s", err) + t.testcase.err <- err + return + } + t.Log().Info("spawned process with tcp client: %s", clientpid) + + t.Send(clientpid, 1) // make tcp conn + + if err := tc_server.wait(1); err != nil { + t.Log().Error("server proc should recv MessageTCPConnect. failed: %s", err) + t.testcase.err <- err + return + } + + if err := tc_client.wait(1); err != nil { + t.Log().Error("making tcp conn failed: %s", err) + t.testcase.err <- err + return + } + + // client should recv MessageTCPConnect + if err := tc_client.wait(int(1)); err != nil { + t.Log().Error("didn't recv MessageTCPConnect: %s", err) + t.testcase.err <- err + return + } + + if _, ok := tc_client.output.(meta.MessageTCPConnect); ok == false { + t.Log().Error("incorrect value (exp: meta.MessageTCPConnect): %#v", tc_client.output) + t.testcase.err <- errIncorrect + return + } + + t.Send(clientpid, 2) // ask to send a message + if err := tc_client.wait(1); err != nil { + t.Log().Error("ask to send a message failed: %s", err) + t.testcase.err <- err + return + } + + // server should recv it and send reply + if err := tc_server.wait(1); err != nil { + t.Log().Error("server proc should recv MessageTCP. failed: %s", err) + t.testcase.err <- err + return + } + // client should recv MessageTCP + if err := tc_client.wait(int(1)); err != nil { + t.Log().Error("client didn't recv MessageTCP: %s", err) + t.testcase.err <- err + return + } + if _, ok := tc_client.output.(meta.MessageTCP); ok == false { + t.Log().Error("incorrect value (exp: meta.MessageTCP): %#v", tc_client.output) + t.testcase.err <- errIncorrect + return + } + + t.Send(clientpid, 3) // ask to terminate tcp conn + + if err := tc_server.wait(1); err != nil { + t.Log().Error("server proc should recv MessageTCPDisconnect. 
failed: %s", err) + t.testcase.err <- err + return + } + + if err := tc_client.wait(1); err != nil { + t.Log().Error("term tcp conn failed: %s", err) + t.testcase.err <- err + return + } + + // client should recv MessageTCPDisconnect + if err := tc_client.wait(int(1)); err != nil { + t.Log().Error("didn't recv MessageTCPDisonnect: %s", err) + t.testcase.err <- err + return + } + t.testcase.err <- nil +} + +func TestT16TCP(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + // nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("t16node@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_t16, popt) + if err != nil { + panic(err) + } + + t16cases = []*testcase{ + {"TestServer", nil, nil, make(chan error)}, + {"TestClient", nil, nil, make(chan error)}, + } + for _, tc := range t16cases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/001_local/t017_udp_test.go b/tests/001_local/t017_udp_test.go new file mode 100644 index 00000000..e07aab3e --- /dev/null +++ b/tests/001_local/t017_udp_test.go @@ -0,0 +1,221 @@ +package local + +import ( + "fmt" + "net" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/meta" +) + +var ( + t17cases []*testcase +) + +func factory_t17() gen.ProcessBehavior { + return &t17{} +} + +type t17 struct { + act.Actor + + testcase *testcase +} + +func (t *t17) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func factory_udp() gen.ProcessBehavior { + return &udp{} +} + +type udp struct { + act.Actor + proc bool + tc *testcase +} + +func (u *udp) Init(args ...any) error { + u.tc = args[0].(*testcase) + u.proc = args[1].(bool) + if u.proc { + return nil + } + + // UDP server with no Process + opt := meta.UDPServerOptions{ + Port: 17171, + } + if metaudp, err := meta.CreateUDPServer(opt); err != nil { + u.Log().Error("unable to create udp meta-process: %s", err) + return nil + } else { + if _, err := u.SpawnMeta(metaudp, gen.MetaOptions{}); err != nil { + u.Log().Error("unable to spawn udp meta-process", err) + metaudp.Terminate(err) // to stop UDP-listener + return nil + } + } + + // UDP server with Process + opt = meta.UDPServerOptions{ + Port: 18181, + Process: "handler", + } + if metaudp, err := meta.CreateUDPServer(opt); err != nil { + u.Log().Error("unable to create udp meta-process: %s", err) + return nil + } else { + if _, err := u.SpawnMeta(metaudp, gen.MetaOptions{}); err != nil { + u.Log().Error("unable to spawn udp meta-process", err) + metaudp.Terminate(err) // to stop UDP-listener + return nil + } + } + return nil +} + +func (u *udp) HandleMessage(from gen.PID, message any) error { + switch m := message.(type) { + case meta.MessageUDP: + u.Log().Info("got udp packet from %s: %s ", m.Addr, string(m.Data)) + if u.proc { + m.Data = []byte("proc") + } else { + m.Data = []byte("noproc") + } + if err := u.SendAlias(m.ID, m); err != nil { + u.Log().Error("unable to send to %s: %s", m.ID, 
+		}
+		u.tc.err <- nil
+	default:
+		u.Log().Info("got unknown message from %s: %#v", from, message)
+	}
+	return nil
+}
+
+func (t *t17) TestUDP(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+
+	tcproc := &testcase{"", nil, nil, make(chan error)}
+	procpid, err := t.SpawnRegister("handler", factory_udp, gen.ProcessOptions{}, tcproc, true)
+	if err != nil {
+		t.Log().Error("unable to spawn 'handler' process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	t.Log().Info("spawned udp process with name: %s", procpid)
+
+	tcnoproc := &testcase{"", nil, nil, make(chan error)}
+	noprocpid, err := t.Spawn(factory_udp, gen.ProcessOptions{}, tcnoproc, false)
+	if err != nil {
+		t.Log().Error("unable to spawn udp process: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	t.Log().Info("spawned udp process with no name and udp servers: %s", noprocpid)
+
+	// test proc
+	connproc, err := net.Dial("udp", "127.0.0.1:18181")
+	if err != nil {
+		t.Log().Error("unable to connect to the 'proc' udp server: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	defer connproc.Close()
+	if _, err := connproc.Write([]byte("test proc")); err != nil {
+		t.Log().Error("unable to send to 'proc': %s", err)
+		t.testcase.err <- err
+		return
+	}
+	if err := tcproc.wait(1); err != nil {
+		t.Log().Error("udp with proc didn't receive udp-packet: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	buf := make([]byte, 10)
+	if n, err := connproc.Read(buf); err != nil {
+		t.Log().Error("unable to read from udp socket: %s", err)
+		t.testcase.err <- err
+		return
+	} else {
+		data := buf[:n]
+		if reflect.DeepEqual(data, []byte("proc")) == false {
+			t.Log().Error("got incorrect data: %v (exp: 'proc')", data)
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	// test noproc
+	connnoproc, err := net.Dial("udp", "127.0.0.1:17171")
+	if err != nil {
+		t.Log().Error("unable to connect to the 'noproc' udp server: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	defer connnoproc.Close()
+	if _, err := connnoproc.Write([]byte("test no proc")); err != nil {
+		t.Log().Error("unable to send to 'no proc': %s", err)
+		t.testcase.err <- err
+		return
+	}
+	if err := tcnoproc.wait(1); err != nil {
+		t.Log().Error("udp with no proc didn't receive udp-packet: %s", err)
+		t.testcase.err <- err
+		return
+	}
+	if n, err := connnoproc.Read(buf); err != nil {
+		t.Log().Error("unable to read from udp socket: %s", err)
+		t.testcase.err <- err
+		return
+	} else {
+		data := buf[:n]
+		if reflect.DeepEqual(data, []byte("noproc")) == false {
+			t.Log().Error("got incorrect data: %v (exp: 'noproc')", data)
+			t.testcase.err <- errIncorrect
+			return
+		}
+	}
+
+	t.testcase.err <- nil
+}
+
+func TestT17UDP(t *testing.T) {
+	nopt := gen.NodeOptions{}
+	nopt.Log.DefaultLogger.Disable = true
+	//nopt.Log.Level = gen.LogLevelTrace
+	node, err := ergo.StartNode("t17node@localhost", nopt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	popt := gen.ProcessOptions{}
+	pid, err := node.Spawn(factory_t17, popt)
+	if err != nil {
+		panic(err)
+	}
+
+	t17cases = []*testcase{
+		{"TestUDP", nil, nil, make(chan error)},
+	}
+	for _, tc := range t17cases {
+		t.Run(tc.name, func(t *testing.T) {
+			node.Send(pid, tc)
+			if err := tc.wait(3); err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+
+	node.Stop()
+}
diff --git a/tests/001_local/txxx_template_test.go b/tests/001_local/txxx_template_test.go
new file mode 100644
index 00000000..ec401805
--- /dev/null
+++ b/tests/001_local/txxx_template_test.go
@@ -0,0 +1,92 @@
+package local
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"ergo.services/ergo"
+	"ergo.services/ergo/act"
+	"ergo.services/ergo/gen"
+)
+
+//
+// this is the template for writing new tests
+//
+
+var (
+	tXXXcases []*testcase
+)
+
+func factory_tXXX() gen.ProcessBehavior {
+	return &tXXX{}
+}
+
+type tXXX struct {
+	act.Actor
+
+	
testcase *testcase +} + +func (t *tXXX) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *tXXX) TestFeature1(input any) { + defer func() { + t.testcase = nil + }() + + t.testcase.err <- nil +} + +func (t *tXXX) TestFeatureX(input any) { + defer func() { + t.testcase = nil + }() + + t.testcase.err <- nil +} + +func TestTXXXtemplate(t *testing.T) { + nopt := gen.NodeOptions{} + nopt.Log.DefaultLogger.Disable = true + //nopt.Log.Level = gen.LogLevelTrace + node, err := ergo.StartNode("tXXXnode@localhost", nopt) + if err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node.Spawn(factory_tXXX, popt) + if err != nil { + panic(err) + } + + tXXXcases = []*testcase{ + &testcase{"TestFeature1", nil, nil, make(chan error)}, + &testcase{"TestFeatureX", nil, nil, make(chan error)}, + } + for _, tc := range tXXXcases { + t.Run(tc.name, func(t *testing.T) { + node.Send(pid, tc) + if err := tc.wait(3); err != nil { + t.Fatal(err) + } + }) + } + + node.Stop() +} diff --git a/tests/002_distributed/common.go b/tests/002_distributed/common.go new file mode 100644 index 00000000..8661daa6 --- /dev/null +++ b/tests/002_distributed/common.go @@ -0,0 +1,32 @@ +package distributed + +import ( + "fmt" + "time" + + "ergo.services/ergo/gen" +) + +var ( + errIncorrect = fmt.Errorf("incorrect") +) + +type initcase struct{} + +type testcase struct { + name string + input any + output any + err chan error +} + +func (t *testcase) wait(timeout int) error { + timer := time.NewTimer(time.Second * time.Duration(timeout)) + defer timer.Stop() + select { + case <-timer.C: + return gen.ErrTimeout + case e := <-t.err: + return e + } +} diff --git a/tests/002_distributed/t000_connect_test.go b/tests/002_distributed/t000_connect_test.go new file mode 100644 index 00000000..58ea9a55 --- /dev/null +++ b/tests/002_distributed/t000_connect_test.go @@ -0,0 +1,112 @@ +package distributed + +import ( + "testing" + "time" + + "ergo.services/ergo" + "ergo.services/ergo/gen" +) + +// TODO test static route + +func TestT0NodeBasic(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + // options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT0node1basic@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + if _, err := node1.Network().GetNode("unknown@node"); err != gen.ErrNoRoute { + t.Fatal("must be gen.ErrNoRoute here") + } + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + node2, err := ergo.StartNode("distT0node2basic@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + // make connection to the node2 + remote1, err := node1.Network().GetNode(node2.Name()) + if err != nil { + t.Fatal(err) + } + + // TODO rework it + // race condition. should wait a bit until + // new connection got registered on the node2. 
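+	// A bounded polling loop is a sketch of a less fragile alternative to the
+	// fixed sleep below, using only the API this test already relies on:
+	//
+	//	for i := 0; i < 100; i++ {
+	//		if _, err := node2.Network().Node(node1.Name()); err == nil {
+	//			break
+	//		}
+	//		time.Sleep(10 * time.Millisecond)
+	//	}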
+ time.Sleep(100 * time.Millisecond) + + // take the existing connection with node1 + remote2, err := node2.Network().Node(node1.Name()) + if err != nil { + t.Fatal(err) + } + + remote1Info := remote1.Info() + remote2Info := remote2.Info() + + if remote1Info.Node != node2.Name() { + t.Fatal("incorrect remote1 node name") + } + if remote2Info.Node != node1.Name() { + t.Fatal("incorrect remote2 node name") + } + + if remote1Info.Version != node2.Version() { + t.Fatal("incorrect remote1 version") + } + if remote2Info.Version != node1.Version() { + t.Fatal("incorrect remote2 version") + } + + acceptors1, err := node1.Network().Acceptors() + if err != nil { + t.Fatal(err) + } + flags1 := acceptors1[0].NetworkFlags() + if flags1 != remote2Info.NetworkFlags { + t.Fatal("incorrect network flags on remote2") + } + + acceptors2, err := node2.Network().Acceptors() + if err != nil { + t.Fatal(err) + } + flags2 := acceptors2[0].NetworkFlags() + if flags2 != remote1Info.NetworkFlags { + t.Fatal("incorrect network flags on remote1") + } + + if node1.Network().MaxMessageSize() != options1.Network.MaxMessageSize { + t.Fatal("incorrect MaxMessageSize on node1 (ignored option)") + } + if node2.Network().MaxMessageSize() != options2.Network.MaxMessageSize { + t.Fatal("incorrect MaxMessageSize on node2 (ignored option)") + } + + if node1.Network().MaxMessageSize() != remote2Info.MaxMessageSize { + t.Fatal("incorrect MaxMessageSize (ignored on node2)") + } + if node2.Network().MaxMessageSize() != remote1Info.MaxMessageSize { + t.Fatal("incorrect MaxMessageSize (ignored on node1)") + } + + if acceptors2[0].Info().HandshakeVersion != remote1Info.HandshakeVersion { + t.Fatal("handshake version mismatch") + } + + if acceptors2[0].Info().ProtoVersion != remote1Info.ProtoVersion { + t.Fatal("proto version mismatch") + } +} diff --git a/tests/002_distributed/t001_remote_spawn_test.go b/tests/002_distributed/t001_remote_spawn_test.go new file mode 100644 index 00000000..aaa6d6d2 --- /dev/null +++ b/tests/002_distributed/t001_remote_spawn_test.go @@ -0,0 +1,279 @@ +package distributed + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +type testServerRemoteSpawn struct { + act.Actor +} + +func factoryTestServerRemoteSpawn() gen.ProcessBehavior { + return &testServerRemoteSpawn{} +} + +func TestT1RemoteNodeSpawn(t *testing.T) { + options1 := gen.NodeOptions{ + Env: map[gen.Env]any{ + "env1": 123, + "env2": "example", + }, + } + options1.Network.Cookie = "123" + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + options1.Security.ExposeEnvRemoteSpawn = true + node1, err := ergo.StartNode("distT1node1remotespawn@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Log.Level = gen.LogLevelTrace + options2.Log.DefaultLogger.Disable = true + options2.Security.ExposeEnvInfo = true + node2, err := ergo.StartNode("distT1node2remotespawn@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + node2.Network().EnableSpawn("tst", factoryTestServerRemoteSpawn) + + // make connection to the node2 + remote1, err := node1.Network().GetNode(node2.Name()) + if err != nil { + t.Fatal(err) + } + + // TODO spawn unknown name => gen.ErrUnknown + + pid, err := remote1.Spawn("tst", gen.ProcessOptions{}) + if err != nil { + t.Fatal(err) + } + pidInfo, err := node2.ProcessInfo(pid) + if err != nil { + 
t.Fatal(err) + } + if pidInfo.LogLevel != node1.Log().Level() { + t.Fatalf("mismatch log level %d", pidInfo.LogLevel) + } + if pidInfo.Parent != node1.PID() { + t.Fatal("mismatch parent PID") + } + if pidInfo.Leader != node1.PID() { + t.Fatal("mismatch leader PID") + } + + if reflect.DeepEqual(pidInfo.Env, node1.EnvList()) == false { + t.Fatal("mismatch process env") + } + + // TODO spawn one more process with the same registered name => gen.ErrTaken + + pid, err = remote1.SpawnRegister("regtst", "tst", gen.ProcessOptions{}) + pidInfo, err = node2.ProcessInfo(pid) + if err != nil { + t.Fatal(err) + } + if pidInfo.Name != "regtst" { + t.Fatal("process has no registered name") + } + if pidInfo.Parent != node1.PID() { + t.Fatal("mismatch parent PID") + } + if pidInfo.Leader != node1.PID() { + t.Fatal("mismatch leader PID") + } + +} + +func factory_t1remotespawn() gen.ProcessBehavior { + return &t1remotespawn{} +} + +type t1remotespawn struct { + act.Actor + + testcase *testcase +} + +func (t *t1remotespawn) HandleMessage(from gen.PID, message any) error { + + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +// +// test methods +// + +func (t *t1remotespawn) TestSpawn(input any) { + defer func() { + t.testcase = nil + }() + + t.Log().SetLevel(gen.LogLevelWarning) + t.SetEnv("penv", "pval") + + node2 := t.testcase.input.(gen.Node) + + if _, err := t.RemoteSpawn(node2.Name(), "unknown", gen.ProcessOptions{}); err != gen.ErrNameUnknown { + t.testcase.err <- fmt.Errorf("expected gen.ErrNameUnknown, got %q", err) + return + + } + pid, err := t.RemoteSpawn(node2.Name(), "tst", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pidInfo, err := node2.ProcessInfo(pid) + if err != nil { + t.testcase.err <- err + return + } + if pidInfo.LogLevel != t.Log().Level() { + t.testcase.err <- fmt.Errorf("mismatch log level %d", pidInfo.LogLevel) + return + } + if pidInfo.Parent != t.PID() { + t.testcase.err <- fmt.Errorf("mismatch parent PID") + return + } + if pidInfo.Leader != t.Leader() { + t.testcase.err <- fmt.Errorf("mismatch leader PID") + return + } + + if reflect.DeepEqual(pidInfo.Env, t.EnvList()) == false { + t.testcase.err <- fmt.Errorf("mismatch process env") + return + } + + t.testcase.err <- nil +} + +func (t *t1remotespawn) TestSpawnRegister(input any) { + defer func() { + t.testcase = nil + }() + + t.Log().SetLevel(gen.LogLevelDebug) + t.SetEnv("penv", "pval") + + node2 := t.testcase.input.(gen.Node) + pid, err := t.RemoteSpawnRegister(node2.Name(), "tst", "regname", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pidInfo, err := node2.ProcessInfo(pid) + if err != nil { + t.testcase.err <- err + return + } + if pidInfo.Name != "regname" { + t.testcase.err <- fmt.Errorf("mismatch registered process name") + return + } + if pidInfo.LogLevel != t.Log().Level() { + t.testcase.err <- fmt.Errorf("mismatch log level %d", pidInfo.LogLevel) + return + } + if pidInfo.Parent != t.PID() { + t.testcase.err <- fmt.Errorf("mismatch parent PID") + return + } + if pidInfo.Leader != t.Leader() { + t.testcase.err <- fmt.Errorf("mismatch leader PID") + return + } + + if reflect.DeepEqual(pidInfo.Env, t.EnvList()) == false 
{ + t.testcase.err <- fmt.Errorf("mismatch process env") + return + } + if _, err := t.RemoteSpawnRegister(node2.Name(), "tst", "regname", gen.ProcessOptions{}); err != gen.ErrTaken { + t.testcase.err <- fmt.Errorf("expected gen.ErrTaken, got %q", err) + return + } + + t.testcase.err <- nil +} + +func TestT1ProcessRemoteSpawn(t *testing.T) { + options1 := gen.NodeOptions{ + Env: map[gen.Env]any{ + "env1": 123, + "env2": "example", + }, + } + options1.Network.Cookie = "12345" + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + options1.Security.ExposeEnvRemoteSpawn = true + node1, err := ergo.StartNode("distT1node1processremotespawn@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "12345" + options2.Log.DefaultLogger.Disable = true + options2.Security.ExposeEnvInfo = true + node2, err := ergo.StartNode("distT1node2processremotespawn@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + node2.Network().EnableSpawn("tst", factoryTestServerRemoteSpawn) + + // make connection to the node2 + if _, err := node1.Network().GetNode(node2.Name()); err != nil { + t.Fatal(err) + } + + popt := gen.ProcessOptions{} + pid, err := node1.Spawn(factory_t1remotespawn, popt) + if err != nil { + panic(err) + } + + cases := []*testcase{ + {"TestSpawn", node2, nil, make(chan error)}, + {"TestSpawnRegister", node2, nil, make(chan error)}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + node1.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } + +} diff --git a/tests/002_distributed/t002_remote_app_start_test.go b/tests/002_distributed/t002_remote_app_start_test.go new file mode 100644 index 00000000..54cf849a --- /dev/null +++ b/tests/002_distributed/t002_remote_app_start_test.go @@ -0,0 +1,85 @@ +package distributed + +import ( + "fmt" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +func createTestRemApp() gen.ApplicationBehavior { + return &testremapp{} +} + +type testremapp struct { +} + +func (a *testremapp) Load(node gen.Node, args ...any) (gen.ApplicationSpec, error) { + return gen.ApplicationSpec{ + Name: "test rem app", + Group: []gen.ApplicationMemberSpec{ + { + Name: "test app member", + Factory: factory_testappmember, + }, + }, + }, nil +} + +func (a *testremapp) Start(mode gen.ApplicationMode) {} +func (a *testremapp) Terminate(reason error) {} + +func factory_testappmember() gen.ProcessBehavior { + return &testappmember{} +} + +type testappmember struct { + act.Actor +} + +func TestT2RemoteAppStart(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + // options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT2node1remoteapp@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + node2, err := ergo.StartNode("distT2node2remoteapp@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + // make connection to the node2 + remoteNode2, err := node1.Network().GetNode(node2.Name()) + if err != nil { + t.Fatal(err) + } + + appname, err := node2.ApplicationLoad(createTestRemApp()) + if err != nil { + t.Fatal(err) + } + + 
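+	// The remote start below must be allowed explicitly first: mirroring
+	// EnableSpawn in the remote-spawn tests, EnableApplicationStart exposes
+	// the loaded application to remote nodes. A hypothetical sketch (not
+	// asserted by this test): a remote ApplicationStart for a name that was
+	// loaded but not enabled would presumably be rejected, e.g.
+	//
+	//	// err := remoteNode2.ApplicationStart("not-enabled-app", gen.ApplicationOptions{})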
node2.Network().EnableApplicationStart(appname) + + if err := remoteNode2.ApplicationStart(appname, gen.ApplicationOptions{}); err != nil { + t.Fatal(err) + } + + if err := remoteNode2.ApplicationStart("unknown", gen.ApplicationOptions{}); err != gen.ErrNameUnknown { + t.Fatal(fmt.Errorf("expected gen.ErrNameUnknown, got %q", err)) + } + +} diff --git a/tests/002_distributed/t003_send_test.go b/tests/002_distributed/t003_send_test.go new file mode 100644 index 00000000..b6e1284f --- /dev/null +++ b/tests/002_distributed/t003_send_test.go @@ -0,0 +1,532 @@ +package distributed + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +// send by pid +// by process name +// by alias + +// send to unknown pid/process id/alias + +// send compressed +// send exit (with custom reason. must be registered in edf) + +// TODO test send by process name with registered name in atom cache +// using edf.RegisterAtom. + +var ( + t3pongCh chan any + t3pongAlias gen.Alias +) + +func factory_t3pong() gen.ProcessBehavior { + return &t3pong{} +} + +type t3pong struct { + act.Actor +} + +func (t *t3pong) HandleMessage(from gen.PID, message any) error { + select { + case t3pongCh <- message: + default: + } + t3pongAlias, _ = t.CreateAlias() + return nil +} + +func (t *t3pong) Terminate(reason error) { + err := errors.Unwrap(reason) + select { + case t3pongCh <- err: + default: + } +} + +func factory_t3() gen.ProcessBehavior { + return &t3{} +} + +type t3 struct { + act.Actor + + remote gen.Atom + testcase *testcase +} + +func (t *t3) Init(args ...any) error { + t.remote = args[0].(gen.Atom) + return nil +} +func (t *t3) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t3) TestSendRemotePID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any, 1) + pingvalue = 123 + if err := t.Send(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendImportantRemotePID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any, 1) + pingvalue = 123 + if err := t.SendImportant(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + // send to unknown pid + pid.ID = 100000 // unknown pid + if err := t.SendImportant(pid, 
pingvalue); err != gen.ErrProcessUnknown { + t.testcase.err <- gen.ErrIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendRemoteProcessID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + regName := gen.Atom("regpong") + _, err := t.RemoteSpawnRegister(t.remote, "pong", regName, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any, 1) + pingvalue = 123.456 + pingProcessID := gen.ProcessID{Name: regName, Node: t.remote} + if err := t.Send(pingProcessID, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendImportantRemoteProcessID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + regName := gen.Atom("regpongimportant") + _, err := t.RemoteSpawnRegister(t.remote, "pong", regName, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any, 1) + pingvalue = 123.456 + pingProcessID := gen.ProcessID{Name: regName, Node: t.remote} + if err := t.SendImportant(pingProcessID, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + pingProcessID.Name = "unknown_name" + if err := t.SendImportant(pingProcessID, pingvalue); err != gen.ErrProcessUnknown { + t.testcase.err <- gen.ErrIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendRemoteAlias(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any) + pingvalue = "test value" + if err := t.Send(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + emptyAlias := gen.Alias{} + if t3pongAlias == emptyAlias { + t.testcase.err <- fmt.Errorf("alias hasn't been created") + return + } + if err := t.Send(t3pongAlias, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendImportantRemoteAlias(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any, 1) + pingvalue = "test value" + if err := t.SendImportant(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + 
return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + emptyAlias := gen.Alias{} + if t3pongAlias == emptyAlias { + t.testcase.err <- fmt.Errorf("alias hasn't been created") + return + } + if err := t.SendImportant(t3pongAlias, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + t3pongAlias.ID[1] = 0 // unknown alias + if err := t.SendImportant(t3pongAlias, pingvalue); err != gen.ErrProcessUnknown { + t.testcase.err <- err + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendRemoteTooLarge(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + remote, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + } + info := remote.Info() + if info.MaxMessageSize == 0 { + t.testcase.err <- fmt.Errorf("MaxMessageSize is not set on the remote node. Unable to test") + return + } + // exceed the limit by +1 + pingvalue = lib.RandomString(info.MaxMessageSize + 1) + pingProcessID := gen.ProcessID{Name: "whatever", Node: t.remote} + if err := t.Send(pingProcessID, pingvalue); err != gen.ErrTooLarge { + t.testcase.err <- err + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendRemoteCompress(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + remote, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + } + info := remote.Info() + if info.MaxMessageSize == 0 { + t.testcase.err <- fmt.Errorf("MaxMessageSize is not set on the remote node. Unable to test") + return + } + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + if info.MaxMessageSize*2 < gen.DefaultCompressionThreshold { + t.testcase.err <- fmt.Errorf("MaxMessageSize too small. Unable to test compression") + return + } + // string value has good compression ratio + s := lib.RandomString(info.MaxMessageSize + info.MaxMessageSize/2) + if len(s) < gen.DefaultCompressionThreshold { + t.testcase.err <- fmt.Errorf("MaxMessageSize too small. Unable to test compression") + return + } + pingvalue = s + + if err := t.Send(pid, pingvalue); err != gen.ErrTooLarge { + t.testcase.err <- fmt.Errorf("expected gen.ErrTooLarge, but got: %v", err) + return + } + + t.SetCompression(true) + + t3pongCh = make(chan any) + if err := t.Send(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + t.testcase.err <- nil +} + +func (t *t3) TestSendRemoteExit(input any) { + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + t3pongCh = make(chan any) + // use gen.ErrTaken as a sample reason. 
This error is already + // registered in edf for sending over the network + if err := t.SendExit(pid, gen.ErrTaken); err != nil { + t.testcase.err <- err + return + } + + select { + case pong := <-t3pongCh: + // pong must be gen.ErrTaken + if pong != gen.ErrTaken { + t.testcase.err <- fmt.Errorf("pong value mismatch, expected gen.ErrTaken, got: %v", pong) + return + } + case <-time.NewTimer(time.Second).C: + t.testcase.err <- gen.ErrTimeout + return + } + + t.testcase.err <- nil +} + +func TestT3SendRemote(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT0node1SendRemote@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + options2.Log.Level = gen.LogLevelTrace + node2, err := ergo.StartNode("distT0node2SendRemote@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + if err := node2.Network().EnableSpawn("pong", factory_t3pong); err != nil { + t.Fatal(err) + } + + // make connection to the node2 + if _, err := node1.Network().GetNode(node2.Name()); err != nil { + t.Fatal(err) + } + + pid, err := node1.Spawn(factory_t3, gen.ProcessOptions{}, node2.Name()) + if err != nil { + panic(err) + } + + t3cases := []*testcase{ + {"TestSendRemotePID", nil, nil, make(chan error)}, + {"TestSendImportantRemotePID", nil, nil, make(chan error)}, + {"TestSendRemoteProcessID", nil, nil, make(chan error)}, + {"TestSendImportantRemoteProcessID", nil, nil, make(chan error)}, + {"TestSendRemoteAlias", nil, nil, make(chan error)}, + {"TestSendImportantRemoteAlias", nil, nil, make(chan error)}, + {"TestSendRemoteTooLarge", nil, nil, make(chan error)}, + {"TestSendRemoteCompress", nil, nil, make(chan error)}, + {"TestSendRemoteExit", nil, nil, make(chan error)}, + } + for _, tc := range t3cases { + t.Run(tc.name, func(t *testing.T) { + node1.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/tests/002_distributed/t004_call_test.go b/tests/002_distributed/t004_call_test.go new file mode 100644 index 00000000..89942f42 --- /dev/null +++ b/tests/002_distributed/t004_call_test.go @@ -0,0 +1,322 @@ +package distributed + +// call by pid, process name, alias +// unknown process + +// TODO test call by process name with registered name in atom cache +// using edf.RegisterAtom. 
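+// The tests below share one round-trip pattern: spawn the "pong" actor on
+// the remote node (its HandleCall echoes every request back), then call it
+// by PID, by registered name (gen.ProcessID), or by alias and compare the
+// response with what was sent. A minimal sketch of the pattern:
+//
+//	pid, _ := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{})
+//	pong, err := t.Call(pid, 123) // expect pong == 123, err == nil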
+ +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" +) + +var ( + t4pongAlias gen.Alias +) + +func factory_t4pong() gen.ProcessBehavior { + return &t4pong{} +} + +type t4pong struct { + act.Actor +} + +func (t *t4pong) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + t4pongAlias, _ = t.CreateAlias() + return request, nil // just send back this request as a response value +} + +func factory_t4() gen.ProcessBehavior { + return &t4{} +} + +type t4 struct { + act.Actor + + remote gen.Atom + testcase *testcase +} + +func (t *t4) Init(args ...any) error { + t.remote = args[0].(gen.Atom) + return nil +} +func (t *t4) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t4) TestCallRemotePID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pingvalue = 123 + pong, err := t.Call(pid, pingvalue) + if err != nil { + t.testcase.err <- err + return + } + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + + t.testcase.err <- nil +} + +func (t *t4) TestCallImportantRemotePID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pingvalue = 123 + pong, err := t.CallImportant(pid, pingvalue) + if err != nil { + t.testcase.err <- err + return + } + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + // unknown pid + pid.ID = 10000 + if _, err := t.CallImportant(pid, pingvalue); err != gen.ErrProcessUnknown { + t.testcase.err <- gen.ErrIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t4) TestCallRemoteProcessID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + regName := gen.Atom("regpong") + _, err := t.RemoteSpawnRegister(t.remote, "pong", regName, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pingvalue = 123.456 + pingProcessID := gen.ProcessID{Name: regName, Node: t.remote} + pong, err := t.Call(pingProcessID, pingvalue) + if err != nil { + t.testcase.err <- err + return + } + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + + t.testcase.err <- nil +} + +func (t *t4) TestCallImportantRemoteProcessID(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + regName := gen.Atom("regpongimportant") + _, err := t.RemoteSpawnRegister(t.remote, "pong", regName, gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pingvalue = 123.456 + pingProcessID := gen.ProcessID{Name: regName, Node: t.remote} + pong, err := t.CallImportant(pingProcessID, pingvalue) + if err != nil { + t.testcase.err <- err + return + } + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + 
return + } + pingProcessID.Name = "unknown_process" + if _, err := t.CallImportant(pingProcessID, pingvalue); err != gen.ErrProcessUnknown { + t.testcase.err <- gen.ErrIncorrect + return + } + + t.testcase.err <- nil +} + +func (t *t4) TestCallRemoteAlias(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pingvalue = "test value" + if _, err := t.Call(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + emptyAlias := gen.Alias{} + if t4pongAlias == emptyAlias { + t.testcase.err <- fmt.Errorf("alias hasn't been created") + return + } + pong, err := t.Call(t4pongAlias, pingvalue) + + if err != nil { + t.testcase.err <- err + return + } + + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + + t.testcase.err <- nil +} + +func (t *t4) TestCallImportantRemoteAlias(input any) { + var pingvalue any + defer func() { + t.testcase = nil + }() + + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + pingvalue = "test value" + if _, err := t.CallImportant(pid, pingvalue); err != nil { + t.testcase.err <- err + return + } + + emptyAlias := gen.Alias{} + if t4pongAlias == emptyAlias { + t.testcase.err <- fmt.Errorf("alias hasn't been created") + return + } + + pong, err := t.CallImportant(t4pongAlias, pingvalue) + if err != nil { + t.testcase.err <- err + return + } + + if reflect.DeepEqual(pingvalue, pong) == false { + t.testcase.err <- fmt.Errorf("pong value mismatch") + return + } + + t4pongAlias.ID[1] = 0 // unknown alias + if _, err := t.CallImportant(t4pongAlias, pingvalue); err != gen.ErrProcessUnknown { + t.testcase.err <- gen.ErrIncorrect + return + } + + t.testcase.err <- nil +} + +func TestT4CallRemote(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT0node1CallRemote@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + options2.Log.Level = gen.LogLevelTrace + node2, err := ergo.StartNode("distT0node2CallRemote@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + if err := node2.Network().EnableSpawn("pong", factory_t4pong); err != nil { + t.Fatal(err) + } + + // make connection to the node2 + if _, err := node1.Network().GetNode(node2.Name()); err != nil { + t.Fatal(err) + } + + pid, err := node1.Spawn(factory_t4, gen.ProcessOptions{}, node2.Name()) + if err != nil { + panic(err) + } + + t4cases := []*testcase{ + {"TestCallRemotePID", nil, nil, make(chan error)}, + {"TestCallImportantRemotePID", nil, nil, make(chan error)}, + {"TestCallRemoteProcessID", nil, nil, make(chan error)}, + {"TestCallImportantRemoteProcessID", nil, nil, make(chan error)}, + {"TestCallRemoteAlias", nil, nil, make(chan error)}, + {"TestCallImportantRemoteAlias", nil, nil, make(chan error)}, + } + for _, tc := range t4cases { + t.Run(tc.name, func(t *testing.T) { + node1.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/tests/002_distributed/t005_link_test.go 
b/tests/002_distributed/t005_link_test.go
new file mode 100644
index 00000000..16bc1c4f
--- /dev/null
+++ b/tests/002_distributed/t005_link_test.go
@@ -0,0 +1,872 @@
+package distributed
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"ergo.services/ergo"
+	"ergo.services/ergo/act"
+	"ergo.services/ergo/gen"
+	"ergo.services/ergo/lib"
+)
+
+// Link PID, ProcessID, Alias
+// handle termination of a linked process
+// handle a terminated network connection
+// send Event
+// Link node
+
+func factory_t5pong() gen.ProcessBehavior {
+	return &t5pong{}
+}
+
+type t5pong struct {
+	act.Actor
+}
+
+func (t *t5pong) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) {
+	req := request.(string)
+	switch req {
+	case "alias":
+		alias, _ := t.CreateAlias()
+		return alias, nil
+	case "event":
+		name := gen.Atom(lib.RandomString(10))
+		if _, err := t.RegisterEvent(name, gen.EventOptions{}); err != nil {
+			t.Log().Error("unable to register event: %s", err)
+			break
+		}
+		ev := gen.Event{Name: name, Node: t.Node().Name()}
+		return ev, nil
+	}
+	return nil, nil
+}
+
+func factory_t5() gen.ProcessBehavior {
+	return &t5{}
+}
+
+type t5 struct {
+	act.Actor
+
+	remote   gen.Atom
+	testcase *testcase
+}
+
+func (t *t5) Init(args ...any) error {
+	t.remote = args[0].(gen.Atom)
+	return nil
+}
+func (t *t5) HandleMessage(from gen.PID, message any) error {
+	if t.testcase == nil {
+		t.testcase = message.(*testcase)
+		message = initcase{}
+	}
+
+	// get method by name
+	method := reflect.ValueOf(t).MethodByName(t.testcase.name)
+	if method.IsValid() == false {
+		t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name)
+		t.testcase = nil
+		return nil
+	}
+	method.Call([]reflect.Value{reflect.ValueOf(message)})
+	return nil
+}
+
+func (t *t5) TestLinkRemotePID(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+	pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	newtc := &testcase{"LinkingPID", nil, nil, make(chan error)}
+	t.Send(pid, newtc)
+	if err := newtc.wait(1); err != nil {
+		t.Node().Kill(pid)
+		t.testcase.err <- err
+		return
+	}
+	reason := gen.TerminateReasonShutdown
+	t.Send(pid, reason)
+	if err := newtc.wait(1); err != nil {
+		t.Node().Kill(pid)
+		t.testcase.err <- err
+		return
+	}
+
+	exit := newtc.output.(gen.MessageExitPID)
+	t.Node().Kill(pid)
+	if exit.Reason != reason {
+		t.testcase.err <- fmt.Errorf("incorrect termination reason: %s", exit.Reason)
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func (t *t5) TestLinkRemotePIDNodeDown(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+	pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	newtc := &testcase{"LinkingPIDNodeDown", nil, nil, make(chan error)}
+	t.Send(pid, newtc)
+	if err := newtc.wait(1); err != nil {
+		t.Node().Kill(pid)
+		t.testcase.err <- err
+		return
+	}
+	t.Send(pid, fmt.Errorf("doDisconnect"))
+	if err := newtc.wait(1); err != nil {
+		t.Node().Kill(pid)
+		t.testcase.err <- err
+		return
+	}
+
+	exit := newtc.output.(gen.MessageExitPID)
+	t.Node().Kill(pid)
+	if exit.Reason != gen.ErrNoConnection {
+		t.testcase.err <- fmt.Errorf("incorrect exit reason: %s", exit.Reason)
+		return
+	}
+
+	t.testcase.err <- nil
+}
+
+func (t *t5) TestLinkRemoteProcessID(input any) {
+	defer func() {
+		t.testcase = nil
+	}()
+	pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote)
+	if err != nil {
+		t.testcase.err <- err
+		return
+	}
+	newtc := &testcase{"LinkingProcessID", nil, nil,
make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitProcessID) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- fmt.Errorf("incorrect termination reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t5) TestLinkRemoteProcessIDNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"LinkingProcessIDNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitProcessID) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect exit reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t5) TestLinkRemoteAlias(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"LinkingAlias", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitAlias) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- fmt.Errorf("incorrect termination reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t5) TestLinkRemoteAliasNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"LinkingAliasNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitAlias) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect exit reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t5) TestLinkRemoteEvent(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"LinkingEvent", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitEvent) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- 
fmt.Errorf("incorrect termination reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t5) TestLinkRemoteEventNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"LinkingEventNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitEvent) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect exit reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t5) TestLinkRemoteNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t5, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"LinkingNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageExitNode) + t.Node().Kill(pid) + if exit.Name != t.remote { + t.testcase.err <- fmt.Errorf("incorrect exit node name: %s", exit.Name) + return + } + + t.testcase.err <- nil +} + +func (t *t5) LinkingPID(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + if err := t.LinkPID(pid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksPID[0] != pid { + t.testcase.err <- fmt.Errorf("link pid is incorrect: %s (must be: %s)", info.LinksPID[0], pid) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageExitPID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingPIDNodeDown(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + if err := t.LinkPID(pid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksPID[0] != pid { + t.testcase.err <- fmt.Errorf("link pid is incorrect: %s (must be: %s)", info.LinksPID[0], pid) + return + } + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageExitPID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingProcessID(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawnRegister(t.remote, "pong", "regpong", 
gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + processid := gen.ProcessID{Name: "regpong", Node: t.remote} + if err := t.LinkProcessID(processid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksProcessID[0] != processid { + t.testcase.err <- fmt.Errorf("link process id is incorrect: %s (must be: %s)", info.LinksProcessID[0], processid) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageExitProcessID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingProcessIDNodeDown(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawnRegister(t.remote, "pong", "regpongdown", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + processid := gen.ProcessID{Name: "regpongdown", Node: t.remote} + if err := t.LinkProcessID(processid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksProcessID[0] != processid { + t.testcase.err <- fmt.Errorf("link process id is incorrect: %s (must be: %s)", info.LinksProcessID[0], processid) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageExitProcessID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingAlias(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "alias") + if err != nil { + t.testcase.err <- err + return + } + alias := v.(gen.Alias) + + if err := t.LinkAlias(alias); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksAlias[0] != alias { + t.testcase.err <- fmt.Errorf("link alias is incorrect: %s (must be: %s)", info.LinksAlias[0], alias) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageExitAlias: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingAliasNodeDown(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "alias") + if err != nil { + t.testcase.err <- err + return + } + alias := v.(gen.Alias) + + if err := t.LinkAlias(alias); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksAlias[0] != alias { + t.testcase.err <- fmt.Errorf("link alias is incorrect: %s (must be: %s)", info.LinksAlias[0], alias) + return + } + t.testcase.input = pid + t.testcase.err <- nil 
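+		// with SetTrapExit(true) above, a broken link is delivered as a
+		// message instead of terminating this process: once the connection
+		// drops (the "doDisconnect" branch below), a gen.MessageExitAlias
+		// arrives with Reason == gen.ErrNoConnection, which is exactly what
+		// TestLinkRemoteAliasNodeDown asserts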
+ + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageExitAlias: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingEvent(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "event") + if err != nil { + t.testcase.err <- err + return + } + ev := v.(gen.Event) + + if _, err := t.LinkEvent(ev); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksEvent[0] != ev { + t.testcase.err <- fmt.Errorf("link event is incorrect: %s (must be: %s)", info.LinksEvent[0], ev) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageExitEvent: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingEventNodeDown(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "event") + if err != nil { + t.testcase.err <- err + return + } + ev := v.(gen.Event) + + if _, err := t.LinkEvent(ev); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksEvent[0] != ev { + t.testcase.err <- fmt.Errorf("link event is incorrect: %s (must be: %s)", info.LinksEvent[0], ev) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageExitEvent: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t5) LinkingNodeDown(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + + if err := t.LinkNode(t.remote); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.LinksNode[0] != t.remote { + t.testcase.err <- fmt.Errorf("link node is incorrect: %s (must be: %s)", info.LinksNode[0], t.remote) + return + } + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageExitNode: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func TestT5LinkRemote(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT0node1LinkRemote@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + 
options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + options2.Log.Level = gen.LogLevelTrace + node2, err := ergo.StartNode("distT0node2LinkRemote@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + if err := node2.Network().EnableSpawn("pong", factory_t5pong); err != nil { + t.Fatal(err) + } + + // make connection to the node2 + if _, err := node1.Network().GetNode(node2.Name()); err != nil { + t.Fatal(err) + } + + pid, err := node1.Spawn(factory_t5, gen.ProcessOptions{}, node2.Name()) + if err != nil { + panic(err) + } + + t5cases := []*testcase{ + {"TestLinkRemotePID", nil, nil, make(chan error)}, + {"TestLinkRemotePIDNodeDown", nil, nil, make(chan error)}, + {"TestLinkRemoteProcessID", nil, nil, make(chan error)}, + {"TestLinkRemoteProcessIDNodeDown", nil, nil, make(chan error)}, + {"TestLinkRemoteAlias", nil, nil, make(chan error)}, + {"TestLinkRemoteAliasNodeDown", nil, nil, make(chan error)}, + {"TestLinkRemoteEvent", nil, nil, make(chan error)}, + {"TestLinkRemoteEventNodeDown", nil, nil, make(chan error)}, + {"TestLinkRemoteNodeDown", nil, nil, make(chan error)}, + } + for _, tc := range t5cases { + t.Run(tc.name, func(t *testing.T) { + node1.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/tests/002_distributed/t006_monitor_test.go b/tests/002_distributed/t006_monitor_test.go new file mode 100644 index 00000000..60c6a614 --- /dev/null +++ b/tests/002_distributed/t006_monitor_test.go @@ -0,0 +1,858 @@ +package distributed + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +func factory_t6pong() gen.ProcessBehavior { + return &t6pong{} +} + +type t6pong struct { + act.Actor +} + +func (t *t6pong) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + req := request.(string) + switch req { + case "alias": + alias, _ := t.CreateAlias() + return alias, nil + case "event": + name := gen.Atom(lib.RandomString(10)) + if _, err := t.RegisterEvent(name, gen.EventOptions{}); err != nil { + t.Log().Error("unable to register event: %s", err) + break + } + ev := gen.Event{Name: name, Node: t.Node().Name()} + return ev, nil + } + return nil, nil +} + +func factory_t6() gen.ProcessBehavior { + return &t6{} +} + +type t6 struct { + act.Actor + + remote gen.Atom + testcase *testcase +} + +func (t *t6) Init(args ...any) error { + t.remote = args[0].(gen.Atom) + return nil +} +func (t *t6) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t6) TestMonitorRemotePID(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringPID", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- 
err + return + } + + exit := newtc.output.(gen.MessageDownPID) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- fmt.Errorf("incorrect down reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) TestMonitorRemotePIDNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringPIDNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownPID) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect down reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) MonitoringPID(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + if err := t.MonitorPID(pid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsPID[0] != pid { + t.testcase.err <- fmt.Errorf("monitor pid is incorrect: %s (must be: %s)", info.MonitorsPID[0], pid) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageDownPID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) MonitoringPIDNodeDown(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + if err := t.MonitorPID(pid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsPID[0] != pid { + t.testcase.err <- fmt.Errorf("monitor pid is incorrect: %s (must be: %s)", info.MonitorsPID[0], pid) + return + } + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageDownPID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) TestMonitorRemoteProcessID(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringProcessID", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownProcessID) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- fmt.Errorf("incorrect termination reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) 
TestMonitorRemoteProcessIDNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringProcessIDNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownProcessID) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect down reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) MonitoringProcessID(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawnRegister(t.remote, "pong", "regpong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + processid := gen.ProcessID{Name: "regpong", Node: t.remote} + if err := t.MonitorProcessID(processid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsProcessID[0] != processid { + t.testcase.err <- fmt.Errorf("monitor process id is incorrect: %s (must be: %s)", info.MonitorsProcessID[0], processid) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageDownProcessID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) MonitoringProcessIDNodeDown(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawnRegister(t.remote, "pong", "regpongdown", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + processid := gen.ProcessID{Name: "regpongdown", Node: t.remote} + if err := t.MonitorProcessID(processid); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsProcessID[0] != processid { + t.testcase.err <- fmt.Errorf("monitor process id is incorrect: %s (must be: %s)", info.MonitorsProcessID[0], processid) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageDownProcessID: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) TestMonitorRemoteAlias(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringAlias", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownAlias) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- fmt.Errorf("incorrect termination reason: %s", exit.Reason) + 
return + } + + t.testcase.err <- nil +} + +func (t *t6) TestMonitorRemoteAliasNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringAliasNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownAlias) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect down reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) MonitoringAlias(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "alias") + if err != nil { + t.testcase.err <- err + return + } + alias := v.(gen.Alias) + + if err := t.MonitorAlias(alias); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsAlias[0] != alias { + t.testcase.err <- fmt.Errorf("monitor alias is incorrect: %s (must be: %s)", info.MonitorsAlias[0], alias) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageDownAlias: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) MonitoringAliasNodeDown(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "alias") + if err != nil { + t.testcase.err <- err + return + } + alias := v.(gen.Alias) + + if err := t.MonitorAlias(alias); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsAlias[0] != alias { + t.testcase.err <- fmt.Errorf("monitor alias is incorrect: %s (must be: %s)", info.MonitorsAlias[0], alias) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageDownAlias: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) TestMonitorRemoteEvent(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringEvent", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + reason := gen.TerminateReasonShutdown + t.Send(pid, reason) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownEvent) + t.Node().Kill(pid) + if exit.Reason != reason { + t.testcase.err <- fmt.Errorf("incorrect termination 
reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) TestMonitorRemoteEventNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringEventNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownEvent) + t.Node().Kill(pid) + if exit.Reason != gen.ErrNoConnection { + t.testcase.err <- fmt.Errorf("incorrect down reason: %s", exit.Reason) + return + } + + t.testcase.err <- nil +} + +func (t *t6) MonitoringEvent(input any) { + switch m := input.(type) { + case initcase: + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "event") + if err != nil { + t.testcase.err <- err + return + } + ev := v.(gen.Event) + + if _, err := t.MonitorEvent(ev); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsEvent[0] != ev { + t.testcase.err <- fmt.Errorf("monitor event is incorrect: %s (must be: %s)", info.MonitorsEvent[0], ev) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + pid := t.testcase.input.(gen.PID) + t.SendExit(pid, m) + return + + case gen.MessageDownEvent: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) MonitoringEventNodeDown(input any) { + switch m := input.(type) { + case initcase: + t.SetTrapExit(true) + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "event") + if err != nil { + t.testcase.err <- err + return + } + ev := v.(gen.Event) + + if _, err := t.MonitorEvent(ev); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsEvent[0] != ev { + t.testcase.err <- fmt.Errorf("monitor event is incorrect: %s (must be: %s)", info.MonitorsEvent[0], ev) + return + } + t.testcase.input = pid + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageDownEvent: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func (t *t6) TestMonitorRemoteNodeDown(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t6, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"MonitoringNodeDown", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + t.Send(pid, fmt.Errorf("doDisconnect")) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + exit := newtc.output.(gen.MessageDownNode) + t.Node().Kill(pid) + if exit.Name != t.remote { + t.testcase.err <- 
fmt.Errorf("incorrect down node name: %s", exit.Name) + return + } + + t.testcase.err <- nil +} + +func (t *t6) MonitoringNodeDown(input any) { + switch m := input.(type) { + case initcase: + + if err := t.MonitorNode(t.remote); err != nil { + t.testcase.err <- err + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsNode[0] != t.remote { + t.testcase.err <- fmt.Errorf("monitor node is incorrect: %s (must be: %s)", info.MonitorsNode[0], t.remote) + return + } + t.testcase.err <- nil + + // waiting exit reason for sending it to the child + return + case error: + remoteNode, err := t.Node().Network().Node(t.remote) + if err != nil { + t.testcase.err <- err + return + + } + remoteNode.Disconnect() + return + + case gen.MessageDownNode: + t.testcase.output = m + t.testcase.err <- nil + return + } + + panic(input) +} + +func TestT6MonitorRemote(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT0node1MonitorRemote@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + options2.Log.Level = gen.LogLevelTrace + node2, err := ergo.StartNode("distT0node2MonitorRemote@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + if err := node2.Network().EnableSpawn("pong", factory_t6pong); err != nil { + t.Fatal(err) + } + + // make connection to the node2 + if _, err := node1.Network().GetNode(node2.Name()); err != nil { + t.Fatal(err) + } + + pid, err := node1.Spawn(factory_t6, gen.ProcessOptions{}, node2.Name()) + if err != nil { + panic(err) + } + + t6cases := []*testcase{ + {"TestMonitorRemotePID", nil, nil, make(chan error)}, + {"TestMonitorRemotePIDNodeDown", nil, nil, make(chan error)}, + {"TestMonitorRemoteProcessID", nil, nil, make(chan error)}, + {"TestMonitorRemoteProcessIDNodeDown", nil, nil, make(chan error)}, + {"TestMonitorRemoteAlias", nil, nil, make(chan error)}, + {"TestMonitorRemoteAliasNodeDown", nil, nil, make(chan error)}, + {"TestMonitorRemoteEvent", nil, nil, make(chan error)}, + {"TestMonitorRemoteEventNodeDown", nil, nil, make(chan error)}, + {"TestMonitorRemoteNodeDown", nil, nil, make(chan error)}, + } + for _, tc := range t6cases { + t.Run(tc.name, func(t *testing.T) { + node1.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/tests/002_distributed/t007_event_test.go b/tests/002_distributed/t007_event_test.go new file mode 100644 index 00000000..2458063a --- /dev/null +++ b/tests/002_distributed/t007_event_test.go @@ -0,0 +1,226 @@ +package distributed + +import ( + "fmt" + "reflect" + "testing" + + "ergo.services/ergo" + "ergo.services/ergo/act" + "ergo.services/ergo/gen" + "ergo.services/ergo/lib" +) + +func factory_t7pong() gen.ProcessBehavior { + return &t7pong{} +} + +type t7pong struct { + act.Actor + + token gen.Ref + name gen.Atom +} + +func (t *t7pong) HandleCall(from gen.PID, ref gen.Ref, request any) (any, error) { + req := request.(string) + switch req { + case "createEvent": + name := gen.Atom(lib.RandomString(10)) + token, err := t.RegisterEvent(name, gen.EventOptions{Buffer: 1}) + if err != nil { + t.Log().Error("unable to register event: %s", err) + break + } + + 
t.token = token + t.name = name + ev := gen.Event{Name: name, Node: t.Node().Name()} + + t.SendEvent(name, token, "testevent1") + return ev, nil + + case "sendEvent": + t.SendEvent(t.name, t.token, "testevent2") + return true, nil + } + return nil, nil +} + +func factory_t7() gen.ProcessBehavior { + return &t7{} +} + +type t7 struct { + act.Actor + + remote gen.Atom + testcase *testcase +} + +func (t *t7) Init(args ...any) error { + t.remote = args[0].(gen.Atom) + return nil +} +func (t *t7) HandleMessage(from gen.PID, message any) error { + if t.testcase == nil { + t.testcase = message.(*testcase) + message = initcase{} + } + + // get method by name + method := reflect.ValueOf(t).MethodByName(t.testcase.name) + if method.IsValid() == false { + t.testcase.err <- fmt.Errorf("unknown method %q", t.testcase.name) + t.testcase = nil + return nil + } + method.Call([]reflect.Value{reflect.ValueOf(message)}) + return nil +} + +func (t *t7) TestRemoteEvent(input any) { + defer func() { + t.testcase = nil + }() + pid, err := t.Spawn(factory_t7, gen.ProcessOptions{}, t.remote) + if err != nil { + t.testcase.err <- err + return + } + newtc := &testcase{"EventProducing", nil, nil, make(chan error)} + t.Send(pid, newtc) + if err := newtc.wait(1); err != nil { + t.Node().Kill(pid) + t.testcase.err <- err + return + } + + if s, ok := newtc.output.(string); ok { + if s != "testevent2" { + t.testcase.err <- fmt.Errorf("incorrect event message: %#v", newtc.output) + return + } + } else { + t.testcase.err <- fmt.Errorf("incorrect value: %#v", newtc.output) + return + } + t.Node().Kill(pid) + + t.testcase.err <- nil +} + +func (t *t7) EventProducing(input any) { + switch input.(type) { + case initcase: + pid, err := t.RemoteSpawn(t.remote, "pong", gen.ProcessOptions{}) + if err != nil { + t.testcase.err <- err + return + } + + v, err := t.Call(pid, "createEvent") + if err != nil { + t.testcase.err <- err + return + } + ev := v.(gen.Event) + + evlist, err := t.MonitorEvent(ev) + if err != nil { + t.testcase.err <- err + return + } + + if len(evlist) != 1 { + t.testcase.err <- fmt.Errorf("there must be at least 1 event") + return + } + if s, ok := evlist[0].Message.(string); ok { + if s != "testevent1" { + t.testcase.err <- fmt.Errorf("incorrect event message: %#v", evlist[0].Message) + return + } + } else { + t.testcase.err <- fmt.Errorf("incorrect value: %#v", evlist[0]) + return + } + + info, err := t.Info() + if err != nil { + t.testcase.err <- err + return + } + if info.MonitorsEvent[0] != ev { + t.testcase.err <- fmt.Errorf("monitor event is incorrect: %s (must be: %s)", info.MonitorsEvent[0], ev) + return + } + + // ask producer to send event + if _, err := t.Call(pid, "sendEvent"); err != nil { + t.testcase.err <- err + return + } + + // waiting for event in HandleEvent callback + return + } + + panic(input) +} + +func (t *t7) HandleEvent(message gen.MessageEvent) error { + t.testcase.output = message.Message + t.testcase.err <- nil + return nil +} + +func TestT7EventRemote(t *testing.T) { + options1 := gen.NodeOptions{} + options1.Network.Cookie = "123" + options1.Network.MaxMessageSize = 567 + options1.Log.DefaultLogger.Disable = true + options1.Log.Level = gen.LogLevelTrace + node1, err := ergo.StartNode("distT0node1EventRemote@localhost", options1) + if err != nil { + t.Fatal(err) + } + defer node1.Stop() + + options2 := gen.NodeOptions{} + options2.Network.Cookie = "123" + options2.Network.MaxMessageSize = 765 + options2.Log.DefaultLogger.Disable = true + options2.Log.Level = gen.LogLevelTrace + 
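+	// the two nodes share Network.Cookie ("123"); the GetNode handshake
+	// below succeeds only when the cookies match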
node2, err := ergo.StartNode("distT0node2EventRemote@localhost", options2) + if err != nil { + t.Fatal(err) + } + defer node2.Stop() + + if err := node2.Network().EnableSpawn("pong", factory_t7pong); err != nil { + t.Fatal(err) + } + + // make connection to the node2 + if _, err := node1.Network().GetNode(node2.Name()); err != nil { + t.Fatal(err) + } + + pid, err := node1.Spawn(factory_t7, gen.ProcessOptions{}, node2.Name()) + if err != nil { + panic(err) + } + + t7cases := []*testcase{ + {"TestRemoteEvent", nil, nil, make(chan error)}, + } + for _, tc := range t7cases { + t.Run(tc.name, func(t *testing.T) { + node1.Send(pid, tc) + if err := tc.wait(1); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/tests/application_test.go b/tests/application_test.go deleted file mode 100644 index 7880f0ac..00000000 --- a/tests/application_test.go +++ /dev/null @@ -1,521 +0,0 @@ -package tests - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type testApplication struct { - gen.Application -} - -func (a *testApplication) Load(args ...etf.Term) (gen.ApplicationSpec, error) { - lifeSpan := args[0].(time.Duration) - name := args[1].(string) - nameGS := "testAppGS" - if len(args) == 3 { - nameGS = args[2].(string) - } - return gen.ApplicationSpec{ - Name: name, - Description: "My Test Applicatoin", - Version: "v.0.1", - Env: map[gen.EnvKey]interface{}{ - "envName1": 123, - "envName2": "Hello world", - }, - Children: []gen.ApplicationChildSpec{ - { - Child: &testAppGenServer{}, - Name: nameGS, - }, - }, - Lifespan: lifeSpan, - }, nil -} - -func (a *testApplication) Start(p gen.Process, args ...etf.Term) { - //p.SetEnv("env123", 456) -} - -// test gen.Server -type testAppGenServer struct { - gen.Server -} - -func (gs *testAppGenServer) Init(process *gen.ServerProcess, args ...etf.Term) error { - process.SetEnv("env123", 456) - return nil -} - -func (gs *testAppGenServer) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - return nil, gen.ServerStatusStop -} - -// testing application -func TestApplicationBasics(t *testing.T) { - - fmt.Printf("\n=== Test Application load/unload/start/stop\n") - fmt.Printf("\nStarting node nodeTestAplication@localhost:") - ctx := context.Background() - mynode, err := ergo.StartNodeWithContext(ctx, "nodeTestApplication@localhost", "cookies", node.Options{}) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - app := &testApplication{} - lifeSpan := 0 * time.Second - - // - // case 1: loading/unloading app - // - fmt.Printf("... loading application: ") - loaded, err := mynode.ApplicationLoad(app, lifeSpan, "testapp") - if err != nil { - t.Fatal(err) - } - if loaded != "testapp" { - t.Fatal("can't load application") - } - - la := mynode.LoadedApplications() - - // there are default applications - KernelApp, SystemApp thats why it - // should be equal 3. - if len(la) != 3 { - t.Fatal("total number of loaded application mismatch") - } - fmt.Println("OK") - - wa := mynode.WhichApplications() - if len(wa) > 2 { - t.Fatal("total number of running application mismatch") - } - - fmt.Printf("... 
unloading application: ") - if err := mynode.ApplicationUnload("testapp"); err != nil { - t.Fatal(err) - } - la = mynode.LoadedApplications() - if len(la) > 2 { - t.Fatal("total number of loaded application mismatch") - } - fmt.Println("OK") - - // - // case 2: start(and try to unload running app)/stop(normal) application - // - fmt.Printf("... starting application: ") - if _, err := mynode.ApplicationLoad(app, lifeSpan, "testapp1", "testAppGS1"); err != nil { - t.Fatal(err) - } - - p, e := mynode.ApplicationStart("testapp1") - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("... try to unload started application (shouldn't be able): ") - if e := mynode.ApplicationUnload("testapp1"); e != lib.ErrAppAlreadyStarted { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("... check total number of running applications (should be 3 including KernelApp, SystemApp): ") - wa = mynode.WhichApplications() - if n := len(wa); n != 3 { - t.Fatal(n) - } - fmt.Println("OK") - - fmt.Printf("... check the name of running application (should be 'testapp1'): ") - found := false - for _, a := range wa { - if a.Name == "testapp1" { - found = true - break - } - - } - if !found { - t.Fatal("can't find testapp1 among the running applications") - } - fmt.Println("OK") - - // case 2.1: test env vars - fmt.Printf("... application's environment variables: ") - p.SetEnv("env123", 123) - p.SetEnv("envStr", "123") - - gs := mynode.ProcessByName("testAppGS1") - if gs == nil { - t.Fatal("process testAppGS1 is not found by name") - } - env := gs.Env("env123") - if env == nil { - t.Fatal("incorrect environment variable: not found") - } - - if i, ok := env.(int); !ok || i != 456 { - t.Fatal("incorrect environment variable: value should be overrided by child process") - } - - if envUnknown := gs.Env("unknown"); envUnknown != nil { - t.Fatal("incorrect environment variable: undefined variable should have nil value") - } - - envs := gs.ListEnv() - if x, ok := envs["env123"]; !ok || x != 456 { - t.Fatal("incorrect environment variable: list of variables has no env123 value or its wrong") - } - - if x, ok := envs["envStr"]; !ok || x != "123" { - t.Fatal("incorrect environment variable: list of variables has no envStr value or its wrong") - } - - fmt.Println("OK") - - // case 2.2: get list of children' pid - fmt.Printf("... application's children list: ") - list, _ := p.Children() - if len(list) != 1 || list[0] != gs.Self() { - t.Fatal("incorrect children list") - } - fmt.Println("OK") - - // case 2.3: get application info - - fmt.Printf("... getting application info: ") - info, errInfo := mynode.ApplicationInfo("testapp1") - if errInfo != nil { - t.Fatal(errInfo) - } - if p.Self() != info.PID { - t.Fatal("incorrect pid in application info") - } - fmt.Println("OK") - - fmt.Printf("... stopping application: ") - if e := mynode.ApplicationStop("testapp1"); e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - if e := p.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal(e) - } - wa = mynode.WhichApplications() - if len(wa) != 2 { - fmt.Println("waa: ", wa) - t.Fatal("total number of running application mismatch") - } - - // - // case 3: start/stop (brutal) application - // - fmt.Printf("... starting application for brutal kill: ") - if _, err := mynode.ApplicationLoad(app, lifeSpan, "testappBrutal", "testAppGS2Brutal"); err != nil { - t.Fatal(err) - } - p, e = mynode.ApplicationStart("testappBrutal") - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - fmt.Printf("... 
kill application: ") - p.Kill() - if e := p.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal("timed out") - } - fmt.Println("OK") - - mynode.ApplicationUnload("testappBrutal") - - // - // case 4: start with limited lifespan - // - fmt.Printf("... starting application with lifespan 150ms: ") - lifeSpan = 150 * time.Millisecond - if _, err := mynode.ApplicationLoad(app, lifeSpan, "testapp2", "testAppGS2"); err != nil { - t.Fatal(err) - } - tStart := time.Now() - p, e = mynode.ApplicationStart("testapp2") - if e != nil { - t.Fatal(e) - } - // due to small lifespantiming it is ok to get real lifespan longer almost twice - if e := p.WaitWithTimeout(300 * time.Millisecond); e != nil { - t.Fatal("application lifespan was longer than 150ms") - } - fmt.Println("OK") - tLifeSpan := time.Since(tStart) - - fmt.Printf("... application should be self stopped in 150ms: ") - if p.IsAlive() { - t.Fatal("still alive") - } - - if tLifeSpan < lifeSpan { - t.Fatal("lifespan was shorter(", tLifeSpan, ") than ", lifeSpan) - } - - fmt.Println("OK [ real lifespan:", tLifeSpan, "]") - - mynode.Stop() -} - -func TestApplicationTypePermanent(t *testing.T) { - fmt.Printf("\n=== Test Application type Permanent\n") - fmt.Printf("\nStarting node nodeTestAplicationPermanent@localhost:") - ctx := context.Background() - mynode, _ := ergo.StartNodeWithContext(ctx, "nodeTestApplicationPermanent@localhost", "cookies", node.Options{}) - if mynode == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - - fmt.Printf("... starting application: ") - app := &testApplication{} - lifeSpan := time.Duration(0) - if _, err := mynode.ApplicationLoad(app, lifeSpan, "testapp"); err != nil { - t.Fatal(err) - } - - p, e := mynode.ApplicationStartPermanent("testapp") - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - gs := mynode.ProcessByName("testAppGS") - if gs == nil { - t.Fatal("process testAppGS is not found by name") - } - fmt.Printf("... stop child with 'abnormal' reason: ") - gs.Exit("abnormal") - if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal("timeout on waiting child") - } - fmt.Println("OK") - - if e := p.WaitWithTimeout(1 * time.Second); e != nil { - t.Fatal("timeout on waiting application stopping") - } - - if e := mynode.WaitWithTimeout(1 * time.Second); e != nil { - t.Fatal("node shouldn't be alive here") - } - - if mynode.IsAlive() { - t.Fatal("node shouldn't be alive here") - } - -} - -func TestApplicationTypeTransient(t *testing.T) { - fmt.Printf("\n=== Test Application type Transient\n") - fmt.Printf("\nStarting node nodeTestAplicationTypeTransient@localhost:") - ctx := context.Background() - mynode, _ := ergo.StartNodeWithContext(ctx, "nodeTestApplicationTypeTransient@localhost", "cookies", node.Options{}) - if mynode == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - - app1 := &testApplication{} - app2 := &testApplication{} - lifeSpan := time.Duration(0) - - if _, err := mynode.ApplicationLoad(app1, lifeSpan, "testapp1", "testAppGS1"); err != nil { - t.Fatal(err) - } - - if _, err := mynode.ApplicationLoad(app2, lifeSpan, "testapp2", "testAppGS2"); err != nil { - t.Fatal(err) - } - - fmt.Printf("... starting application testapp1: ") - p1, e1 := mynode.ApplicationStartTransient("testapp1") - if e1 != nil { - t.Fatal(e1) - } - fmt.Println("OK") - - fmt.Printf("... starting application testapp2: ") - p2, e2 := mynode.ApplicationStartTransient("testapp2") - if e2 != nil { - t.Fatal(e2) - } - fmt.Println("OK") - - fmt.Printf("... 
stopping testAppGS1 with 'normal' reason (shouldn't affect testAppGS2): ") - gs := mynode.ProcessByName("testAppGS1") - if gs == nil { - t.Fatal("process testAppGS1 is not found by name") - } - gs.Exit("normal") - if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal(e) - } - - if e := p1.WaitWithTimeout(100 * time.Millisecond); e != lib.ErrTimeout { - t.Fatal("application testapp1 should be alive here") - } - - fmt.Printf("... stopping application testapp1: ") - p1.Kill() - if e := p1.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal("application testapp1 shouldn't be alive here:", e) - } - fmt.Println("OK") - - p2.WaitWithTimeout(100 * time.Millisecond) - if !p2.IsAlive() { - t.Fatal("testAppGS2 should be alive here") - } - - if !mynode.IsAlive() { - t.Fatal("node should be alive here") - } - - fmt.Println("OK") - - fmt.Printf("... starting application testapp1: ") - p1, e1 = mynode.ApplicationStartTransient("testapp1") - if e1 != nil { - t.Fatal(e1) - } - fmt.Println("OK") - - fmt.Printf("... stopping testAppGS1 with 'abnormal' reason (node will shutdown): ") - gs = mynode.ProcessByName("testAppGS1") - gs.Exit("abnormal") - - if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal(e) - } - - if e := p1.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal("testapp1 shouldn't be alive here") - } - - if e := p2.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal("testapp2 shouldn't be alive here") - } - - if e := mynode.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal("node shouldn't be alive here") - } - fmt.Println("OK") -} - -func TestApplicationTypeTemporary(t *testing.T) { - fmt.Printf("\n=== Test Application type Temporary\n") - fmt.Printf("\nStarting node nodeTestApplicationStop@localhost:") - ctx := context.Background() - mynode, _ := ergo.StartNodeWithContext(ctx, "nodeTestApplicationStop@localhost", "cookies", node.Options{}) - if mynode == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - fmt.Printf("... starting application: ") - app := &testApplication{} - lifeSpan := time.Duration(0) - if _, err := mynode.ApplicationLoad(app, lifeSpan, "testapp"); err != nil { - t.Fatal(err) - } - - _, e := mynode.ApplicationStart("testapp") // default start type is Temporary - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("... stopping testAppGS with 'normal' reason: ") - gs := mynode.ProcessByName("testAppGS") - if gs == nil { - t.Fatal("process testAppGS is not found by name") - } - gs.Exit("normal") - if e := gs.WaitWithTimeout(100 * time.Millisecond); e != nil { - t.Fatal(e) - } - - if e := mynode.WaitWithTimeout(100 * time.Millisecond); e != lib.ErrTimeout { - t.Fatal("node should be alive here") - } - - if !mynode.IsAlive() { - t.Fatal("node should be alive here") - } - fmt.Println("OK") - - mynode.Stop() -} - -func TestApplicationStop(t *testing.T) { - fmt.Printf("\n=== Test Application stopping\n") - fmt.Printf("\nStarting node nodeTestApplicationTypeTemporary@localhost:") - ctx := context.Background() - mynode, _ := ergo.StartNodeWithContext(ctx, "nodeTestApplicationTypeTemporary@localhost", "cookies", node.Options{}) - if mynode == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - fmt.Printf("... 
starting applications testapp1, testapp2: ") - lifeSpan := time.Duration(0) - app := &testApplication{} - if _, e := mynode.ApplicationLoad(app, lifeSpan, "testapp1", "testAppGS1"); e != nil { - t.Fatal(e) - } - - app1 := &testApplication{} - if _, e := mynode.ApplicationLoad(app1, lifeSpan, "testapp2", "testAppGS2"); e != nil { - t.Fatal(e) - } - - _, e1 := mynode.ApplicationStartPermanent("testapp1") - if e1 != nil { - t.Fatal(e1) - } - p2, e2 := mynode.ApplicationStartPermanent("testapp2") - if e2 != nil { - t.Fatal(e2) - } - fmt.Println("OK") - - // case 1: stopping via node.ApplicationStop - fmt.Printf("... stopping testapp1 via node.ApplicationStop (shouldn't affect testapp2):") - if e := mynode.ApplicationStop("testapp1"); e != nil { - t.Fatal("can't stop application via node.ApplicationStop", e) - } - - if !p2.IsAlive() { - t.Fatal("testapp2 should be alive here") - } - - if !mynode.IsAlive() { - t.Fatal("node should be alive here") - } - - fmt.Println("OK") - - mynode.Stop() - -} diff --git a/tests/atomcache_test.go b/tests/atomcache_test.go deleted file mode 100644 index dd5f3882..00000000 --- a/tests/atomcache_test.go +++ /dev/null @@ -1,1264 +0,0 @@ -package tests - -import ( - "fmt" - "math/rand" - "strings" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" - "github.com/ergo-services/ergo/proto/dist" -) - -func TestAtomCacheLess255Uniq(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (less 255 uniq) \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeAtomCache1Less255@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1Less255@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2Less255@localhost with NumHandlers = 2: ") - opts2 := node.Options{} - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2Less255@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], 
atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheMore255Uniq(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (more 255 uniq) \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeAtomCache1More255@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1More255@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2More255@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2More255@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 251) - for i := range atoms2K { - 
atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheLess255UniqWithCompression(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (less 255 uniq) with Compression \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeAtomCache1Less255Compression@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1Less255Compression@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2Less255Compression@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2Less255Compression@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - node1gs1.SetCompression(true) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - 
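The i/10 in the atom constructors above is what keeps every "less 255" variant below the 255-entry atom-cache segment of the distribution protocol: 2100 indices collapse into 210 distinct atom names, while the "more 255" variants format i directly and overflow the cache on purpose. A minimal, self-contained sketch of that counting trick (plain Go, no ergo imports; the prefix name is illustrative):

    package main

    import "fmt"

    func main() {
        unique := make(map[string]struct{})
        for i := 0; i < 2100; i++ {
            // i/10 collapses every ten indices into one name: 210 unique
            // atoms, which fits a 255-entry cache segment; formatting i
            // directly would produce 2100 unique atoms and exceed it.
            unique[fmt.Sprintf("atom%d", i/10)] = struct{}{}
        }
        fmt.Println(len(unique)) // prints 210
    }
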
if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} -func TestAtomCacheMore255UniqWithCompression(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (more 255 uniq) with Compression \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeAtomCache1More255Compression@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1More255Compression@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2More255Compression@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2More255Compression@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - node1gs1.SetCompression(true) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 251) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - 
atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheLess255UniqViaProxy(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (less 255 uniq) via Proxy connection \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeAtomCache1Less255ViaProxy@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1Less255ViaProxy@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCacheTLess255ViaProxy@localhost with NubHandlers = 2: ") - optsT := node.Options{} - optsT.Proxy.Transit = true - optsT.Proto = dist.CreateProto(protoOptions) - nodeT, e := ergo.StartNode("nodeAtomCacheTLess255ViaProxy@localhost", "cookie", optsT) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2Less255ViaProxy@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proxy.Accept = true - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2Less255ViaProxy@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - defer nodeT.Stop() - - fmt.Printf(" connect %s with %s via proxy %s: ", node1.Name(), node2.Name(), nodeT.Name()) - route := node.ProxyRoute{ - Name: node2.Name(), - Proxy: nodeT.Name(), - } - node1.AddProxyRoute(route) - - if err := node1.Connect(node2.Name()); err != nil { - t.Fatal(err) - } - - indirectNodes := node2.NodesIndirect() - if len(indirectNodes) != 1 || indirectNodes[0] != node1.Name() { - t.Fatal("wrong result:", indirectNodes) - } - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = 
strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheMore255UniqViaProxy(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (more 255 uniq) via Proxy connection \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeAtomCache1More255ViaProxy@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1More255ViaProxy@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCacheTMore255ViaProxy@localhost with NubHandlers = 2: ") - optsT := node.Options{} - optsT.Proxy.Transit = true - optsT.Proto = dist.CreateProto(protoOptions) - nodeT, e := ergo.StartNode("nodeAtomCacheTMore255ViaProxy@localhost", "cookie", optsT) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2More255ViaProxy@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proxy.Accept = true - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2More255ViaProxy@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - defer nodeT.Stop() - - fmt.Printf(" connect %s with %s via proxy %s: ", node1.Name(), node2.Name(), nodeT.Name()) - route := node.ProxyRoute{ - Name: node2.Name(), - Proxy: nodeT.Name(), - } - node1.AddProxyRoute(route) - - if err := node1.Connect(node2.Name()); err != nil { - t.Fatal(err) - } - - indirectNodes := node2.NodesIndirect() - if len(indirectNodes) != 1 || indirectNodes[0] != node1.Name() { - t.Fatal("wrong result:", indirectNodes) - } - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := 
waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheLess255UniqViaProxyWithEncryption(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (less 255 uniq) via Proxy connection with Encryption\n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = true - - fmt.Printf("Starting node: nodeAtomCache1Less255ViaProxyEnc@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1Less255ViaProxyEnc@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCacheTLess255ViaProxyEnc@localhost with NubHandlers = 2: ") - optsT := node.Options{} - optsT.Proxy.Transit = true - optsT.Proto = dist.CreateProto(protoOptions) - nodeT, e := ergo.StartNode("nodeAtomCacheTLess255ViaProxyEnc@localhost", "cookie", optsT) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2Less255ViaProxyEnc@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proxy.Accept = true - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2Less255ViaProxyEnc@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - defer nodeT.Stop() - - fmt.Printf(" connect %s with %s via proxy %s: ", node1.Name(), node2.Name(), nodeT.Name()) - route := node.ProxyRoute{ - Name: node2.Name(), - Proxy: nodeT.Name(), - } - node1.AddProxyRoute(route) - - if err := node1.Connect(node2.Name()); err != nil { - t.Fatal(err) - } - - indirectNodes := node2.NodesIndirect() - if len(indirectNodes) != 1 || indirectNodes[0] != node1.Name() { - t.Fatal("wrong result:", indirectNodes) - } - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, 
node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheMore255UniqViaProxyWithEncryption(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (more 255 uniq) via Proxy connection with Encryption \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = true - - fmt.Printf("Starting node: nodeAtomCache1More255ViaProxyEnc@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1More255ViaProxyEnc@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCacheTMore255ViaProxyEnc@localhost with NumHandlers = 2: ") - optsT := node.Options{} - optsT.Proxy.Transit = true - optsT.Proto = dist.CreateProto(protoOptions) - nodeT, e := ergo.StartNode("nodeAtomCacheTMore255ViaProxyEnc@localhost", "cookie", optsT) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2More255ViaProxyEnc@localhost with NumHandlers = 2: ") - opts2 := node.Options{} - opts2.Proxy.Accept = true - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2More255ViaProxyEnc@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - defer nodeT.Stop() - - fmt.Printf(" connect %s with %s via proxy %s: ", 
node1.Name(), node2.Name(), nodeT.Name()) - route := node.ProxyRoute{ - Name: node2.Name(), - Proxy: nodeT.Name(), - } - node1.AddProxyRoute(route) - - if err := node1.Connect(node2.Name()); err != nil { - t.Fatal(err) - } - - indirectNodes := node2.NodesIndirect() - if len(indirectNodes) != 1 || indirectNodes[0] != node1.Name() { - t.Fatal("wrong result:", indirectNodes) - } - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} - -func TestAtomCacheLess255UniqViaProxyWithEncryptionCompression(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (less 255 uniq) via Proxy connection with Encryption and Compression\n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = true - - fmt.Printf("Starting node: nodeAtomCache1Less255ViaProxyEncComp@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1Less255ViaProxyEncComp@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCacheTLess255ViaProxyEncComp@localhost with 
NubHandlers = 2: ") - optsT := node.Options{} - optsT.Proxy.Transit = true - optsT.Proto = dist.CreateProto(protoOptions) - nodeT, e := ergo.StartNode("nodeAtomCacheTLess255ViaProxyEncComp@localhost", "cookie", optsT) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2Less255ViaProxyEncComp@localhost with NubHandlers = 2: ") - opts2 := node.Options{} - opts2.Proxy.Accept = true - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2Less255ViaProxyEncComp@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - defer nodeT.Stop() - - fmt.Printf(" connect %s with %s via proxy %s: ", node1.Name(), node2.Name(), nodeT.Name()) - route := node.ProxyRoute{ - Name: node2.Name(), - Proxy: nodeT.Name(), - } - node1.AddProxyRoute(route) - - if err := node1.Connect(node2.Name()); err != nil { - t.Fatal(err) - } - - indirectNodes := node2.NodesIndirect() - if len(indirectNodes) != 1 || indirectNodes[0] != node1.Name() { - t.Fatal("wrong result:", indirectNodes) - } - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - node1gs1.SetCompression(true) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") 
-} - -func TestAtomCacheMore255UniqViaProxyWithEncryptionCompression(t *testing.T) { - fmt.Printf("\n=== Test Atom Cache (more 255 uniq) via Proxy connection with Encryption and Compression \n") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = true - - fmt.Printf("Starting node: nodeAtomCache1More255ViaProxyEncComp@localhost with NumHandlers = 2: ") - node1, e := ergo.StartNode("nodeAtomCache1More255ViaProxyEncComp@localhost", "cookie", opts1) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCacheTMore255ViaProxyEncComp@localhost with NumHandlers = 2: ") - optsT := node.Options{} - optsT.Proxy.Transit = true - optsT.Proto = dist.CreateProto(protoOptions) - nodeT, e := ergo.StartNode("nodeAtomCacheTMore255ViaProxyEncComp@localhost", "cookie", optsT) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting node: nodeAtomCache2More255ViaProxyEncComp@localhost with NumHandlers = 2: ") - opts2 := node.Options{} - opts2.Proxy.Accept = true - opts2.Proto = dist.CreateProto(protoOptions) - node2, e := ergo.StartNode("nodeAtomCache2More255ViaProxyEncComp@localhost", "cookie", opts2) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - defer node1.Stop() - defer node2.Stop() - defer nodeT.Stop() - - fmt.Printf(" connect %s with %s via proxy %s: ", node1.Name(), node2.Name(), nodeT.Name()) - route := node.ProxyRoute{ - Name: node2.Name(), - Proxy: nodeT.Name(), - } - node1.AddProxyRoute(route) - - if err := node1.Connect(node2.Name()); err != nil { - t.Fatal(err) - } - - indirectNodes := node2.NodesIndirect() - if len(indirectNodes) != 1 || indirectNodes[0] != node1.Name() { - t.Fatal("wrong result:", indirectNodes) - } - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - node1gs1.SetCompression(true) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - atoms2K := make(etf.List, 2100) - s := lib.RandomString(240) - for i := range atoms2K { - atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i)) - } - - long := make([]byte, 66*1024) - for i := range long { - long[i] = byte(i % 255) - } - - fmt.Printf("case 1: sending 2.1K atoms: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 2: sending a tuple with 2.1K atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - - fmt.Printf("case 3: sending 2.1K UTF-8 long atoms: ") - s = strings.Repeat("🚀", 252) - for i := range atoms2K { - 
atoms2K[i] = etf.Atom(fmt.Sprintf("%s%d", s, i/10)) - } - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := atoms2K - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") - fmt.Printf("case 4: sending a tuple with 2.1K UTF-8 long atoms and 66K binary: ") - rand.Shuffle(len(atoms2K), func(i, j int) { - atoms2K[i], atoms2K[j] = atoms2K[j], atoms2K[i] - }) - for i := 0; i < 10; i++ { - result := etf.Tuple{atoms2K, long} - node1gs1.Send(node2gs2.Self(), result) - if err := waitForResultWithValueReturnError(t, gs2.v, result); err != nil { - t.Fatal(err) - } - } - fmt.Println("OK") -} diff --git a/tests/core_test.go b/tests/core_test.go deleted file mode 100644 index fae8f7bb..00000000 --- a/tests/core_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package tests - -import ( - "fmt" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type TestCoreGenserver struct { - gen.Server -} - -func (trg *TestCoreGenserver) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - // fmt.Printf("TestCoreGenserver ({%s, %s}): HandleCall: %#v, From: %#v\n", trg.process.name, trg.process.Node.Name(), message, from) - return message, gen.ServerStatusOK -} - -func (trg *TestCoreGenserver) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case makeCall: - return process.Call(m.to, m.message) - } - return nil, lib.ErrUnsupportedRequest -} - -func TestCore(t *testing.T) { - fmt.Printf("\n=== Test Registrar\n") - fmt.Printf("Starting nodes: nodeR1@localhost, nodeR2@localhost: ") - node1, _ := ergo.StartNode("nodeR1@localhost", "cookies", node.Options{}) - defer node1.Stop() - if node1 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs := &TestCoreGenserver{} - fmt.Printf("Starting TestCoreGenserver. 
registering as 'gs1' on %s and create an alias: ", node1.Name()) - node1gs1, err := node1.Spawn("gs1", gen.ProcessOptions{}, gs, nil) - if err != nil { - t.Fatal(err) - } - alias, err := node1gs1.CreateAlias() - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("...get process by name 'gs1': ") - p := node1.ProcessByName("gs1") - if p == nil { - message := fmt.Sprintf("missing process %v on %s", node1gs1.Self(), node1.Name()) - t.Fatal(message) - } - fmt.Println("OK") - fmt.Printf("...get process by pid of 'gs1': ") - p1 := node1.ProcessByPid(node1gs1.Self()) - if p1 == nil { - message := fmt.Sprintf("missing process %v on %s", node1gs1.Self(), node1.Name()) - t.Fatal(message) - } - - if p != p1 { - message := fmt.Sprintf("not equal: %v on %s", p.Self(), p1.Self()) - t.Fatal(message) - } - fmt.Println("OK") - - fmt.Printf("...get process by alias of 'gs1': ") - p2 := node1.ProcessByAlias(alias) - if p2 == nil { - message := fmt.Sprintf("missing process %v on %s", node1gs1.Self(), node1.Name()) - t.Fatal(message) - } - - if p1 != p2 { - message := fmt.Sprintf("not equal: %v on %s", p1.Self(), p2.Self()) - t.Fatal(message) - } - fmt.Println("OK") - - fmt.Printf("...registering name 'test' related to %v: ", node1gs1.Self()) - if e := node1.RegisterName("test", node1gs1.Self()); e != nil { - t.Fatal(e) - } else { - if e := node1.RegisterName("test", node1gs1.Self()); e == nil { - t.Fatal("registered duplicate name") - } - } - fmt.Println("OK") - fmt.Printf("...unregistering name 'test' related to %v: ", node1gs1.Self()) - node1.UnregisterName("test") - if e := node1.RegisterName("test", node1gs1.Self()); e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("Starting TestCoreGenserver and registering as 'gs2' on %s: ", node1.Name()) - node1gs2, err := node1.Spawn("gs2", gen.ProcessOptions{}, gs, nil) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("...try to unregister 'test' related to %v using gs2 process (not allowed): ", node1gs1.Self()) - if err := node1gs2.UnregisterName("test"); err != lib.ErrNameOwner { - t.Fatal("not allowed to unregister by not an owner") - } - fmt.Println("OK") - - fmt.Printf("...try to unregister 'test' related to %v using gs1 process (owner): ", node1gs1.Self()) - if err := node1gs1.UnregisterName("test"); err != nil { - t.Fatal(err) - } - - fmt.Println("OK") -} - -func TestCoreAlias(t *testing.T) { - fmt.Printf("\n=== Test Registrar Alias\n") - fmt.Printf("Starting node: nodeR1Alias@localhost: ") - node1, _ := ergo.StartNode("nodeR1Alias@localhost", "cookies", node.Options{}) - defer node1.Stop() - if node1 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs := &TestCoreGenserver{} - fmt.Printf(" Starting gs1 and gs2 GenServers on %s: ", node1.Name()) - node1gs1, err := node1.Spawn("gs1", gen.ProcessOptions{}, gs, nil) - if err != nil { - t.Fatal(err) - } - node1gs2, err := node1.Spawn("gs2", gen.ProcessOptions{}, gs, nil) - if err != nil { - t.Fatal(err) - } - if len(node1gs1.Aliases()) > 0 || len(node1gs2.Aliases()) > 0 { - t.Fatal("alias table must be empty") - } - - fmt.Println("OK") - - fmt.Printf(" Create gs1 alias: ") - alias, err := node1gs1.CreateAlias() - if err != nil { - t.Fatal(err) - } - prc := node1gs1.ProcessByAlias(alias) - if prc == nil { - t.Fatal("missing alias") - } - if prc.Self() != node1gs1.Self() { - t.Fatal("wrong alias") - } - fmt.Println("OK") - - fmt.Printf(" Make a call to gs1 via alias: ") - call := makeCall{ - to: alias, - message: "hi", - } - if 
reply, err := node1gs2.Direct(call); err == nil { - if r, ok := reply.(string); !ok || r != "hi" { - t.Fatal("wrong result", reply) - } - } else { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf(" Delete gs1 alias by gs2 (not allowed): ") - if err := node1gs2.DeleteAlias(alias); err != lib.ErrAliasOwner { - t.Fatal(" expected ErrAliasOwner, got:", err) - } - fmt.Println("OK") - fmt.Printf(" Delete gs1 alias by itself: ") - if err := node1gs1.DeleteAlias(alias); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - if a := node1gs1.Aliases(); len(a) > 0 { - t.Fatal("alias table (registrar) must be empty on gs1 process", a) - } - - if a := node1gs2.Aliases(); len(a) > 0 { - t.Fatal("alias table (process) must be empty on gs2 process", a) - - } - fmt.Printf(" Aliases must be cleaned up once the owner is down: ") - alias1, _ := node1gs1.CreateAlias() - alias2, _ := node1gs1.CreateAlias() - alias3, _ := node1gs1.CreateAlias() - if a := node1gs1.Aliases(); len(a) != 3 { - t.Fatal("alias table of gs1 must have 3 aliases", a) - } - - if !node1.IsAlias(alias1) || !node1.IsAlias(alias2) || !node1.IsAlias(alias3) { - t.Fatal("not an alias", alias1, alias2, alias3) - } - - node1gs1.Kill() - time.Sleep(100 * time.Millisecond) - if a := node1gs1.Aliases(); len(a) != 0 { - t.Fatal("alias table must be empty", a) - } - fmt.Println("OK") - - fmt.Printf(" Create gs1 alias on a stopped process (shouldn't be allowed): ") - alias, err = node1gs1.CreateAlias() - if err != lib.ErrProcessTerminated { - t.Fatal(err) - } - fmt.Println("OK") - -} diff --git a/tests/monitor_test.go b/tests/monitor_test.go deleted file mode 100644 index 4d432575..00000000 --- a/tests/monitor_test.go +++ /dev/null @@ -1,1814 +0,0 @@ -package tests - -import ( - "fmt" - "reflect" - "sort" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type testMonitor struct { - gen.Server - v chan interface{} -} - -func (tgs *testMonitor) Init(process *gen.ServerProcess, args ...etf.Term) error { - tgs.v <- process.Self() - return nil -} -func (tgs *testMonitor) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - tgs.v <- message - return gen.ServerStatusOK -} -func (tgs *testMonitor) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - return message, gen.ServerStatusOK -} -func (tgs *testMonitor) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - tgs.v <- message - return gen.ServerStatusOK -} - -/* - Test cases for Local-Local - Monitor - by Pid - doesnt_exist, terminate, demonitor - by Name - doesnt_exist, terminate, demonitor - by Tuple - doesnt_exist, terminate, demonitor -*/ -func TestMonitorLocalLocal(t *testing.T) { - fmt.Printf("\n=== Test Monitor Local-Local\n") - fmt.Printf("Starting node: nodeM1LocalLocal@localhost: ") - node1, _ := ergo.StartNode("nodeM1LocalLocal@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } - fmt.Println("OK") - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - // starting gen servers - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - 
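Every monitor case below asserts the same contract: MonitorProcess returns a reference, and when the target terminates (or never existed) the owner receives a gen.MessageDown carrying that reference; which identity field is set depends on how the monitor was created. A condensed sketch of those expectations, using the v2 gen API these deleted tests are written against (illustrative, not runnable on its own):

    // monitoring by Pid: the down message carries the Pid
    ref := node1gs1.MonitorProcess(node1gs2.Self())
    // expected on target exit: gen.MessageDown{Ref: ref, Pid: target, Reason: "normal"}
    // Reason is "noproc" if the target never existed,
    // and "noconnection" if the remote node goes down.

    // monitoring by registered name: the down message carries a ProcessID instead
    ref = node1gs1.MonitorProcess(gen.ProcessID{Name: "gs2", Node: node1.Name()})
    // expected: gen.MessageDown{Ref: ref, ProcessID: gen.ProcessID{Name: "gs2", Node: node1.Name()}, Reason: "normal"}
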
node1gs2, _ := node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node1gs2.Self()) - - // by Pid - fmt.Printf("... by Pid Local-Local: gs1 -> gs2. monitor/demonitor: ") - ref := node1gs1.MonitorProcess(node1gs2.Self()) - - if !node1gs2.IsMonitor(ref) { - t.Fatal("monitor reference has been lost") - } - node1gs1.DemonitorProcess(ref) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... by Pid Local-Local: gs1 -> gs2. monitor/terminate: ") - ref = node1gs1.MonitorProcess(node1gs2.Self()) - node1gs2.Exit("normal") - result := gen.MessageDown{ - Ref: ref, - Pid: node1gs2.Self(), - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - - if err := checkCleanProcessRef(node1gs2, ref); err != nil { - t.Fatal(err) - } - - fmt.Print("... by Pid Local-Local: gs1 -> monitor unknownPid: ") - ref = node1gs1.MonitorProcess(node1gs2.Self()) - result = gen.MessageDown{ - Ref: ref, - Pid: node1gs2.Self(), - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - node1gs2, _ = node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node1gs2.Self()) - // by Name - fmt.Printf("... by Name Local-Local: gs1 -> gs2. monitor/demonitor: ") - ref = node1gs1.MonitorProcess("gs2") - if err := checkCleanProcessRef(node1gs1, ref); err == nil { - t.Fatal("monitor reference has been lost") - } - node1gs1.DemonitorProcess(ref) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... by Name Local-Local: gs1 -> gs2. monitor/terminate: ") - ref = node1gs1.MonitorProcess("gs2") - node1gs2.Exit("normal") - result = gen.MessageDown{ - Ref: ref, - ProcessID: gen.ProcessID{Name: "gs2", Node: node1.Name()}, - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - fmt.Print("... by Name Local-Local: gs1 -> monitor unknown name: ") - ref = node1gs1.MonitorProcess("asdfasdf") - result = gen.MessageDown{ - Ref: ref, - ProcessID: gen.ProcessID{Name: "asdfasdf", Node: node1.Name()}, - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - node1gs2, _ = node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node1gs2.Self()) - - // by Name gen.ProcessID{ProcessName, Node} - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Local: gs1 -> gs2. demonitor: ") - processID := gen.ProcessID{Name: "gs2", Node: node1.Name()} - ref = node1gs1.MonitorProcess(processID) - if err := checkCleanProcessRef(node1gs1, ref); err == nil { - t.Fatal("monitor reference has been lost") - } - node1gs1.DemonitorProcess(ref) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Local: gs1 -> gs2. terminate: ") - ref = node1gs1.MonitorProcess(processID) - node1gs2.Exit("normal") - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - fmt.Print("... 
by gen.ProcessID{Name, Node} Local-Local: gs1 -> unknownPid: ") - processID = gen.ProcessID{Name: "gs2222", Node: node1.Name()} - ref = node1gs1.MonitorProcess(processID) - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - - node1.Stop() -} - -/* - Test cases for Local-Remote - Monitor - by Pid - doesnt_exist, terminate, demonitor, on_node_down, node_unknown - by Tuple - doesnt_exist, terminate, demonitor, on_node_down, node_unknown - Link - by Pid - doesnt_exist, terminate, unlink, node_down, node_unknown -*/ - -func TestMonitorLocalRemoteByPid(t *testing.T) { - fmt.Printf("\n=== Test Monitor Local-Remote by Pid\n") - fmt.Printf("Starting nodes: nodeM1LocalRemoteByPid@localhost, nodeM2LocalRemoteByPid@localhost: ") - node1, err1 := ergo.StartNode("nodeM1LocalRemoteByPid@localhost", "cookies", node.Options{}) - node2, err2 := ergo.StartNode("nodeM2LocalRemoteByPid@localhost", "cookies", node.Options{}) - if err1 != nil { - t.Fatal("can't start node1:", err1) - } - if err2 != nil { - t.Fatal("can't start node2:", err2) - } - - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - // by Pid - fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor/demonitor: ") - ref := node1gs1.MonitorProcess(node2gs2.Self()) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - if node1gs1.IsMonitor(ref) == false { - t.Fatal("monitor reference has been lost on node 1") - } - if node2gs2.IsMonitor(ref) == false { - t.Fatal("monitor reference has been lost on node 2") - } - if found := node1gs1.DemonitorProcess(ref); found == false { - t.Fatal("lost monitoring reference on node1") - } - // Demonitoring is the async message with nothing as a feedback. - // use waitForTimeout just as a short timer - waitForTimeout(t, gs1.v) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - if err := checkCleanProcessRef(node2gs2, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor/terminate: ") - ref = node1gs1.MonitorProcess(node2gs2.Self()) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - node2gs2.Exit("normal") - result := gen.MessageDown{ - Ref: ref, - Pid: node2gs2.Self(), - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - if err := checkCleanProcessRef(node2gs2, ref); err != nil { - t.Fatal(err) - } - - fmt.Printf("... 
by Pid Local-Remote: gs1 -> monitor unknownPid: ") - ref = node1gs1.MonitorProcess(node2gs2.Self()) - result = gen.MessageDown{ - Ref: ref, - Pid: node2gs2.Self(), - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ = node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor/NodeDown: ") - ref = node1gs1.MonitorProcess(node2gs2.Self()) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - node1.Disconnect(node2.Name()) - node2.Stop() - result = gen.MessageDown{ - Ref: ref, - Pid: node2gs2.Self(), - Reason: "noconnection", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - - fmt.Printf("... by Pid Local-Remote: gs1 -> gs2. monitor unknown node: ") - ref = node1gs1.MonitorProcess(node2gs2.Self()) - result.Ref = ref - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - node1.Stop() -} - -func TestMonitorLocalRemoteByName(t *testing.T) { - fmt.Printf("\n=== Test Monitor Local-Remote by Name\n") - fmt.Printf("Starting nodes: nodeM1LocalRemoteByTuple@localhost, nodeM2LocalRemoteByTuple@localhost: ") - node1, _ := ergo.StartNode("nodeM1LocalRemoteByTuple@localhost", "cookies", node.Options{}) - node2, _ := ergo.StartNode("nodeM2LocalRemoteByTuple@localhost", "cookies", node.Options{}) - if node1 == nil || node2 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - processID := gen.ProcessID{Name: "gs2", Node: node2.Name()} - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor/demonitor: ") - ref := node1gs1.MonitorProcess(processID) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - if err := checkCleanProcessRef(node1gs1, ref); err == nil { - t.Fatal("monitor reference has been lost on node 1") - } - if err := checkCleanProcessRef(node2gs2, ref); err == nil { - t.Fatal("monitor reference has been lost on node 2") - } - if found := node1gs1.DemonitorProcess(ref); found == false { - t.Fatal("lost monitoring reference on node1") - } - // Demonitoring is the async message with nothing as a feedback. - // use waitForTimeout just as a short timer - waitForTimeout(t, gs1.v) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - if err := checkCleanProcessRef(node2gs2, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. 
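Across the local, remote, and proxy variants, the Reason field of gen.MessageDown carries the entire diagnosis: "normal" for a clean exit, "noproc" for a dead or unknown target, "noconnection" when the peer node goes down, "noproxy" when an intermediate proxy hop dies, and "unsupported" when the peer's proxy flags forbid monitoring. A sketch of a consumer, where watcher is a hypothetical gen.Server-embedding type:

    func (w *watcher) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
        if down, ok := message.(gen.MessageDown); ok {
            switch down.Reason {
            case "normal", "noproc":
                // target exited or never existed: drop state keyed by down.Ref
            case "noconnection", "noproxy":
                // connectivity loss: the target itself may still be alive
            case "unsupported":
                // the peer refuses monitoring (ProxyFlags.EnableMonitor = false)
            }
        }
        return gen.ServerStatusOK
    }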
monitor/terminate: ") - ref = node1gs1.MonitorProcess(processID) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - node2gs2.Exit("normal") - result := gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - if node2gs2.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> monitor unknown remote name: ") - ref = node1gs1.MonitorProcess(processID) - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ = node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor/onNodeDown: ") - ref = node1gs1.MonitorProcess(processID) - node1.Disconnect(node2.Name()) - node2.Stop() - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "noconnection", - } - waitForResultWithValue(t, gs1.v, result) - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Remote: gs1 -> gs2. monitor unknown node: ") - ref = node1gs1.MonitorProcess(processID) - result.Ref = ref - waitForResultWithValue(t, gs1.v, result) - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - node1.Stop() -} - -func TestMonitorLocalProxyRemoteByPid(t *testing.T) { - fmt.Printf("\n=== Test Monitor Remote via Proxy by Pid\n") - fmt.Printf("Starting nodes: nodeM1ProxyRemoteByPid@localhost, nodeM2ProxyRemoteByPid@localhost, nodeM3ProxyRemoteByPid@localhost : ") - opts1 := node.Options{} - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableMonitor = false - node1, err := ergo.StartNode("nodeM1ProxyRemoteByPid@localhost", "cookies", opts1) - if err != nil { - t.Fatal("can't start node:", err) - } - opts2 := node.Options{} - opts2.Proxy.Transit = true - node2, err := ergo.StartNode("nodeM2ProxyRemoteByPid@localhost", "cookies", opts2) - if err != nil { - t.Fatal("can't start node:", err, node2.Name()) - } - opts3 := node.Options{} - opts3.Proxy.Accept = true - node3, err := ergo.StartNode("nodeM3ProxyRemoteByPid@localhost", "cookies", opts3) - if err != nil { - t.Fatal("can't start node:", err) - } - - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - node1.AddProxyRoute(route) - node1.Connect(node3.Name()) - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs3 := &testMonitor{ - v: make(chan interface{}, 2), - } - - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ := node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - // by Pid - fmt.Printf("... by Pid Local-Proxy-Remote: gs1 -> gs3. 
monitor/demonitor: ") - ref := node1gs1.MonitorProcess(node3gs3.Self()) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - if node1gs1.IsMonitor(ref) == false { - t.Fatal("monitor reference has been lost on node 1") - } - if node3gs3.IsMonitor(ref) == false { - t.Fatal("monitor reference has been lost on node 3") - } - if found := node1gs1.DemonitorProcess(ref); found == false { - t.Fatal("lost monitoring reference on node1") - } - // Demonitoring is the async message with nothing as a feedback. - // use waitForTimeout just as a short timer - waitForTimeout(t, gs1.v) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - if err := checkCleanProcessRef(node3gs3, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... by Pid Local-Proxy-Remote: gs1 -> gs3. monitor/terminate: ") - ref = node1gs1.MonitorProcess(node3gs3.Self()) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - node3gs3.Exit("normal") - result := gen.MessageDown{ - Ref: ref, - Pid: node3gs3.Self(), - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - if err := checkCleanProcessRef(node3gs3, ref); err != nil { - t.Fatal(err) - } - - fmt.Printf("... by Pid Local-Proxy-Remote: gs1 -> monitor unknownPid: ") - ref = node1gs1.MonitorProcess(node3gs3.Self()) - result = gen.MessageDown{ - Ref: ref, - Pid: node3gs3.Self(), - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ = node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - fmt.Printf("... by Pid Local-Proxy-Remote: gs3 -> gs1. monitor/(node1: ProxyFlags.EnableMonitor = false): ") - - ref = node3gs3.MonitorProcess(node1gs1.Self()) - result = gen.MessageDown{ - Ref: ref, - Pid: node1gs1.Self(), - Reason: "unsupported", - } - waitForResultWithValue(t, gs3.v, result) - - fmt.Printf("... by Pid Local-Proxy-Remote: gs1 -> gs3. monitor/ProxyDown: ") - ref = node1gs1.MonitorProcess(node3gs3.Self()) - waitForTimeout(t, gs1.v) - node2.Stop() - result = gen.MessageDown{ - Ref: ref, - Pid: node3gs3.Self(), - Reason: "noproxy", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - - node2, err = ergo.StartNode("nodeM2ProxyRemoteByPid@localhost", "cookies", opts2) - if err != nil { - t.Fatal("can't start node:", err, node2.Name()) - } - - fmt.Printf("... by Pid Local-Proxy-Remote: gs1 -> gs3. monitor/NodeDown: ") - ref = node1gs1.MonitorProcess(node3gs3.Self()) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - node3.Stop() - result = gen.MessageDown{ - Ref: ref, - Pid: node3gs3.Self(), - Reason: "noconnection", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - - fmt.Printf("... by Pid Local-Proxy-Remote: gs1 -> gs3. 
monitor unknown node: ") - ref = node1gs1.MonitorProcess(node3gs3.Self()) - result.Ref = ref - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - node1.Stop() -} - -func TestMonitorLocalProxyRemoteByName(t *testing.T) { - fmt.Printf("\n=== Test Monitor Local-Proxy-Remote by Name\n") - fmt.Printf("Starting nodes: nodeM1ProxyRemoteByName@localhost, nodeM2RemoteByName@localhost, nodeM3RemoteByName@localhost: ") - opts1 := node.Options{} - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableMonitor = false - node1, err := ergo.StartNode("nodeM1RemoteByName@localhost", "cookies", opts1) - if err != nil { - t.Fatal("can't start node:", err) - } - opts2 := node.Options{} - opts2.Proxy.Transit = true - node2, err := ergo.StartNode("nodeM2RemoteByName@localhost", "cookies", opts2) - if err != nil { - t.Fatal("can't start node:", err) - } - opts3 := node.Options{} - opts3.Proxy.Accept = true - node3, err := ergo.StartNode("nodeM3RemoteByName@localhost", "cookies", opts3) - if err != nil { - t.Fatal("can't start node:", err) - } - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - node1.AddProxyRoute(route) - node1.Connect(node3.Name()) - fmt.Println("OK") - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs3 := &testMonitor{ - v: make(chan interface{}, 2), - } - - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ := node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - processID := gen.ProcessID{Name: "gs3", Node: node3.Name()} - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs1 -> gs3. monitor/demonitor: ") - ref := node1gs1.MonitorProcess(processID) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - if err := checkCleanProcessRef(node1gs1, ref); err == nil { - t.Fatal("monitor reference has been lost on node 1") - } - if err := checkCleanProcessRef(node3gs3, ref); err == nil { - t.Fatal("monitor reference has been lost on node 3") - } - if found := node1gs1.DemonitorProcess(ref); found == false { - t.Fatal("lost monitoring reference on node1") - } - // Demonitoring is the async message with nothing as a feedback. - // use waitForTimeout just as a short timer - waitForTimeout(t, gs1.v) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - if err := checkCleanProcessRef(node3gs3, ref); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs1 -> gs3. monitor/terminate: ") - ref = node1gs1.MonitorProcess(processID) - // wait a bit for the MessageDown if something went wrong - waitForTimeout(t, gs1.v) - if node1gs1.IsMonitor(ref) == false { - t.Fatal("monitor reference has been lost on node 1") - } - if node3gs3.IsMonitor(ref) == false { - t.Fatal("monitor reference has been lost on node 3") - } - node3gs3.Exit("normal") - result := gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "normal", - } - waitForResultWithValue(t, gs1.v, result) - - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - if node3gs3.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - - fmt.Printf("... 
by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs1 -> monitor unknown remote name: ") - ref = node1gs1.MonitorProcess(processID) - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "noproc", - } - waitForResultWithValue(t, gs1.v, result) - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ = node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs3 -> gs1. monitor/(node1: ProxyFlags.EnableMonitor = false): ") - - processID1 := gen.ProcessID{Name: node1gs1.Name(), Node: node1.Name()} - ref = node3gs3.MonitorProcess(processID1) - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID1, - Reason: "unsupported", - } - waitForResultWithValue(t, gs3.v, result) - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs1 -> gs3. monitor/ProxyDown: ") - ref = node1gs1.MonitorProcess(processID) - waitForTimeout(t, gs1.v) - node2.Stop() - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "noproxy", - } - waitForResultWithValue(t, gs1.v, result) - if err := checkCleanProcessRef(node1gs1, ref); err != nil { - t.Fatal(err) - } - - node2, err = ergo.StartNode("nodeM2RemoteByName@localhost", "cookies", opts2) - if err != nil { - t.Fatal("can't start node:", err, node2.Name()) - } - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs1 -> gs3. monitor/NodeDown: ") - ref = node1gs1.MonitorProcess(processID) - waitForTimeout(t, gs1.v) - node3.Stop() - result = gen.MessageDown{ - Ref: ref, - ProcessID: processID, - Reason: "noconnection", - } - waitForResultWithValue(t, gs1.v, result) - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - - fmt.Printf("... by gen.ProcessID{Name, Node} Local-Proxy-Remote: gs1 -> gs3. monitor unknown node: ") - ref = node1gs1.MonitorProcess(processID) - result.Ref = ref - waitForResultWithValue(t, gs1.v, result) - if node1gs1.IsMonitor(ref) { - t.Fatal("monitor ref is still alive") - } - node1.Stop() -} - -/* - Test cases for Local-Local - Link - by Pid - equal_pids, already_linked, doesnt_exist, terminate, unlink -*/ - -func TestLinkLocal(t *testing.T) { - fmt.Printf("\n=== Test Link Local\n") - fmt.Printf("Starting node: nodeL1LocalLocal@localhost: ") - node1, _ := ergo.StartNode("nodeL1LocalLocal@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - node1gs2, _ := node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node1gs2.Self()) - - fmt.Printf("Testing Link process (by Pid only) Local-Local: gs1 -> gs1 (link to itself is not allowed): ") - node1gs1.Link(node1gs1.Self()) - if err := checkCleanLinkPid(node1gs1, node1gs1.Self()); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("Testing Link process (by Pid only) Local-Local: gs1 -> gs2. 
link/unlink: ") - node1gs1.Link(node1gs2.Self()) - - if err := checkLinkPid(node1gs1, node1gs2.Self()); err != nil { - t.Fatal(err) - } - if err := checkLinkPid(node1gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - - node1gs1.Unlink(node1gs2.Self()) - if err := checkCleanLinkPid(node1gs1, node1gs2.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node1gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("Testing Link process (by Pid only) Local-Local: gs1 -> gs2. already_linked: ") - node1gs1.Link(node1gs2.Self()) - if err := checkLinkPid(node1gs1, node1gs2.Self()); err != nil { - t.Fatal(err) - } - if err := checkLinkPid(node1gs2, node1gs1.Self()); err != nil { - t.Fatal("link missing for node1gs2") - } - gs1links := node1gs1.Links() - gs2links := node1gs2.Links() - node1gs2.Link(node1gs1.Self()) - - if len(gs1links) != len(node1gs1.Links()) || len(gs2links) != len(node1gs2.Links()) { - t.Fatal("number of links has changed on the second Link call") - } - fmt.Println("OK") - - fmt.Printf("Testing Link process (by Pid only) Local-Local: gs1 -> gs2. terminate (trap_exit = true): ") - // do not link these process since they are already linked after the previous test - //node1gs1.Link(node1gs2.Self()) - - if checkLinkPid(node1gs1, node1gs2.Self()) != nil { - t.Fatal("link missing for node1gs1") - } - if checkLinkPid(node1gs2, node1gs1.Self()) != nil { - t.Fatal("link missing for node1gs2") - } - - node1gs1.SetTrapExit(true) - node1gs2.Exit("normal") - result := gen.MessageExit{Pid: node1gs2.Self(), Reason: "normal"} - waitForResultWithValue(t, gs1.v, result) - - if err := checkCleanLinkPid(node1gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - if node1gs2.IsAlive() { - t.Fatal("node1gs2 must be terminated") - } - if err := checkCleanLinkPid(node1gs1, node1gs2.Self()); err != nil { - t.Fatal(err) - } - if !node1gs1.IsAlive() { - t.Fatal("gs1 should be alive after gs2 exit due to enabled trap exit on gs1") - } - - fmt.Printf("Testing Link process (by Pid only) Local-Local: gs1 -> gs2. doesnt_exist: ") - node1gs1.Link(node1gs2.Self()) - result = gen.MessageExit{Pid: node1gs2.Self(), Reason: "noproc"} - waitForResultWithValue(t, gs1.v, result) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - node1gs2, _ = node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node1gs2.Self()) - - node1gs1.SetTrapExit(false) - fmt.Printf("Testing Link process (by Pid only) Local-Local: gs1 -> gs2. 
terminate (trap_exit = false): ") - node1gs1.Link(node1gs2.Self()) - - if checkLinkPid(node1gs2, node1gs1.Self()) != nil { - t.Fatal("link missing for node1gs2") - } - if checkLinkPid(node1gs1, node1gs2.Self()) != nil { - t.Fatal("link missing for node1gs1") - } - - node1gs2.Exit("normal") - - // wait a bit to make sure we don't receive anything (we shouldn't) - waitForTimeout(t, gs1.v) - fmt.Println("OK") - - if err := checkCleanLinkPid(node1gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node1gs1, node1gs2.Self()); err != nil { - t.Fatal(err) - } - if node1gs1.IsAlive() { - t.Fatal("gs1 shouldn't be alive after gs2 exit since trap_exit is disabled on gs1") - } - if node1gs2.IsAlive() { - t.Fatal("node1gs2 must be terminated") - } - - node1.Stop() -} - -/* - Test cases for Local-Remote - Link - by Pid - already_linked, doesnt_exist, terminate, unlink, node_down, node_unknown -*/ -func TestLinkRemote(t *testing.T) { - fmt.Printf("\n=== Test Link Remote by Pid\n") - fmt.Printf("Starting nodes: nodeL1LocalRemoteByPid@localhost, nodeL2LocalRemoteByPid@localhost: ") - node1, _ := ergo.StartNode("nodeL1LocalRemoteByPid@localhost", "cookies", node.Options{}) - node2, _ := ergo.StartNode("nodeL2LocalRemoteByPid@localhost", "cookies", node.Options{}) - if node1 == nil || node2 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitor{ - v: make(chan interface{}, 2), - } - - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ := node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. unlink: ") - node1gs1.Link(node2gs2.Self()) - // wait a bit since linking is async - waitForTimeout(t, gs1.v) - - if checkLinkPid(node1gs1, node2gs2.Self()) != nil { - t.Fatal("link missing on node1gs1") - } - if checkLinkPid(node2gs2, node1gs1.Self()) != nil { - t.Fatal("link missing on node2gs2") - } - - node1gs1.Unlink(node2gs2.Self()) - // wait a bit since unlinking is async - waitForTimeout(t, gs1.v) - if err := checkCleanLinkPid(node1gs1, node2gs2.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node2gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. already_linked: ") - node1gs1.Link(node2gs2.Self()) - if checkLinkPid(node1gs1, node2gs2.Self()) != nil { - t.Fatal("link missing on node1gs1") - } - // wait a bit since linking is async - waitForTimeout(t, gs1.v) - if checkLinkPid(node2gs2, node1gs1.Self()) != nil { - t.Fatal("link missing on node2gs2") - } - ll1 := len(node1gs1.Links()) - ll2 := len(node2gs2.Links()) - - node2gs2.Link(node1gs1.Self()) - // wait a bit since linking is async - waitForTimeout(t, gs2.v) - - if ll1 != len(node1gs1.Links()) || ll2 != len(node2gs2.Links()) { - t.Fatal("number of links has changed on the second Link call") - } - fmt.Println("OK")
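Links behave differently from monitors in three ways these cases pin down: they are symmetric and unnamed (no reference), a repeated Link is a no-op, and delivery depends on the receiver's trap_exit flag. With SetTrapExit(true) the peer's exit is turned into a gen.MessageExit the process can handle; with it disabled, the linked process terminates together with its peer. A sketch, with p1 and p2 standing for spawned gen.Process values:

    p1.Link(p2.Self()) // symmetric: present in both p1.Links() and p2.Links()
    p1.Link(p2.Self()) // repeated call: no duplicate entry
    p1.SetTrapExit(true)
    p2.Exit("normal")
    // p1 survives and receives gen.MessageExit{Pid: p2.Self(), Reason: "normal"};
    // with SetTrapExit(false), p1 would have terminated along with p2.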
fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. terminate (trap_exit = true): ") - // do not link these processes again: they are still linked after the previous test - - node1gs1.SetTrapExit(true) - - node2gs2.Exit("normal") - result := gen.MessageExit{Pid: node2gs2.Self(), Reason: "normal"} - waitForResultWithValue(t, gs1.v, result) - - if err := checkCleanLinkPid(node1gs1, node2gs2.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node2gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - if !node1gs1.IsAlive() { - t.Fatal("gs1 should be alive after gs2 exit since trap_exit is enabled on gs1") - } - - fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. doesnt_exist: ") - ll1 = len(node1gs1.Links()) - node1gs1.Link(node2gs2.Self()) - result = gen.MessageExit{Pid: node2gs2.Self(), Reason: "noproc"} - waitForResultWithValue(t, gs1.v, result) - if ll1 != len(node1gs1.Links()) { - t.Fatal("number of links has changed after linking to a dead process") - } - - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ = node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - node1gs1.SetTrapExit(false) - fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. terminate (trap_exit = false): ") - node1gs1.Link(node2gs2.Self()) - waitForTimeout(t, gs2.v) - - if checkLinkPid(node1gs1, node2gs2.Self()) != nil { - t.Fatal("link missing on node1gs1") - } - if checkLinkPid(node2gs2, node1gs1.Self()) != nil { - t.Fatal("link missing on node2gs2") - } - - node2gs2.Exit("normal") - - // wait a bit to make sure we don't receive anything (we shouldn't) - waitForTimeout(t, gs1.v) - - if err := checkCleanLinkPid(node1gs1, node2gs2.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node2gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - if node1gs1.IsAlive() { - t.Fatal("gs1 shouldn't be alive after gs2 exit since trap_exit is disabled on gs1") - } - if node2gs2.IsAlive() { - t.Fatal("gs2 must be terminated") - } - fmt.Println("OK") - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ = node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - fmt.Printf(" wait for start of gs2 on %#v: ", node2.Name()) - node2gs2, _ = node2.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.v, node2gs2.Self()) - - node1gs1.SetTrapExit(true) - fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. node_down: ") - node1gs1.Link(node2gs2.Self()) - waitForTimeout(t, gs1.v) - - if checkLinkPid(node1gs1, node2gs2.Self()) != nil { - t.Fatal("link missing for node1gs1") - } - if checkCleanLinkPid(node2gs2, node1gs1.Self()) == nil { - t.Fatal("link missing for node2gs2") - } - - // racy case: processing of the process termination (on the remote peer) can complete - // before the link teardown there, so a MessageExit with the "kill" reason may arrive - // first. - node2.Stop() - result1 := gen.MessageExit{Pid: node2gs2.Self(), Reason: "noconnection"} - result2 := gen.MessageExit{Pid: node2gs2.Self(), Reason: "kill"} - - waitForResultWithValueOrValue(t, gs1.v, result1, result2) - - if err := checkCleanLinkPid(node1gs1, node2gs2.Self()); err != nil { - t.Fatal(err) - } - // must wait a bit - waitForTimeout(t, gs1.v) - if err := checkCleanLinkPid(node2gs2, node1gs1.Self()); err != nil { - t.Fatal(err) - } - - ll1 = len(node1gs1.Links()) - fmt.Printf("Testing Link process (by Pid only) Local-Remote: gs1 -> gs2. 
node_unknown: ") - node1gs1.Link(node2gs2.Self()) - result = gen.MessageExit{Pid: node2gs2.Self(), Reason: "noconnection"} - waitForResultWithValue(t, gs1.v, result) - - if ll1 != len(node1gs1.Links()) { - t.Fatal("number of links has changed on the second Link call") - } - node1.Stop() -} - -func TestLinkRemoteProxy(t *testing.T) { - fmt.Printf("\n=== Test Link Remote Via Proxy\n") - fmt.Printf("Starting nodes: nodeL1RemoteViaProxy@localhost, nodeL2RemoteViaProxy@localhost, nodeL3RemoteViaProxy@localhost: ") - node1, err := ergo.StartNode("nodeL1RemoteViaProxy@localhost", "cookies", node.Options{}) - if err != nil { - t.Fatal(err) - } - node2opts := node.Options{} - node2opts.Proxy.Transit = true - node2, err := ergo.StartNode("nodeL2RemoteViaProxy@localhost", "cookies", node2opts) - if err != nil { - t.Fatal(err) - } - node3opts := node.Options{} - node3opts.Proxy.Accept = true - node3, err := ergo.StartNode("nodeL3RemoteViaProxy@localhost", "cookies", node3opts) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - route.Flags = node.DefaultProxyFlags() - route.Flags.EnableLink = false - node1.AddProxyRoute(route) - - fmt.Printf(" check connectivity of %s with %s via proxy %s: ", node1.Name(), node3.Name(), node2.Name()) - if err := node1.Connect(node3.Name()); err != nil { - t.Fatal(err) - } - node1indirect := node1.NodesIndirect() - node3indirect := node3.NodesIndirect() - if len(node1indirect) != 1 || len(node3indirect) != 1 { - t.Fatal("wrong indirect nodes (node1:", node1indirect, "; node3:", node3indirect, ")") - } - if node1indirect[0] != node3.Name() || node3indirect[0] != node1.Name() { - t.Fatal("wrong indirect nodes (node1:", node1indirect, "; node3:", node3indirect, ")") - } - fmt.Println("OK") - gs1 := &testMonitor{ - v: make(chan interface{}, 2), - } - gs3 := &testMonitor{ - v: make(chan interface{}, 2), - } - - // starting gen servers - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ := node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. unlink: ") - node1gs1.Link(node3gs3.Self()) - // wait a bit since linking process is async - waitForTimeout(t, gs1.v) - - if checkLinkPid(node1gs1, node3gs3.Self()) != nil { - t.Fatal("link missing on node1gs1") - } - if checkLinkPid(node3gs3, node1gs1.Self()) != nil { - t.Fatal("link missing on node3gs3 ") - } - - node1gs1.Unlink(node3gs3.Self()) - // wait a bit since unlinking process is async - waitForTimeout(t, gs1.v) - if err := checkCleanLinkPid(node1gs1, node3gs3.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node3gs3, node1gs1.Self()); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. 
already_linked: ") - node1gs1.Link(node3gs3.Self()) - if checkLinkPid(node1gs1, node3gs3.Self()) != nil { - t.Fatal("link missing on node1gs1") - } - // wait a bit since linking process is async - waitForTimeout(t, gs1.v) - if checkLinkPid(node3gs3, node1gs1.Self()) != nil { - t.Fatal("link missing on node3gs3") - } - ll1 := len(node1gs1.Links()) - ll3 := len(node3gs3.Links()) - - node3gs3.Link(node1gs1.Self()) - // wait a bit since linking process is async - waitForTimeout(t, gs3.v) - - if ll1 != len(node1gs1.Links()) || ll3 != len(node3gs3.Links()) { - t.Fatal("number of links has changed on the second Link call") - } - fmt.Println("OK") - - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. terminate (trap_exit = true): ") - // do not link these process since they are already linked after the previous test - - node1gs1.SetTrapExit(true) - - node3gs3.Exit("normal") - result := gen.MessageExit{Pid: node3gs3.Self(), Reason: "normal"} - waitForResultWithValue(t, gs1.v, result) - - if err := checkCleanLinkPid(node1gs1, node3gs3.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node3gs3, node1gs1.Self()); err != nil { - t.Fatal(err) - } - if !node1gs1.IsAlive() { - t.Fatal("gs1 should be alive after gs3 exit due to enabled trap exit on gs1") - } - - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. doesnt_exist: ") - ll1 = len(node1gs1.Links()) - node1gs1.Link(node3gs3.Self()) - result = gen.MessageExit{Pid: node3gs3.Self(), Reason: "noproc"} - waitForResultWithValue(t, gs1.v, result) - if ll1 != len(node1gs1.Links()) { - t.Fatal("number of links has changed on the second Link call") - } - - fmt.Printf(" wait for start of gs3 on %#v: ", node1.Name()) - node3gs3, _ = node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - node1gs1.SetTrapExit(false) - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. terminate (trap_exit = false): ") - node1gs1.Link(node3gs3.Self()) - waitForTimeout(t, gs3.v) - - if checkLinkPid(node1gs1, node3gs3.Self()) != nil { - t.Fatal("link missing on node1gs1") - } - if checkLinkPid(node3gs3, node1gs1.Self()) != nil { - t.Fatal("link missing on node3gs3") - } - - node3gs3.Exit("normal") - - // wait a bit to make sure if we receive anything (shouldnt receive) - waitForTimeout(t, gs1.v) - - if err := checkCleanLinkPid(node1gs1, node3gs3.Self()); err != nil { - t.Fatal(err) - } - if err := checkCleanLinkPid(node3gs3, node1gs1.Self()); err != nil { - t.Fatal(err) - } - if node1gs1.IsAlive() { - t.Fatal("gs1 shouldnt be alive after gs3 exit due to disable trap exit on gs1") - } - if node3gs3.IsAlive() { - t.Fatal("gs3 must be terminated") - } - fmt.Println("OK") - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ = node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ = node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - node1gs1.SetTrapExit(true) - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. node_down: ") - node1gs1.Link(node3gs3.Self()) - waitForTimeout(t, gs1.v) - - if checkLinkPid(node1gs1, node3gs3.Self()) != nil { - t.Fatal("link missing for node1gs1") - } - if checkCleanLinkPid(node3gs3, node1gs1.Self()) == nil { - t.Fatal("link missing for node3gs3") - } - - // race conditioned case. 
// racy case: processing of the process termination (on the remote peer) can complete - // before the link teardown there, so a MessageExit with the "kill" reason may arrive - // first. - node3.Stop() - result1 := gen.MessageExit{Pid: node3gs3.Self(), Reason: "noconnection"} - result2 := gen.MessageExit{Pid: node3gs3.Self(), Reason: "kill"} - - waitForResultWithValueOrValue(t, gs1.v, result1, result2) - - if err := checkCleanLinkPid(node1gs1, node3gs3.Self()); err != nil { - t.Fatal(err) - } - // must wait a bit - waitForTimeout(t, gs1.v) - if err := checkCleanLinkPid(node3gs3, node1gs1.Self()); err != nil { - t.Fatal(err) - } - - ll1 = len(node1gs1.Links()) - fmt.Printf("Testing Link process Local-Proxy-Remote: gs1 -> gs3. node_unknown: ") - node1gs1.Link(node3gs3.Self()) - result = gen.MessageExit{Pid: node3gs3.Self(), Reason: "noconnection"} - waitForResultWithValue(t, gs1.v, result) - - if ll1 != len(node1gs1.Links()) { - t.Fatal("number of links has changed after linking to a process on an unknown node") - } - - node3opts = node.Options{} - node3opts.Proxy.Accept = true - node3, err = ergo.StartNode("nodeL3RemoteViaProxy@localhost", "cookies", node3opts) - if err != nil { - t.Fatal(err) - } - fmt.Printf(" starting node: %s: ", node3.Name()) - fmt.Println("OK") - fmt.Printf(" wait for start of gs3 on %#v: ", node3.Name()) - node3gs3, _ = node3.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.v, node3gs3.Self()) - - if err := node1.Connect(node3.Name()); err != nil { - t.Fatal(err) - } - - node3gs3.SetTrapExit(true) - fmt.Printf("Testing Proxy Local-Proxy-Remote for link gs3 -> gs1 (Node1 ProxyFlags.EnableLink = false): ") - node3gs3.Link(node1gs1.Self()) - result = gen.MessageExit{Pid: node1gs1.Self(), Reason: lib.ErrPeerUnsupported.Error()} - waitForResultWithValue(t, gs3.v, result) - - node1gs1.Link(node3gs3.Self()) - waitForTimeout(t, gs1.v) - - if checkLinkPid(node1gs1, node3gs3.Self()) != nil { - t.Fatal("link missing for node1gs1") - } - if checkCleanLinkPid(node3gs3, node1gs1.Self()) == nil { - t.Fatal("link missing for node3gs3") - } - fmt.Println("Testing Proxy Down Local-Proxy-Remote for linked gs1 -> gs3 (trap_exit = true): ") - node2.Stop() - - fmt.Printf(" wait for MessageExit with reason 'noproxy' on gs1: ") - result = gen.MessageExit{Pid: node3gs3.Self(), Reason: "noproxy"} - waitForResultWithValue(t, gs1.v, result) - - fmt.Printf(" wait for MessageExit with reason 'noproxy' on gs3: ") - result = gen.MessageExit{Pid: node1gs1.Self(), Reason: "noproxy"} - waitForResultWithValue(t, gs3.v, result) - - node1.Stop() -} - -func TestMonitorNode(t *testing.T) { - fmt.Printf("\n=== Test Monitor Node \n") - fmt.Printf("... 
start nodes A, B, C, D: ") - optsA := node.Options{} - nodeA, e := ergo.StartNode("monitornodeAproxy@localhost", "secret", optsA) - if e != nil { - t.Fatal(e) - } - optsB := node.Options{} - optsB.Proxy.Transit = true - nodeB, e := ergo.StartNode("monitornodeBproxy@localhost", "secret", optsB) - if e != nil { - t.Fatal(e) - } - optsC := node.Options{} - optsC.Proxy.Transit = true - nodeC, e := ergo.StartNode("monitornodeCproxy@localhost", "secret", optsC) - if e != nil { - t.Fatal(e) - } - - optsD := node.Options{} - optsD.Proxy.Accept = true - nodeD, e := ergo.StartNode("monitornodeDproxy@localhost", "secret", optsD) - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - gsA := &testMonitor{ - v: make(chan interface{}, 2), - } - gsB := &testMonitor{ - v: make(chan interface{}, 2), - } - gsD := &testMonitor{ - v: make(chan interface{}, 2), - } - fmt.Printf("... start processA on node A: ") - pA, err := nodeA.Spawn("", gen.ProcessOptions{}, gsA) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsA.v, pA.Self()) - fmt.Printf("... start processB on node B: ") - pB, err := nodeB.Spawn("", gen.ProcessOptions{}, gsB) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsB.v, pB.Self()) - fmt.Printf("... start processD on node D: ") - pD, err := nodeD.Spawn("", gen.ProcessOptions{}, gsD) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsD.v, pD.Self()) - fmt.Printf("... add proxy route on A to the node D via B: ") - routeAtoDviaB := node.ProxyRoute{ - Name: nodeD.Name(), - Proxy: nodeB.Name(), - } - if err := nodeA.AddProxyRoute(routeAtoDviaB); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... add proxy transit route on B to the node D via C: ") - route := node.ProxyRoute{ - Name: nodeD.Name(), - Proxy: nodeC.Name(), - } - if err := nodeB.AddProxyRoute(route); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... monitor D by processA (via proxy connection): ") - refA := pA.MonitorNode(nodeD.Name()) - fmt.Println("OK") - fmt.Printf("... monitor A by processD (via proxy connection): ") - refD := pD.MonitorNode(nodeA.Name()) - fmt.Println("OK") - fmt.Printf("... monitor C by processB (via direct connection): ") - refB := pB.MonitorNode(nodeC.Name()) - fmt.Println("OK") - fmt.Printf("... 
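Node monitors extend the same pattern one level up. MonitorNode returns a reference; losing a directly connected peer is reported as gen.MessageNodeDown{Ref, Name}, while a broken hop inside a proxy chain is reported as gen.MessageProxyDown, whose Node field names the failed hop and whose Proxy field names the proxy segment the notification relates to. (Nodes() lists every connected peer; NodesIndirect() lists only those reached via a proxy.) A sketch against the v2 API, with pA and nodeD as in the test above and failedHop/viaProxy as placeholders:

    ref := pA.MonitorNode(nodeD.Name())
    // direct connection lost:
    //   gen.MessageNodeDown{Ref: ref, Name: nodeD.Name()}
    // an intermediate hop of the proxy chain lost:
    //   gen.MessageProxyDown{Ref: ref, Node: failedHop, Proxy: viaProxy, Reason: "noconnection"}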
check connectivity (A -> D via B and C, D -> A via C and B): ") - nodelist := []string{nodeB.Name(), nodeD.Name()} - nodesA := nodeA.Nodes() - sort.Strings(nodesA) - if reflect.DeepEqual(nodesA, nodelist) == false { - t.Fatal("node A has wrong peers", nodeA.Nodes()) - } - if reflect.DeepEqual(nodeA.NodesIndirect(), []string{nodeD.Name()}) == false { - t.Fatal("node A has wrong proxy peer", nodeA.NodesIndirect()) - } - nodelist = []string{nodeA.Name(), nodeC.Name()} - nodesB := nodeB.Nodes() - sort.Strings(nodesB) - if reflect.DeepEqual(nodesB, nodelist) == false { - t.Fatal("node B has wrong peers", nodeB.Nodes()) - } - if reflect.DeepEqual(nodeB.NodesIndirect(), []string{}) == false { - t.Fatal("node B has wrong proxy peer", nodeB.NodesIndirect()) - } - nodelist = []string{nodeB.Name(), nodeD.Name()} - nodesC := nodeC.Nodes() - sort.Strings(nodesC) - if reflect.DeepEqual(nodesC, nodelist) == false { - t.Fatal("node C has wrong peers", nodeC.Nodes()) - } - if reflect.DeepEqual(nodeC.NodesIndirect(), []string{}) == false { - t.Fatal("node C has wrong proxy peer", nodeC.NodesIndirect()) - } - nodelist = []string{nodeA.Name(), nodeC.Name()} - nodesD := nodeD.Nodes() - sort.Strings(nodesD) - if reflect.DeepEqual(nodesD, nodelist) == false { - t.Fatal("node D has wrong peers", nodeD.Nodes()) - } - if reflect.DeepEqual(nodeD.NodesIndirect(), []string{nodeA.Name()}) == false { - t.Fatal("node D has wrong proxy peer", nodeD.NodesIndirect()) - } - fmt.Println("OK") - fmt.Printf("... stop node C : ") - nodeC.Stop() - fmt.Println("OK") - resultMessageProxyDown := gen.MessageProxyDown{Ref: refD, Node: nodeC.Name(), Proxy: nodeD.Name(), Reason: "noconnection"} - fmt.Printf("... processD must receive gen.MessageProxyDown{Node: C, Proxy: D,...}: ") - waitForResultWithValue(t, gsD.v, resultMessageProxyDown) - resultMessageProxyDown = gen.MessageProxyDown{Ref: refA, Node: nodeC.Name(), Proxy: nodeB.Name(), Reason: "noconnection"} - fmt.Printf("... processA must receive gen.MessageProxyDown{Node: C, Proxy: B,...}: ") - waitForResultWithValue(t, gsA.v, resultMessageProxyDown) - resultMessageDown := gen.MessageNodeDown{Ref: refB, Name: nodeC.Name()} - fmt.Printf("... processB must receive gen.MessageDown: ") - waitForResultWithValue(t, gsB.v, resultMessageDown) - - fmt.Printf("... 
check connectivity (A <-> B, C is down, D has no peers): ") - if reflect.DeepEqual(nodeA.Nodes(), []string{nodeB.Name()}) == false { - t.Fatal("node A has wrong peer", nodeA.Nodes()) - } - if reflect.DeepEqual(nodeB.Nodes(), []string{nodeA.Name()}) == false { - t.Fatal("node B has wrong peer", nodeB.Nodes()) - } - if nodeC.IsAlive() == true { - t.Fatal("node C is still alive") - } - if reflect.DeepEqual(nodeC.Nodes(), []string{}) == false { - t.Fatal("node C has peers", nodeC.Nodes()) - } - if reflect.DeepEqual(nodeD.Nodes(), []string{}) == false { - t.Fatal("node D has peers", nodeD.Nodes()) - } - fmt.Println("OK") - nodeD.Stop() - nodeA.Stop() - nodeB.Stop() -} - -type testMonitorEvent struct { - gen.Server - v chan interface{} -} - -type testEventCmdRegister struct { - event gen.Event - messages []gen.EventMessage -} -type testEventCmdUnregister struct { - event gen.Event -} -type testEventCmdMonitor struct { - event gen.Event -} -type testEventCmdSend struct { - event gen.Event - message gen.EventMessage -} - -type testMessageEventA struct { - value string -} - -func (tgs *testMonitorEvent) Init(process *gen.ServerProcess, args ...etf.Term) error { - tgs.v <- process.Self() - return nil -} -func (tgs *testMonitorEvent) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch cmd := message.(type) { - case testEventCmdRegister: - return nil, process.RegisterEvent(cmd.event, cmd.messages...) - case testEventCmdUnregister: - return nil, process.UnregisterEvent(cmd.event) - case testEventCmdMonitor: - return nil, process.MonitorEvent(cmd.event) - case testEventCmdSend: - return nil, process.SendEventMessage(cmd.event, cmd.message) - - default: - return nil, fmt.Errorf("unknown cmd") - - } -} - -func (tgs *testMonitorEvent) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - tgs.v <- message - return gen.ServerStatusOK -} - -func TestMonitorEvents(t *testing.T) { - fmt.Printf("\n=== Test Monitor Events\n") - fmt.Printf("Starting node: nodeM1Events@localhost: ") - node1, _ := ergo.StartNode("nodeM1Events@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } - defer node1.Stop() - - fmt.Println("OK") - gs1 := &testMonitorEvent{ - v: make(chan interface{}, 2), - } - gs2 := &testMonitorEvent{ - v: make(chan interface{}, 2), - } - gs3 := &testMonitorEvent{ - v: make(chan interface{}, 2), - } - // starting gen servers - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, err := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gs1.v, node1gs1.Self()) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - node1gs2, err := node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gs2.v, node1gs2.Self()) - - fmt.Printf(" wait for start of gs3 on %#v: ", node1.Name()) - node1gs3, err := node1.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gs3.v, node1gs3.Self()) - - fmt.Printf("... register new event : ") - cmd := testEventCmdRegister{ - event: "testEvent", - messages: []gen.EventMessage{testMessageEventA{}}, - } - _, err = node1gs1.Direct(cmd) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... 
register new event with the same name: ") - _, err = node1gs1.Direct(cmd) - if err != lib.ErrTaken { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... unregister unknown event: ") - cmd1 := testEventCmdUnregister{ - event: "unknownEvent", - } - _, err = node1gs1.Direct(cmd1) - if err != lib.ErrEventUnknown { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... unregister event by not an owner: ") - cmd1 = testEventCmdUnregister{ - event: "testEvent", - } - _, err = node1gs2.Direct(cmd1) - if err != lib.ErrEventOwner { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... unregister event by the owner: ") - cmd1 = testEventCmdUnregister{ - event: "testEvent", - } - _, err = node1gs1.Direct(cmd1) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... monitor unknown event: ") - cmd2 := testEventCmdMonitor{ - event: "testEvent", - } - _, err = node1gs2.Direct(cmd2) - if err != lib.ErrEventUnknown { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... monitor event: ") - cmd = testEventCmdRegister{ - event: "testEvent", - messages: []gen.EventMessage{testMessageEventA{}}, - } - _, err = node1gs1.Direct(cmd) - if err != nil { - t.Fatal(err) - } - - cmd2 = testEventCmdMonitor{ - event: "testEvent", - } - _, err = node1gs2.Direct(cmd2) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... monitor event itself: ") - cmd2 = testEventCmdMonitor{ - event: "testEvent", - } - _, err = node1gs1.Direct(cmd2) - if err != lib.ErrEventSelf { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... send unknown event: ") - msg := testMessageEventA{value: "test"} - cmd3 := testEventCmdSend{ - event: "unknownEvent", - message: msg, - } - _, err = node1gs1.Direct(cmd3) - if err != lib.ErrEventUnknown { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... send event with wrong message type: ") - msgWrong := "wrong type" - cmd3 = testEventCmdSend{ - event: "testEvent", - message: msgWrong, - } - _, err = node1gs1.Direct(cmd3) - if err != lib.ErrEventMismatch { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... send event by not an owner: ") - cmd3 = testEventCmdSend{ - event: "testEvent", - message: msg, - } - _, err = node1gs2.Direct(cmd3) - if err != lib.ErrEventOwner { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... send event: ") - cmd3 = testEventCmdSend{ - event: "testEvent", - message: msg, - } - _, err = node1gs1.Direct(cmd3) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gs2.v, msg) - - fmt.Printf("... monitor event twice: ") - cmd2 = testEventCmdMonitor{ - event: "testEvent", - } - _, err = node1gs2.Direct(cmd2) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... send event. must be received twice: ") - cmd3 = testEventCmdSend{ - event: "testEvent", - message: msg, - } - _, err = node1gs1.Direct(cmd3) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... receive first event message: ") - waitForResultWithValue(t, gs2.v, msg) - fmt.Printf("... receive second event message: ") - waitForResultWithValue(t, gs2.v, msg) - - down := gen.MessageEventDown{ - Event: "testEvent", - Reason: "unregistered", - } - fmt.Printf("... unregister event owner. must be received gen.MessageEventDown twice with reason 'unregistered': ") - cmd1 = testEventCmdUnregister{ - event: "testEvent", - } - _, err = node1gs1.Direct(cmd1) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... 
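The event subsystem checked here is owner-centric: RegisterEvent binds an event name to its producer and the message types it may emit, only the owner may SendEventMessage or UnregisterEvent, and subscribers attach with MonitorEvent. Each misuse maps to a distinct error: lib.ErrTaken for a duplicate name, lib.ErrEventUnknown for a missing event, lib.ErrEventOwner for a non-owner call, lib.ErrEventSelf for subscribing to one's own event, and lib.ErrEventMismatch for an unregistered message type. A condensed sketch from inside a gen.Server callback (process is the *gen.ServerProcess):

    // Owner side: register the event and the message types it may publish.
    err := process.RegisterEvent("testEvent", testMessageEventA{}) // lib.ErrTaken if the name is in use
    // Subscriber side (a different process): start delivery to HandleInfo; a
    // gen.MessageEventDown arrives if the owner unregisters or terminates.
    //   err = process.MonitorEvent("testEvent")
    // Owner-only, type-checked publish:
    err = process.SendEventMessage("testEvent", testMessageEventA{value: "test"})
    _ = err // lib.ErrEventMismatch for a message type that was not registered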
receive first event down message: ") - waitForResultWithValue(t, gs2.v, down) - fmt.Printf("... receive second event down message: ") - waitForResultWithValue(t, gs2.v, down) - - cmd = testEventCmdRegister{ - event: "testEvent", - messages: []gen.EventMessage{testMessageEventA{}}, - } - _, err = node1gs3.Direct(cmd) - if err != nil { - t.Fatal(err) - } - - cmd2 = testEventCmdMonitor{ - event: "testEvent", - } - _, err = node1gs2.Direct(cmd2) - if err != nil { - t.Fatal(err) - } - fmt.Printf("... terminate event owner. must be received gen.MessageEventDown with reason 'kill': ") - node1gs3.Kill() - down = gen.MessageEventDown{ - Event: "testEvent", - Reason: "kill", - } - waitForResultWithValue(t, gs2.v, down) -} - -// helpers -func checkCleanProcessRef(p gen.Process, ref etf.Ref) error { - if p.IsMonitor(ref) { - return fmt.Errorf("monitor process reference hasn't been cleaned correctly") - } - - return nil -} - -func checkCleanLinkPid(p gen.Process, pid etf.Pid) error { - for _, l := range p.Links() { - if l == pid { - return fmt.Errorf("process link reference hasn't been cleaned correctly") - } - } - return nil -} -func checkLinkPid(p gen.Process, pid etf.Pid) error { - for _, l := range p.Links() { - if l == pid { - return nil - } - } - return fmt.Errorf("process %s has no link to %s", p.Self(), pid) -} diff --git a/tests/node_test.go b/tests/node_test.go deleted file mode 100644 index 04a229bf..00000000 --- a/tests/node_test.go +++ /dev/null @@ -1,1777 +0,0 @@ -package tests - -import ( - "context" - "crypto/md5" - "crypto/tls" - "fmt" - "math/rand" - "net" - "reflect" - "sync" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" - "github.com/ergo-services/ergo/proto/dist" -) - -type benchCase struct { - name string - value etf.Term -} - -func TestNode(t *testing.T) { - ctx := context.Background() - listener := node.Listener{ - Listen: 25001, - } - opts := node.Options{ - Listeners: []node.Listener{listener}, - Registrar: dist.CreateRegistrarWithLocalEPMD("", 24999), - } - - node1, _ := ergo.StartNodeWithContext(ctx, "node123@localhost", "cookies", opts) - optsTaken := node.Options{ - Registrar: dist.CreateRegistrarWithLocalEPMD("", 24999), - } - if _, err := ergo.StartNodeWithContext(ctx, "node123@localhost", "cookies", optsTaken); err == nil { - t.Fatal("must be failed here") - } - - if conn, err := net.Dial("tcp", ":25001"); err != nil { - fmt.Println("Connect to the node' listening port FAILED") - t.Fatal(err) - } else { - defer conn.Close() - } - - if conn, err := net.Dial("tcp", ":24999"); err != nil { - fmt.Println("Connect to the node' listening EPMD port FAILED") - t.Fatal(err) - } else { - defer conn.Close() - } - - gs1 := &testServer{ - res: make(chan interface{}, 2), - } - p, e := node1.Spawn("", gen.ProcessOptions{}, gs1) - if e != nil { - t.Fatal(e) - } - - if !p.IsAlive() { - t.Fatal("IsAlive: expect 'true', but got 'false'") - } - - _, ee := node1.ProcessInfo(p.Self()) - if ee != nil { - t.Fatal(ee) - } - - node1.Stop() -} - -type testFragmentationGS struct { - gen.Server -} - -func (f *testFragmentationGS) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - md5original := message.(etf.Tuple)[0].(string) - blob := message.(etf.Tuple)[1].([]byte) - - result := etf.Atom("ok") - md5 := fmt.Sprint(md5.Sum(blob)) - if !reflect.DeepEqual(md5original, md5) { - 
result = etf.Atom("mismatch") - } - - return result, gen.ServerStatusOK -} - -type makeCall struct { - to interface{} - message interface{} -} -type makeCast struct { - to interface{} - message interface{} -} - -type asyncDirect struct { - ref etf.Ref - val etf.Term -} - -type syncDirect struct { - val etf.Term -} - -func (f *testFragmentationGS) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case makeCall: - return process.Call(m.to, m.message) - } - return nil, lib.ErrUnsupportedRequest -} - -func TestNodeFragmentation(t *testing.T) { - var wg sync.WaitGroup - - blob := make([]byte, 1024*1024) - rand.Read(blob) - md5 := fmt.Sprint(md5.Sum(blob)) - message := etf.Tuple{md5, blob} - - node1, _ := ergo.StartNode("nodeT1Fragmentation@localhost", "secret", node.Options{}) - node2, _ := ergo.StartNode("nodeT2Fragmentation@localhost", "secret", node.Options{}) - - tgs := &testFragmentationGS{} - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, tgs) - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, tgs) - - if e1 != nil { - t.Fatal(e1) - } - if e2 != nil { - t.Fatal(e2) - } - - // check single call - call := makeCall{ - to: p2.Self(), - message: message, - } - check, e := p1.Direct(call) - if e != nil { - t.Fatal(e) - } - if check != etf.Atom("ok") { - t.Fatal("md5sum mismatch") - } - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - p1, _ := node1.Spawn("", gen.ProcessOptions{}, tgs) - p2, _ := node2.Spawn("", gen.ProcessOptions{}, tgs) - defer wg.Done() - for k := 0; k < 100; k++ { - call := makeCall{ - to: p2.Self(), - message: message, - } - check, e := p1.Direct(call) - if e != nil { - panic("err on call") - } - if check != etf.Atom("ok") { - panic("md5sum mismatch") - } - } - - }() - } - wg.Wait() -} - -func TestNodeStaticRoute(t *testing.T) { - nodeName1 := "nodeT1StaticRoute@localhost" - nodeName2 := "nodeT2StaticRoute@localhost" - nodeStaticPort := uint16(9876) - - node1, e1 := ergo.StartNode(nodeName1, "secret", node.Options{}) - if e1 != nil { - t.Fatal(e1) - } - defer node1.Stop() - - node2, e2 := ergo.StartNode(nodeName2, "secret", node.Options{}) - if e2 != nil { - t.Fatal(e2) - } - defer node2.Stop() - - nr, err := node1.Resolve(nodeName2) - if err != nil { - t.Fatal("Can't resolve port number for ", nodeName2, err) - } - - // override route for nodeName2 with static port - e := node1.AddStaticRoutePort(nodeName2, nodeStaticPort, node.RouteOptions{}) - if e != nil { - t.Fatal(e) - } - // should be overrided by the new value of nodeStaticPort - if r, err := node1.Resolve(nodeName2); err != nil || r.Port != nodeStaticPort { - t.Fatal("Wrong port number after adding static route. 
Got", r.Port, "Expected", nodeStaticPort) - } - - node1.RemoveStaticRoute(nodeName2) - - // should be resolved into the original port number - if nr2, err := node1.Resolve(nodeName2); err != nil || nr.Port != nr2.Port { - t.Fatal("Wrong port number after removing static route") - } -} - -type handshakeGenServer struct { - gen.Server -} - -func (h *handshakeGenServer) Init(process *gen.ServerProcess, args ...etf.Term) error { - return nil -} - -func (h *handshakeGenServer) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - return "pass", gen.ServerStatusOK -} -func (h *handshakeGenServer) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case makeCall: - return process.Call(m.to, m.message) - } - return nil, lib.ErrUnsupportedRequest -} - -func TestNodeDistHandshake(t *testing.T) { - fmt.Printf("\n=== Test Node Handshake versions\n") - - cert, err := lib.GenerateSelfSignedCert("localhost") - if err != nil { - t.Fatal(err) - } - - // handshake version 5 - handshake5options := dist.HandshakeOptions{ - Version: dist.HandshakeVersion5, - } - - // handshake version 6 - handshake6options := dist.HandshakeOptions{ - Version: dist.HandshakeVersion6, - } - - hgs := &handshakeGenServer{} - - type Pair struct { - name string - nodeA node.Node - nodeB node.Node - } - node1Options5 := node.Options{ - Handshake: dist.CreateHandshake(handshake5options), - } - node1, e1 := ergo.StartNode("node1Handshake5@localhost", "secret", node1Options5) - if e1 != nil { - t.Fatal(e1) - } - node2Options5 := node.Options{ - Handshake: dist.CreateHandshake(handshake5options), - } - node2, e2 := ergo.StartNode("node2Handshake5@localhost", "secret", node2Options5) - if e2 != nil { - t.Fatal(e2) - } - node3Options5 := node.Options{ - Handshake: dist.CreateHandshake(handshake5options), - } - node3, e3 := ergo.StartNode("node3Handshake5@localhost", "secret", node3Options5) - if e3 != nil { - t.Fatal(e3) - } - node4Options6 := node.Options{ - Handshake: dist.CreateHandshake(handshake6options), - } - node4, e4 := ergo.StartNode("node4Handshake6@localhost", "secret", node4Options6) - if e4 != nil { - t.Fatal(e4) - } - // node5, _ := ergo.StartNode("node5Handshake6@localhost", "secret", nodeOptions6) - // node6, _ := ergo.StartNode("node6Handshake5@localhost", "secret", nodeOptions5) - node7Options6 := node.Options{ - Handshake: dist.CreateHandshake(handshake6options), - } - node7, e7 := ergo.StartNode("node7Handshake6@localhost", "secret", node7Options6) - if e7 != nil { - t.Fatal(e7) - } - node8Options6 := node.Options{ - Handshake: dist.CreateHandshake(handshake6options), - } - node8, e8 := ergo.StartNode("node8Handshake6@localhost", "secret", node8Options6) - if e8 != nil { - t.Fatal(e8) - } - node9Options5WithTLS := node.Options{ - Handshake: dist.CreateHandshake(handshake5options), - TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true}, - } - node9, e9 := ergo.StartNode("node9Handshake5@localhost", "secret", node9Options5WithTLS) - if e9 != nil { - t.Fatal(e9) - } - node10Options5WithTLS := node.Options{ - Handshake: dist.CreateHandshake(handshake5options), - TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true}, - } - node10, e10 := ergo.StartNode("node10Handshake5@localhost", "secret", node10Options5WithTLS) - if e10 != nil { - t.Fatal(e10) - } - node11Options5WithTLS := node.Options{ - Handshake: 
dist.CreateHandshake(handshake5options), - TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true}, - } - node11, e11 := ergo.StartNode("node11Handshake5@localhost", "secret", node11Options5WithTLS) - if e11 != nil { - t.Fatal(e11) - } - node12Options6WithTLS := node.Options{ - Handshake: dist.CreateHandshake(handshake6options), - TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true}, - } - node12, e12 := ergo.StartNode("node12Handshake6@localhost", "secret", node12Options6WithTLS) - if e12 != nil { - t.Fatal(e12) - } - // node13, _ := ergo.StartNode("node13Handshake6@localhost", "secret", nodeOptions6WithTLS) - // node14, _ := ergo.StartNode("node14Handshake5@localhost", "secret", nodeOptions5WithTLS) - node15Options6WithTLS := node.Options{ - Handshake: dist.CreateHandshake(handshake6options), - TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true}, - } - node15, e15 := ergo.StartNode("node15Handshake6@localhost", "secret", node15Options6WithTLS) - if e15 != nil { - t.Fatal(e15) - } - node16Options6WithTLS := node.Options{ - Handshake: dist.CreateHandshake(handshake6options), - TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true}, - } - node16, e16 := ergo.StartNode("node16Handshake6@localhost", "secret", node16Options6WithTLS) - if e16 != nil { - t.Fatal(e16) - } - - nodes := []Pair{ - {"No TLS. version 5 -> version 5", node1, node2}, - {"No TLS. version 5 -> version 6", node3, node4}, - //Pair{ "No TLS. version 6 -> version 5", node5, node6 }, - {"No TLS. version 6 -> version 6", node7, node8}, - {"With TLS. version 5 -> version 5", node9, node10}, - {"With TLS. version 5 -> version 6", node11, node12}, - //Pair{ "With TLS. version 6 -> version 5", node13, node14 }, - {"With TLS. 
version 6 -> version 6", node15, node16},
-    }
-
-    defer func(nodes []Pair) {
-        for i := range nodes {
-            nodes[i].nodeA.Stop()
-            nodes[i].nodeB.Stop()
-        }
-    }(nodes)
-
-    var pA, pB gen.Process
-    var e error
-    var result etf.Term
-    for i := range nodes {
-        pair := nodes[i]
-        fmt.Printf(" %s %s -> %s: ", pair.name, pair.nodeA.Name(), pair.nodeB.Name())
-        pA, e = pair.nodeA.Spawn("", gen.ProcessOptions{}, hgs)
-        if e != nil {
-            t.Fatal(e)
-        }
-        pB, e = pair.nodeB.Spawn("", gen.ProcessOptions{}, hgs)
-        if e != nil {
-            t.Fatal(e)
-        }
-
-        call := makeCall{
-            to: pB.Self(),
-            message: "test",
-        }
-        result, e = pA.Direct(call)
-        if e != nil {
-            t.Fatal(e)
-        }
-        if r, ok := result.(string); !ok || r != "pass" {
-            t.Fatal("wrong result")
-        }
-        fmt.Println("OK")
-    }
-}
-
-func TestNodeRemoteSpawn(t *testing.T) {
-    fmt.Printf("\n=== Test Node Remote Spawn\n")
-    node1opts := node.Options{}
-    node1opts.Proxy.Flags = node.DefaultProxyFlags()
-    node1opts.Proxy.Flags.EnableRemoteSpawn = false
-
-    node1, _ := ergo.StartNode("node1remoteSpawn@localhost", "secret", node1opts)
-    node2opts := node.Options{}
-    node2opts.Proxy.Transit = true
-    node2, _ := ergo.StartNode("node2remoteSpawn@localhost", "secret", node2opts)
-    node3opts := node.Options{}
-    node3opts.Proxy.Accept = true
-    node3, _ := ergo.StartNode("node3remoteSpawn@localhost", "secret", node3opts)
-    route := node.ProxyRoute{
-        Name: node3.Name(),
-        Proxy: node2.Name(),
-    }
-    node1.AddProxyRoute(route)
-    defer node1.Stop()
-    defer node2.Stop()
-    defer node3.Stop()
-
-    if err := node1.Connect(node3.Name()); err != nil {
-        t.Fatal(err)
-    }
-
-    node2.ProvideRemoteSpawn("remote", &handshakeGenServer{})
-    process, err := node1.Spawn("gs1", gen.ProcessOptions{}, &handshakeGenServer{})
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    opts := gen.RemoteSpawnOptions{
-        Name: "remote",
-    }
-    fmt.Printf(" process gs1@node1 requests to spawn a new process on node2 and register it with the name 'remote': ")
-    gotPid, err := process.RemoteSpawn(node2.Name(), "remote", opts, 1, 2, 3)
-    if err != nil {
-        t.Fatal(err)
-    }
-    p := node2.ProcessByName("remote")
-    if p == nil {
-        t.Fatal("can't find process 'remote' on node2")
-    }
-    if gotPid != p.Self() {
-        t.Fatal("process pid mismatch")
-    }
-    fmt.Println("OK")
-
-    fmt.Printf(" process gs1@node1 requests to spawn a new process on node2 with the same name (must fail): ")
-    _, err = process.RemoteSpawn(node2.Name(), "remote", opts, 1, 2, 3)
-    if err != lib.ErrTaken {
-        t.Fatal(err)
-    }
-    fmt.Println("OK")
-    fmt.Printf(" process gs1@node1 requests to spawn a new process on node2 with an unregistered behavior name (must fail): ")
-    _, err = process.RemoteSpawn(node2.Name(), "randomname", opts, 1, 2, 3)
-    if err != lib.ErrBehaviorUnknown {
-        t.Fatal(err)
-    }
-    fmt.Println("OK")
-
-    fmt.Printf(" process gs1@node1 requests to spawn a new process on node3 via proxy node2 and register it with the name 'remote': ")
-    node3.ProvideRemoteSpawn("remote", &handshakeGenServer{})
-    gotPid, err = process.RemoteSpawn(node3.Name(), "remote", opts, 1, 2, 3)
-    if err != nil {
-        t.Fatal(err)
-    }
-    p = node3.ProcessByName("remote")
-    if p == nil {
-        t.Fatal("can't find process 'remote' on node3")
-    }
-    if gotPid != p.Self() {
-        t.Fatal("process pid mismatch")
-    }
-    fmt.Println("OK")
-    fmt.Printf(" process gs3@node3 requests to spawn a new process on node1 via proxy node2 (node1 ProxyFlags.RemoteSpawn: false): ")
-    process3, err := node3.Spawn("gs3", gen.ProcessOptions{}, &handshakeGenServer{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    gotPid, err = process3.RemoteSpawn(node1.Name(), "remote", opts, 1, 2, 3)
-    if err != lib.ErrPeerUnsupported {
-        t.Fatal(err)
-    }
-    fmt.Println("OK")
-}
-
-func TestNodeResolveExtra(t *testing.T) {
-    cert, err := lib.GenerateSelfSignedCert("localhost")
-    if err != nil {
-        t.Fatal(err)
-    }
-    fmt.Printf("\n=== Test Node Resolve Extra \n")
-    fmt.Printf("... starting node1 with disabled TLS: ")
-    opts1 := node.Options{
-        TLS: &tls.Config{InsecureSkipVerify: true},
-    }
-    node1, err := ergo.StartNode("node1resolveExtra@localhost", "secret", opts1)
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer node1.Stop()
-    fmt.Println("OK")
-    opts2 := node.Options{
-        TLS: &tls.Config{Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true},
-    }
-    fmt.Printf("... starting node2 with enabled TLS: ")
-    node2, err := ergo.StartNode("node2resolveExtra@localhost", "secret", opts2)
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer node2.Stop()
-    fmt.Println("OK")
-
-    fmt.Printf("... node1 resolves node2 with enabled TLS: ")
-    route1, err := node1.Resolve("node2resolveExtra@localhost")
-    if err != nil {
-        t.Fatal(err)
-    }
-    if route1.Options.TLS == nil {
-        t.Fatal("expected TLS value")
-    }
-    fmt.Println("OK")
-
-    fmt.Printf("... node2 resolves node1 with disabled TLS: ")
-    route2, err := node2.Resolve("node1resolveExtra@localhost")
-    if err != nil {
-        t.Fatal(err)
-    }
-    if route2.Options.TLS != nil {
-        t.Fatal("expected nil value for TLS")
-    }
-    fmt.Println("OK")
-
-    fmt.Printf("... node1 connects to node2: ")
-    if err := node1.Connect(node2.Name()); err != nil {
-        t.Fatal(err)
-    }
-    if len(node1.Nodes()) != 1 {
-        t.Fatal("no peers")
-    }
-    if node1.Nodes()[0] != node2.Name() {
-        t.Fatal("wrong peer")
-    }
-    fmt.Println("OK")
-
-    fmt.Printf("... disconnecting nodes: ")
-    time.Sleep(300 * time.Millisecond)
-    if err := node1.Disconnect(node2.Name()); err != nil {
-        t.Fatal(err)
-    }
-    if len(node1.Nodes()) > 0 {
-        t.Fatal("still connected")
-    }
-    fmt.Println("OK")
-
-    fmt.Printf("... node2 connects to node1: ")
-    if err := node2.Connect(node1.Name()); err != nil {
-        t.Fatal(err)
-    }
-    if len(node2.Nodes()) != 1 {
-        t.Fatal("no peers")
-    }
-    if node2.Nodes()[0] != node1.Name() {
-        t.Fatal("wrong peer")
-    }
-    fmt.Println("OK")
-}
-
-type failoverServer struct {
-    gen.Server
-    v chan interface{}
-}
-
-func (f *failoverServer) Init(process *gen.ServerProcess, args ...etf.Term) error {
-    return nil
-}
-func (f *failoverServer) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus {
-    if _, yes := gen.IsMessageFallback(message); yes {
-        f.v <- message
-        return gen.ServerStatusOK
-    }
-    time.Sleep(300 * time.Millisecond)
-    return gen.ServerStatusOK
-}
-func TestNodeProcessFallback(t *testing.T) {
-    fmt.Printf("\n=== Test Node Process Fallback\n")
-    fmt.Printf("... start node1: ")
-    node1, e := ergo.StartNode("node1processfallback@localhost", "secret", node.Options{})
-    if e != nil {
-        t.Fatal(e)
-    }
-    defer node1.Stop()
-    fmt.Println("OK")
-    popts1 := gen.ProcessOptions{
-        MailboxSize: 2,
-        Fallback: gen.ProcessFallback{
-            Name: "fp",
-            Tag: "test_tag",
-        },
-    }
-    gsf := &failoverServer{
-        v: make(chan interface{}, 2),
-    }
-
-    fmt.Printf("... start process p1 (with mailbox size = 2 and fallback process = \"fp\"): ")
-    p1, err := node1.Spawn("", popts1, &failoverServer{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    fmt.Println("OK")
-    fmt.Printf("... start failover process p2 (with name = \"fp\"): ")
-    _, err = node1.Spawn("fp", gen.ProcessOptions{}, gsf)
-    if err != nil {
-        t.Fatal(err)
-    }
-    fmt.Println("OK")
-    fmt.Printf("... sending 4 messages to p1 (the 4th must be wrapped into gen.MessageFallback and forwarded to \"fp\"): ")
-    p1.Send(p1.Self(), "m1")
-    p1.Send(p1.Self(), "m2")
-    p1.Send(p1.Self(), "m3")
-    // the message below must be forwarded
-    p1.Send(p1.Self(), "m4")
-
-    result := gen.MessageFallback{Process: p1.Self(), Tag: "test_tag", Message: "m4"}
-    waitForResultWithValue(t, gsf.v, result)
-}
-
-type compressionServer struct {
-    gen.Server
-}
-
-func (c *compressionServer) Init(process *gen.ServerProcess, args ...etf.Term) error {
-    return nil
-}
-
-func (c *compressionServer) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) {
-    blob := message.(etf.Tuple)[1].([]byte)
-    md5original := message.(etf.Tuple)[0].(string)
-    md5sum := fmt.Sprint(md5.Sum(blob))
-    result := etf.Atom("ok")
-    if !reflect.DeepEqual(md5original, md5sum) {
-        result = etf.Atom("mismatch")
-    }
-    return result, gen.ServerStatusOK
-}
-func (c *compressionServer) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) {
-    switch m := message.(type) {
-    case makeCall:
-        return process.Call(m.to, m.message)
-    }
-    return nil, lib.ErrUnsupportedRequest
-}
-func TestNodeCompression(t *testing.T) {
-    fmt.Printf("\n=== Test Node Compression \n")
-    opts1 := node.Options{}
-    opts1.Compression.Enable = true
-    // need 1 handler to make the atom cache work
-    protoOptions := node.DefaultProtoOptions()
-    protoOptions.NumHandlers = 1
-    opts1.Proto = dist.CreateProto(protoOptions)
-    node1, e := ergo.StartNode("node1compression@localhost", "secret", opts1)
-    if e != nil {
-        t.Fatal(e)
-    }
-    defer node1.Stop()
-    node2, e := ergo.StartNode("node2compression@localhost", "secret", node.Options{})
-    if e != nil {
-        t.Fatal(e)
-    }
-    defer node2.Stop()
-
-    n1p1, err := node1.Spawn("", gen.ProcessOptions{}, &compressionServer{})
-    if err != nil {
-        t.Fatal(err)
-    }
-    n2p1, err := node2.Spawn("", gen.ProcessOptions{}, &compressionServer{})
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    fmt.Printf("... send 1MB compressed. no fragmentation: ")
-    // empty data (no fragmentation)
-    blob := make([]byte, 1024*1024)
-    md5sum := fmt.Sprint(md5.Sum(blob))
-    message := etf.Tuple{md5sum, blob}
-
-    // send 3 times. that is how the atom cache works -
-    // atoms are encoded from the cache on the 2nd or 3rd send
-    call := makeCall{
-        to: n2p1.Self(),
-        message: message,
-    }
-    for i := 0; i < 3; i++ {
-        result, e := n1p1.Direct(call)
-        if e != nil {
-            t.Fatal(e)
-        }
-        if result != etf.Atom("ok") {
-            t.Fatal(result)
-        }
-    }
-    fmt.Println("OK")
-
-    fmt.Printf("... send 1MB compressed. with fragmentation: ")
-    // will be fragmented
-    rnd := lib.RandomString(1024 * 1024)
-    blob = []byte(rnd) // compression rate for a random string is around 50%
-    //rand.Read(blob[:66000]) // compression rate for 1MB of random data - 0 % (entropy too big)
-    md5sum = fmt.Sprint(md5.Sum(blob))
-    message = etf.Tuple{md5sum, blob}
-
-    call = makeCall{
-        to: n2p1.Self(),
-        message: message,
-    }
-    for i := 0; i < 3; i++ {
-        result, e := n1p1.Direct(call)
-        if e != nil {
-            t.Fatal(e)
-        }
-        if result != etf.Atom("ok") {
-            t.Fatal(result)
-        }
-    }
-    fmt.Println("OK")
-}
-
-func TestNodeProxyConnect(t *testing.T) {
-    fmt.Printf("\n=== Test Node Proxy\n")
-    fmt.Printf("... 
connect NodeA to NodeC via NodeB: ") - optsA := node.Options{} - nodeA, e := ergo.StartNode("nodeAproxy@localhost", "secret", optsA) - if e != nil { - t.Fatal(e) - } - defer nodeA.Stop() - - route := node.ProxyRoute{ - Name: "nodeCproxy@localhost", - Proxy: "nodeBproxy@localhost", - } - nodeA.AddProxyRoute(route) - - optsB := node.Options{} - optsB.Proxy.Transit = true - nodeB, e := ergo.StartNode("nodeBproxy@localhost", "secret", optsB) - if e != nil { - t.Fatal(e) - } - defer nodeB.Stop() - optsC := node.Options{} - optsC.Proxy.Accept = true - nodeC, e := ergo.StartNode("nodeCproxy@localhost", "secret", optsC) - if e != nil { - t.Fatal(e) - } - defer nodeC.Stop() - - if err := nodeA.Connect("nodeCproxy@localhost"); err != nil { - t.Fatal(err) - } - - indirectNodes := nodeA.NodesIndirect() - if len(indirectNodes) != 1 { - t.Fatal("wrong result:", indirectNodes) - } - if indirectNodes[0] != "nodeCproxy@localhost" { - t.Fatal("wrong result:", indirectNodes) - } - indirectNodes = nodeC.NodesIndirect() - if len(indirectNodes) != 1 { - t.Fatal("wrong result:", indirectNodes) - } - if indirectNodes[0] != "nodeAproxy@localhost" { - t.Fatal("wrong result:", indirectNodes) - } - if len(nodeB.NodesIndirect()) > 0 { - t.Fatal("wrong result:", nodeB.NodesIndirect()) - } - fmt.Println("OK") - - fmt.Printf("... disconnect NodeC from NodeA: ") - nodeC.Disconnect("nodeAproxy@localhost") - if len(nodeC.NodesIndirect()) > 0 { - t.Fatal("wrong result:", nodeC.NodesIndirect()) - } - - time.Sleep(100 * time.Millisecond) - if len(nodeA.NodesIndirect()) > 0 { - t.Fatal("wrong result:", nodeA.NodesIndirect()) - } - fmt.Println("OK") - nodeB.Stop() - optsB.Proxy.Transit = false - nodeB, e = ergo.StartNode("nodeBproxy@localhost", "secret", optsB) - if e != nil { - t.Fatal(e) - } - fmt.Printf("... connect NodeA to NodeC via NodeB(transit proxy disabled): ") - e = nodeA.Connect("nodeCproxy@localhost") - if e == nil { - t.Fatal("must be error here") - } - errMessage := "[nodeBproxy@localhost] proxy feature disabled" - if e.Error() != errMessage { - t.Fatal(e) - } - fmt.Println("OK") - nodeB.Stop() - nodeC.Stop() - - nodeB.Stop() - optsB.Proxy.Transit = true - nodeB, e = ergo.StartNode("nodeBproxy@localhost", "secret", optsB) - if e != nil { - t.Fatal(e) - } - - optsC.Flags = node.DefaultFlags() - optsC.Flags.EnableProxy = false - nodeC, e = ergo.StartNode("nodeCproxy@localhost", "secret", optsC) - if e != nil { - t.Fatal(e) - } - fmt.Printf("... connect NodeA to NodeC (proxy feature support disabled) via NodeB: ") - e = nodeA.Connect("nodeCproxy@localhost") - if e == nil { - t.Fatal("must be error here") - } - errMessage = "[nodeBproxy@localhost] peer does not support this feature" - if e.Error() != errMessage { - t.Fatal(e) - } - fmt.Println("OK") - nodeC.Stop() - - optsC = node.Options{} - optsC.Proxy.Cookie = "123" - optsC.Proxy.Accept = true - nodeC, e = ergo.StartNode("nodeCproxy@localhost", "secret", optsC) - if e != nil { - t.Fatal(e) - } - fmt.Printf("... connect NodeA to NodeC (with wrong cookie) via NodeB: ") - e = nodeA.Connect("nodeCproxy@localhost") - if e == nil { - t.Fatal("must be error here") - } - errMessage = "[nodeCproxy@localhost] can't establish proxy connection" - if e.Error() != errMessage { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("... 
connect NodeA to NodeC (with correct cookie) via NodeB: ") - if nodeA.RemoveProxyRoute("nodeCproxy@localhost") == false { - t.Fatal("proxy route not found") - } - route = node.ProxyRoute{ - Name: "nodeCproxy@localhost", - Proxy: "nodeBproxy@localhost", - Cookie: "123", - } - nodeA.AddProxyRoute(route) - - e = nodeA.Connect("nodeCproxy@localhost") - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - fmt.Printf("... connect NodeA to NodeD (with enabled encryption) via NodeB: ") - optsD := node.Options{} - optsD.Proxy.Cookie = "123" - optsD.Proxy.Accept = true - optsD.Proxy.Flags = node.DefaultProxyFlags() - optsD.Proxy.Flags.EnableEncryption = true - nodeD, e := ergo.StartNode("nodeDproxy@localhost", "secret", optsD) - if e != nil { - t.Fatal(e) - } - defer nodeD.Stop() - - route = node.ProxyRoute{ - Name: "nodeDproxy@localhost", - Proxy: "nodeBproxy@localhost", - Cookie: "123", - } - nodeA.AddProxyRoute(route) - e = nodeA.Connect("nodeDproxy@localhost") - if e != nil { - t.Fatal(e) - } - fmt.Println("OK") - - // use gen serv from test_monitor - gsA := &testMonitor{ - v: make(chan interface{}, 2), - } - gsC := &testMonitor{ - v: make(chan interface{}, 2), - } - gsD := &testMonitor{ - v: make(chan interface{}, 2), - } - fmt.Printf("... start processA on NodeA: ") - pA, err := nodeA.Spawn("", gen.ProcessOptions{}, gsA) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsA.v, pA.Self()) - fmt.Printf("... start processC on NodeC: ") - pC, err := nodeC.Spawn("", gen.ProcessOptions{}, gsC) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsC.v, pC.Self()) - fmt.Printf("... start processD on NodeD: ") - pD, err := nodeD.Spawn("", gen.ProcessOptions{}, gsD) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsD.v, pD.Self()) - - fmt.Printf("... processA send short message to processC: ") - if e := pA.Send(pC.Self(), "test"); e != nil { - t.Fatal(e) - } - waitForResultWithValue(t, gsC.v, "test") - - fmt.Printf("... processA send short message to processD (encrypted): ") - pA.Send(pD.Self(), "test") - waitForResultWithValue(t, gsD.v, "test") - - randomString := []byte(lib.RandomString(1024 * 10)) - pA.SetCompression(true) - fmt.Printf("... processA send 10K message to processC (compressed): ") - pA.Send(pC.Self(), randomString) - waitForResultWithValue(t, gsC.v, randomString) - - fmt.Printf("... processA send 10K message to processD (compressed, encrypted): ") - pA.Send(pD.Self(), randomString) - waitForResultWithValue(t, gsD.v, randomString) - - pA.SetCompression(false) - randomString = []byte(lib.RandomString(1024 * 100)) - fmt.Printf("... processA send 100K message to processC (fragmented): ") - pA.Send(pC.Self(), randomString) - waitForResultWithValue(t, gsC.v, randomString) - - fmt.Printf("... processA send 100K message to processD (fragmented, encrypted): ") - pA.Send(pD.Self(), randomString) - waitForResultWithValue(t, gsD.v, randomString) - - pA.SetCompression(true) - randomString = []byte(lib.RandomString(1024 * 1024)) - fmt.Printf("... processA send 1M message to processC (fragmented, compressed): ") - pA.Send(pC.Self(), randomString) - waitForResultWithValue(t, gsC.v, randomString) - - fmt.Printf("... processA send 1M message to processD (fragmented, compressed, encrypted): ") - pA.Send(pD.Self(), randomString) - waitForResultWithValue(t, gsD.v, randomString) -} - -func TestNodeIncarnation(t *testing.T) { - fmt.Printf("\n=== Test Node Incarnation\n") - fmt.Printf("... 
start nodes: ") - optsA := node.Options{} - nodeA, e := ergo.StartNode("nodeAincarnation@localhost", "secret", optsA) - if e != nil { - t.Fatal(e) - } - defer nodeA.Stop() - route := node.ProxyRoute{ - Name: "nodeCincarnation@localhost", - Proxy: "nodeBincarnation@localhost", - } - nodeA.AddProxyRoute(route) - // add sleep to get Creation different value for the next node - optsB := node.Options{} - optsB.Proxy.Transit = true - nodeB, e := ergo.StartNode("nodeBincarnation@localhost", "secret", optsB) - if e != nil { - t.Fatal(e) - } - defer nodeB.Stop() - optsC := node.Options{ - Creation: 1234, - } - optsC.Proxy.Accept = true - nodeC, e := ergo.StartNode("nodeCincarnation@localhost", "secret", optsC) - if e != nil { - t.Fatal(e) - } - defer nodeC.Stop() - - if err := nodeA.Connect("nodeCincarnation@localhost"); err != nil { - t.Fatal(err) - } - - indirectNodes := nodeA.NodesIndirect() - if len(indirectNodes) != 1 { - t.Fatal("wrong result:", indirectNodes) - } - if indirectNodes[0] != "nodeCincarnation@localhost" { - t.Fatal("wrong result:", indirectNodes) - } - indirectNodes = nodeC.NodesIndirect() - if len(indirectNodes) != 1 { - t.Fatal("wrong result:", indirectNodes) - } - if indirectNodes[0] != "nodeAincarnation@localhost" { - t.Fatal("wrong result:", indirectNodes) - } - if len(nodeB.NodesIndirect()) > 0 { - t.Fatal("wrong result:", nodeB.NodesIndirect()) - } - fmt.Println("OK") - - // use gen serv from test_monitor - gsA := &testMonitor{ - v: make(chan interface{}, 2), - } - gsB := &testMonitor{ - v: make(chan interface{}, 2), - } - gsC := &testMonitor{ - v: make(chan interface{}, 2), - } - fmt.Printf("... start processA on NodeA: ") - pA, err := nodeA.Spawn("", gen.ProcessOptions{}, gsA) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsA.v, pA.Self()) - - fmt.Printf("... start processB on NodeB: ") - pB, err := nodeB.Spawn("", gen.ProcessOptions{}, gsB) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsB.v, pB.Self()) - - fmt.Printf("... start processC on NodeC: ") - pC, err := nodeC.Spawn("", gen.ProcessOptions{}, gsC) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsC.v, pC.Self()) - - pidC := pC.Self() - - fmt.Printf("... processA send a message to processC (via proxy): ") - if e := pA.Send(pidC, "test"); e != nil { - t.Fatal(e) - } - waitForResultWithValue(t, gsC.v, "test") - fmt.Printf("... processB send short message to processC: ") - if e := pB.Send(pidC, "test"); e != nil { - t.Fatal(e) - } - waitForResultWithValue(t, gsC.v, "test") - fmt.Printf("... restart nodeC and processC: ") - nodeC.Stop() - nodeC.Wait() - - optsC.Creation = 12345 - nodeC, e = ergo.StartNode("nodeCincarnation@localhost", "secret", optsC) - if e != nil { - t.Fatal(e) - } - - if err := nodeA.Connect("nodeCincarnation@localhost"); err != nil { - t.Fatal(err) - } - pC, err = nodeC.Spawn("", gen.ProcessOptions{}, gsC) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, gsC.v, pC.Self()) - - fmt.Printf("... processA send a message to previous incarnation of processC (via proxy): ") - if e := pA.Send(pidC, "test"); e != lib.ErrProcessIncarnation { - t.Fatal("must be ErrProcessIncarnation here", e) - } - fmt.Println("OK") - fmt.Printf("... 
processB send short message to previous incarnation of processC: ") - if e := pB.Send(pidC, "test"); e != lib.ErrProcessIncarnation { - t.Fatal(e) - } - fmt.Println("OK") - - indirectNodes = nodeA.NodesIndirect() - if len(indirectNodes) != 1 { - t.Fatal("wrong result:", indirectNodes) - } - if indirectNodes[0] != "nodeCincarnation@localhost" { - t.Fatal("wrong result:", indirectNodes) - } - indirectNodes = nodeC.NodesIndirect() - if len(indirectNodes) != 1 { - t.Fatal("wrong result:", indirectNodes) - } - if indirectNodes[0] != "nodeAincarnation@localhost" { - t.Fatal("wrong result:", indirectNodes) - } - if len(nodeB.NodesIndirect()) > 0 { - t.Fatal("wrong result:", nodeB.NodesIndirect()) - } -} - -func BenchmarkNodeCompressionDisabled1MBempty(b *testing.B) { - node1name := fmt.Sprintf("nodeB1compressionDis_%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2compressionDis_%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - node2, _ := ergo.StartNode(node2name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - if err := node1.Connect(node2.Name()); err != nil { - b.Fatal(err) - } - - bgs := &benchGS{} - - var empty [1024 * 1024]byte - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p2.Self(), - message: empty, - } - _, e := p1.DirectWithTimeout(call, 30) - if e != nil { - b.Fatal(e) - } - } - - }) -} -func BenchmarkNodeCompressionEnabled1MBempty(b *testing.B) { - node1name := fmt.Sprintf("nodeB1compressionEn_%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2compressionEn_%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - node2, _ := ergo.StartNode(node2name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - if err := node1.Connect(node2.Name()); err != nil { - b.Fatal(err) - } - - bgs := &benchGS{} - - var empty [1024 * 1024]byte - //b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p1.SetCompression(true) - p1.SetCompressionLevel(5) - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p2.Self(), - message: empty, - } - _, e := p1.DirectWithTimeout(call, 30) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func BenchmarkNodeCompressionEnabled1MBstring(b *testing.B) { - node1name := fmt.Sprintf("nodeB1compressionEnStr_%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2compressionEnStr_%d@localhost", b.N) - node1, e := ergo.StartNode(node1name, "bench", node.Options{}) - if e != nil { - b.Fatal(e) - } - node2, e := ergo.StartNode(node2name, "bench", node.Options{}) - if e != nil { - b.Fatal(e) - } - defer node1.Stop() - defer node2.Stop() - if err := node1.Connect(node2.Name()); err != nil { - b.Fatal(err) - } - - bgs := &benchGS{} - - randomString := []byte(lib.RandomString(1024 * 1024)) - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p1.SetCompression(true) - p1.SetCompressionLevel(5) - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - b.ResetTimer() - for pb.Next() { - call := 
makeCall{ - to: p2.Self(), - message: randomString, - } - _, e := p1.DirectWithTimeout(call, 30) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -type benchGS struct { - gen.Server -} - -func (b *benchGS) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - return etf.Atom("ok"), gen.ServerStatusOK -} -func (b *benchGS) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case makeCall: - return process.CallWithTimeout(m.to, m.message, 30) - } - return nil, lib.ErrUnsupportedRequest -} - -func BenchmarkNodeSequentialNetwork(b *testing.B) { - - node1name := fmt.Sprintf("nodeB1_%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2_%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - node2, _ := ergo.StartNode(node2name, "bench", node.Options{}) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - - if e1 != nil { - b.Fatal(e1) - } - if e2 != nil { - b.Fatal(e2) - } - - call := makeCall{ - to: p2.Self(), - message: 1, - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - b.ResetTimer() - for _, c := range benchCases() { - b.Run(c.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - call := makeCall{ - to: p2.Self(), - message: c.value, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e, i) - } - } - }) - } -} - -func BenchmarkNodeSequentialLocal(b *testing.B) { - - node1name := fmt.Sprintf("nodeB1Local_%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - p2, e2 := node1.Spawn("", gen.ProcessOptions{}, bgs) - - if e1 != nil { - b.Fatal(e1) - } - if e2 != nil { - b.Fatal(e2) - } - - call := makeCall{ - to: p2.Self(), - message: 1, - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - b.ResetTimer() - for _, c := range benchCases() { - b.Run(c.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - call := makeCall{ - to: p2.Self(), - message: c.value, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e, i) - } - } - }) - } -} - -func BenchmarkNodeParallel(b *testing.B) { - - node1name := fmt.Sprintf("nodeB1Parallel_%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2Parallel_%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - node2, _ := ergo.StartNode(node2name, "bench", node.Options{}) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - - call := makeCall{ - to: p2.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p2.Self(), - message: etf.Atom("ping"), - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func BenchmarkNodeParallelSingleNode(b *testing.B) { - - node1name := fmt.Sprintf("nodeB1ParallelLocal_%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", 
node.Options{}) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - - call := makeCall{ - to: p2.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p2.Self(), - message: etf.Atom("ping"), - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func BenchmarkNodeProxy_NodeA_to_NodeC_direct_Message_1K(b *testing.B) { - node1name := fmt.Sprintf("nodeB1ProxyDisabled%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2ProxyDisabled%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - node2, _ := ergo.StartNode(node2name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - - call := makeCall{ - to: p2.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - randomString := []byte(lib.RandomString(1024)) - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p2, e2 := node2.Spawn("", gen.ProcessOptions{}, bgs) - if e2 != nil { - b.Fatal(e2) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p2.Self(), - message: randomString, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} -func BenchmarkNodeProxy_NodeA_to_NodeC_via_NodeB_Message_1K(b *testing.B) { - node1name := fmt.Sprintf("nodeB1ProxyEnabled1K%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2ProxyEnabled1K%d@localhost", b.N) - node3name := fmt.Sprintf("nodeB3ProxyEnabled1K%d@localhost", b.N) - node1, _ := ergo.StartNode(node1name, "bench", node.Options{}) - opts2 := node.Options{} - opts2.Proxy.Transit = true - node2, _ := ergo.StartNode(node2name, "bench", opts2) - node3, _ := ergo.StartNode(node3name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - defer node3.Stop() - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - node1.AddProxyRoute(route) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - - call := makeCall{ - to: p3.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - randomString := []byte(lib.RandomString(1024)) - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p3.Self(), - message: randomString, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func BenchmarkNodeProxy_NodeA_to_NodeC_via_NodeB_Message_1K_Encrypted(b *testing.B) { - node1name 
:= fmt.Sprintf("nodeB1ProxyEnabled1K%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2ProxyEnabled1K%d@localhost", b.N) - node3name := fmt.Sprintf("nodeB3ProxyEnabled1K%d@localhost", b.N) - opts1 := node.Options{} - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = true - node1, _ := ergo.StartNode(node1name, "bench", opts1) - opts2 := node.Options{} - opts2.Proxy.Transit = true - node2, _ := ergo.StartNode(node2name, "bench", opts2) - node3, _ := ergo.StartNode(node3name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - defer node3.Stop() - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - node1.AddProxyRoute(route) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - - call := makeCall{ - to: p3.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - randomString := []byte(lib.RandomString(1024)) - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p3.Self(), - message: randomString, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func BenchmarkNodeProxy_NodeA_to_NodeC_via_NodeB_Message_1M_Compressed(b *testing.B) { - node1name := fmt.Sprintf("nodeB1ProxyEnabled1K%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2ProxyEnabled1K%d@localhost", b.N) - node3name := fmt.Sprintf("nodeB3ProxyEnabled1K%d@localhost", b.N) - opts1 := node.Options{} - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = false - node1, _ := ergo.StartNode(node1name, "bench", opts1) - opts2 := node.Options{} - opts2.Proxy.Transit = true - node2, _ := ergo.StartNode(node2name, "bench", opts2) - node3, _ := ergo.StartNode(node3name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - defer node3.Stop() - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - node1.AddProxyRoute(route) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - - call := makeCall{ - to: p3.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - randomString := []byte(lib.RandomString(1024 * 1024)) - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p3.Self(), - message: randomString, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func BenchmarkNodeProxy_NodeA_to_NodeC_via_NodeB_Message_1M_CompressedEncrypted(b *testing.B) { - node1name := fmt.Sprintf("nodeB1ProxyEnabled1K%d@localhost", b.N) - node2name := fmt.Sprintf("nodeB2ProxyEnabled1K%d@localhost", b.N) - node3name := fmt.Sprintf("nodeB3ProxyEnabled1K%d@localhost", b.N) - opts1 := node.Options{} - opts1.Proxy.Flags = node.DefaultProxyFlags() - opts1.Proxy.Flags.EnableEncryption = 
true - node1, _ := ergo.StartNode(node1name, "bench", opts1) - opts2 := node.Options{} - opts2.Proxy.Transit = true - node2, _ := ergo.StartNode(node2name, "bench", opts2) - node3, _ := ergo.StartNode(node3name, "bench", node.Options{}) - defer node1.Stop() - defer node2.Stop() - defer node3.Stop() - route := node.ProxyRoute{ - Name: node3.Name(), - Proxy: node2.Name(), - } - node1.AddProxyRoute(route) - - bgs := &benchGS{} - - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - - call := makeCall{ - to: p3.Self(), - message: "hi", - } - if _, e := p1.Direct(call); e != nil { - b.Fatal("single ping", e) - } - - randomString := []byte(lib.RandomString(1024 * 1024)) - b.SetParallelism(15) - b.RunParallel(func(pb *testing.PB) { - p1, e1 := node1.Spawn("", gen.ProcessOptions{}, bgs) - if e1 != nil { - b.Fatal(e1) - } - p1.SetCompression(true) - p3, e3 := node3.Spawn("", gen.ProcessOptions{}, bgs) - if e3 != nil { - b.Fatal(e3) - } - b.ResetTimer() - for pb.Next() { - call := makeCall{ - to: p3.Self(), - message: randomString, - } - _, e := p1.Direct(call) - if e != nil { - b.Fatal(e) - } - } - - }) -} - -func benchCases() []benchCase { - return []benchCase{ - {"number", 12345}, - {"string", "hello world"}, - {"tuple (PID)", - etf.Pid{ - Node: "node@localhost", - ID: 1000, - Creation: 1, - }, - }, - {"binary 1MB", make([]byte, 1024*1024)}, - } -} diff --git a/tests/raft_data_test.go b/tests/raft_data_test.go deleted file mode 100644 index 1f252f18..00000000 --- a/tests/raft_data_test.go +++ /dev/null @@ -1,209 +0,0 @@ -//go:build !manual - -package tests - -import ( - "fmt" - "testing" - "time" - - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" -) - -// F - follower -// M - quorum member -// L - leader -// Q - quorum (all members) -// -// append cases: -// 1. F -> M -> L -> Q ... broadcast -// 2. F -> L -> Q ... broadcast -// 3. M -> L - Q ... broadcast -// 4. L -> Q ... broadcast -// - -func TestRaftAppend(t *testing.T) { - fmt.Printf("\n=== Test GenRaft - append data\n") - server := &testRaft{ - n: 6, - qstate: gen.RaftQuorumState5, - } - nodes, rafts, leaderSerial := startRaftCluster("append", server) - - fmt.Printf(" append on a follower (send to the quorum member and forward to the leader: ") - for _, raft := range rafts { - q := raft.Quorum() - // find the follower - if q.Member == true { - continue - } - - // cases 1 and 2 - send to the quorum member. 
-    // the follower isn't able to send it to the leader (case 2)
-    // since it has no info about the quorum leader (quorum member forwards it
-    // to the leader under the hood)
-    ref, err := raft.Append("asdfkey", "asdfvalue")
-    if err != nil {
-        t.Fatal(err)
-    }
-    leaderSerial = checkAppend(t, server, ref, rafts, leaderSerial)
-    break
-    }
-    fmt.Println("OK")
-    fmt.Printf(" append on a quorum member (send to the leader): ")
-    for _, raft := range rafts {
-        q := raft.Quorum()
-        // find the quorum member
-        if q.Member == false {
-            continue
-        }
-
-        // case 3 - quorum member sends append to the leader
-        ref, err := raft.Append("asdfkey", "asdfvalue")
-        if err != nil {
-            t.Fatal(err)
-        }
-        leaderSerial = checkAppend(t, server, ref, rafts, leaderSerial)
-        break
-    }
-    fmt.Println("OK")
-    fmt.Printf(" append on a leader: ")
-    for _, raft := range rafts {
-        l := raft.Leader()
-        // find the quorum leader
-        if l == nil || l.Leader != raft.Self() {
-            continue
-        }
-
-        // case 4 - leader makes append
-        ref, err := raft.Append("asdfkey", "asdfvalue")
-        if err != nil {
-            t.Fatal(err)
-        }
-        leaderSerial = checkAppend(t, server, ref, rafts, leaderSerial)
-        break
-    }
-    fmt.Println("OK")
-
-    for _, node := range nodes {
-        node.Stop()
-    }
-}
-
-// get serial case: run through the raft process and get all the data to achieve
-// the same serial
-func TestRaftGet(t *testing.T) {
-    fmt.Printf("\n=== Test GenRaft - get data\n")
-    server := &testRaft{
-        n: 6,
-        qstate: gen.RaftQuorumState5,
-    }
-    nodes, rafts, leaderSerial := startRaftCluster("get", server)
-    for _, raft := range rafts {
-        fmt.Println(" started raft process", raft.Self(), "with serial", raft.Serial())
-    }
-
-    for _, raft := range rafts {
-        _, err := raft.Get(12341234)
-        if err != gen.ErrRaftNoSerial {
-            t.Fatal("must be ErrRaftNoSerial here")
-        }
-
-        if raft.Serial() == leaderSerial {
-            fmt.Println(" raft process", raft.Self(), "already has the latest serial. skip it")
-            continue
-        }
-
-        fmt.Printf(" get serials (%d...%d) on %s to reach the leader's serial: ", raft.Serial()+1, leaderSerial, raft.Self())
-        serial := raft.Serial()
-        gotFrom := []etf.Pid{}
-        for i := serial; i < leaderSerial; i++ {
-            ref, err := raft.Get(i + 1)
-            if err != nil {
-                t.Fatal(err)
-            }
-            result := waitGetRef(t, server, ref)
-
-            // compare with the original
-            original := data[result.key]
-            if original.serial != result.serial {
-                t.Fatal("wrong serial")
-            }
-            if original.value != result.value {
-                t.Fatal("wrong value")
-            }
-            // check internal data
-            internal := raft.State.(*raftState)
-            if internal.serials[result.serial] != result.key {
-                t.Fatal("serial doesn't match the result key")
-            }
-            d, exist := internal.data[result.key]
-            if exist == false {
-                t.Fatal("internal data hasn't been updated")
-            }
-            if d.serial != result.serial {
-                t.Fatal("internal data serial mismatch")
-            }
-            if d.value != result.value {
-                t.Fatal("internal data value mismatch")
-            }
-            gotFrom = append(gotFrom, result.process.Self())
-        }
-        fmt.Println("OK")
-        fmt.Println(" got from:", gotFrom, ")")
-    }
-
-    //fmt.Println("-----------")
-    //for _, raft := range rafts {
-    //	s := raft.State.(*raftState)
-    //	fmt.Println(raft.Self(), "INTERNAL", s)
-    //}
-
-    for _, node := range nodes {
-        node.Stop()
-    }
-}
-
-func waitGetRef(t *testing.T, server *testRaft, ref etf.Ref) raftResult {
-    var result raftResult
-    select {
-    case result = <-server.s:
-        if result.ref != ref {
-            t.Fatal("wrong ref")
-        }
-        return result
-    case <-time.After(30 * time.Second):
-        t.Fatal("get timeout")
-    }
-    return result
-}
-
-func checkAppend(t *testing.T, server *testRaft, ref etf.Ref, rafts []*gen.RaftProcess, serial uint64) uint64 {
-    appends := 0
-    for {
-        select {
-        case result := <-server.a:
-            if result.serial != serial+1 {
-                t.Fatalf("wrong serial %d (must be %d)", result.serial, serial+1)
-            }
-            appends++
-            //fmt.Println("got append on ", result.process.Self(), "total appends", appends)
-            if appends != len(rafts) {
-                continue
-            }
-            // check serials
-            for _, r := range rafts {
-                s := r.Serial()
-                if s != serial+1 {
-                    t.Fatalf("wrong serial %d on %s", s, r.Self())
-                }
-            }
-            return serial + 1
-        case <-time.After(30 * time.Second):
-            t.Fatal("append timeout")
-
-        }
-    }
-
-}
diff --git a/tests/raft_manual_test.go b/tests/raft_manual_test.go
deleted file mode 100644
index 41567501..00000000
--- a/tests/raft_manual_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-//go:build manual
-// +build manual
-
-// to run this test:
-// go test -run TestRaft -ergo.norecover -tags manual
-//
-// enable debug printing in the gen/raft.go
-// quorum building debugging: %s/\/\/ QUODBG //
-// leader election debugging: %s/\/\/ LDRDBG //
-// heartbeat debugging: %s/\/\/ HRTDBG //
-
-package tests
-
-import (
-    "fmt"
-    "math/rand"
-    "testing"
-    "time"
-
-    "github.com/ergo-services/ergo"
-    "github.com/ergo-services/ergo/etf"
-    "github.com/ergo-services/ergo/gen"
-    "github.com/ergo-services/ergo/node"
-)
-
-type testRaft struct {
-    gen.Raft
-    res chan interface{}
-}
-
-func (tr *testRaft) InitRaft(process *gen.RaftProcess, args ...etf.Term) (gen.RaftOptions, error) {
-    var options gen.RaftOptions
-    if len(args) > 0 {
-        options.Peers = args[0].([]gen.ProcessID)
-        options.Serial = uint64(rand.Intn(10))
-    }
-
-    fmt.Println(process.Self(), process.Name(), " ----------", options.Serial)
-    return options, gen.RaftStatusOK
-}
-
-func (tr *testRaft) HandleQuorum(process *gen.RaftProcess, q *gen.RaftQuorum) gen.RaftStatus {
-    if q == nil {
-        fmt.Println("QQQ quorum", 
process.Name(), "state: NONE") - return gen.RaftStatusOK - } else { - fmt.Println("QQQ quorum", process.Name(), "state:", q.State, q.Member, q.Peers) - } - if sent, _ := process.State.(int); sent != 1 { - process.SendAfter(process.Self(), "ok", 7*time.Second) - process.State = 1 - } - //tr.res <- qs - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleLeader(process *gen.RaftProcess, leader *gen.RaftLeader) gen.RaftStatus { - fmt.Println("LLL leader", process.Name(), leader) - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleAppend(process *gen.RaftProcess, ref etf.Ref, serial uint64, key string, value etf.Term) gen.RaftStatus { - fmt.Println("AAA append", ref, serial, value) - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleGet(process *gen.RaftProcess, serial uint64) (string, etf.Term, gen.RaftStatus) { - fmt.Println("GGG get", process.Name(), serial) - return "", nil, gen.RaftStatusOK -} - -func (tr *testRaft) HandleRaftInfo(process *gen.RaftProcess, message etf.Term) gen.ServerStatus { - q := process.Quorum() - if q == nil { - fmt.Println("III info", process.Name(), "state: NONE", "message:", message) - } else { - fmt.Println("III info", process.Name(), "Q:", q.State, q.Member, "", process.Leader(), "message:", message) - } - if l := process.Leader(); l != nil && l.Leader == process.Self() { - fmt.Println("III i'm leader. freeze", process.Self()) - time.Sleep(35 * time.Second) - } - process.State = 0 - return gen.ServerStatusOK -} - -func TestRaftLeader(t *testing.T) { - fmt.Printf("\n=== Test GenRaft\n") - var N int = 4 - - fmt.Printf("Starting %d nodes: nodeGenRaftXX@localhost...", N) - - nodes := make([]node.Node, N) - for i := range nodes { - name := fmt.Sprintf("nodeGenRaft%02d@localhost", i) - node, err := ergo.StartNode(name, "cookies", node.Options{}) - if err != nil { - t.Fatal(err) - } - nodes[i] = node - } - - defer func() { - for i := range nodes { - nodes[i].Stop() - } - }() - fmt.Println("OK") - - rafts := make([]gen.Process, N) - results := make([]chan interface{}, N) - var args []etf.Term - var peer gen.ProcessID - for i := range rafts { - name := fmt.Sprintf("raft%02d", i+1) - if i == 0 { - args = nil - } else { - peer.Node = nodes[i-1].Name() - peer.Name = rafts[i-1].Name() - peers := []gen.ProcessID{peer} - args = []etf.Term{peers} - } - tr := &testRaft{ - res: make(chan interface{}, 2), - } - results[i] = tr.res - raft, err := nodes[i].Spawn(name, gen.ProcessOptions{}, tr, args...) 
-        if err != nil {
-            t.Fatal(err)
-        }
-        rafts[i] = raft
-        //time.Sleep(300 * time.Millisecond)
-    }
-
-    time.Sleep(50 * time.Second)
-
-}
diff --git a/tests/raft_test.go b/tests/raft_test.go
deleted file mode 100644
index a6f8e92e..00000000
--- a/tests/raft_test.go
+++ /dev/null
@@ -1,360 +0,0 @@
-//go:build !manual
-
-package tests
-
-import (
-    "fmt"
-    "math/rand"
-    "testing"
-    "time"
-
-    "github.com/ergo-services/ergo"
-    "github.com/ergo-services/ergo/etf"
-    "github.com/ergo-services/ergo/gen"
-    "github.com/ergo-services/ergo/node"
-)
-
-type testCaseRaft struct {
-    n     int
-    state gen.RaftQuorumState
-    name  string
-}
-
-var (
-    ql  string = "quorum of %2d members with 1 leader: "
-    qlf string = "quorum of %2d members with 1 leader + %d follower(s): "
-    cases = []testCaseRaft{
-        testCaseRaft{n: 2, name: "no quorum, no leader: "},
-        testCaseRaft{n: 3, name: ql, state: gen.RaftQuorumState3},
-        testCaseRaft{n: 4, name: qlf, state: gen.RaftQuorumState3},
-        testCaseRaft{n: 5, name: ql, state: gen.RaftQuorumState5},
-        testCaseRaft{n: 6, name: qlf, state: gen.RaftQuorumState5},
-        testCaseRaft{n: 7, name: ql, state: gen.RaftQuorumState7},
-
-        //
-        // the cases below work well, but quorum building sometimes takes too long.
-        //testCaseRaft{n: 8, name: qlf, state: gen.RaftQuorumState7},
-        //testCaseRaft{n: 9, name: ql, state: gen.RaftQuorumState9},
-        //testCaseRaft{n: 10, name: qlf, state: gen.RaftQuorumState9},
-        //testCaseRaft{n: 11, name: ql, state: gen.RaftQuorumState11},
-        //testCaseRaft{n: 12, name: qlf, state: gen.RaftQuorumState11},
-        //testCaseRaft{n: 15, name: qlf, state: gen.RaftQuorumState11},
-        //testCaseRaft{n: 25, name: qlf, state: gen.RaftQuorumState11},
-    }
-
-    data = map[string]dataValueSerial{
-        "key0": dataValueSerial{"value0", 0},
-        "key1": dataValueSerial{"value1", 1},
-        "key2": dataValueSerial{"value2", 2},
-        "key3": dataValueSerial{"value3", 3},
-        "key4": dataValueSerial{"value4", 4},
-        "key5": dataValueSerial{"value5", 5},
-        "key6": dataValueSerial{"value6", 6},
-        "key7": dataValueSerial{"value7", 7},
-        "key8": dataValueSerial{"value8", 8},
-        "key9": dataValueSerial{"value9", 9},
-    }
-    keySerials = []string{
-        "key0",
-        "key1",
-        "key2",
-        "key3",
-        "key4",
-        "key5",
-        "key6",
-        "key7",
-        "key8",
-        "key9",
-    }
-)
-
-type dataValueSerial struct {
-    value  string
-    serial uint64
-}
-
-type testRaft struct {
-    gen.Raft
-    n      int
-    qstate gen.RaftQuorumState
-    p      chan *gen.RaftProcess // Init
-    q      chan *gen.RaftQuorum  // HandleQuorum
-    l      chan *gen.RaftLeader  // HandleLeader
-    a      chan raftResult       // HandleAppend
-    s      chan raftResult       // HandleSerial
-}
-
-type raftResult struct {
-    process *gen.RaftProcess
-    ref     etf.Ref
-    serial  uint64
-    key     string
-    value   etf.Term
-}
-
-type raftArgs struct {
-    peers  []gen.ProcessID
-    serial uint64
-}
-type raftState struct {
-    data    map[string]dataValueSerial
-    serials []string
-}
-
-func (tr *testRaft) InitRaft(process *gen.RaftProcess, args ...etf.Term) (gen.RaftOptions, error) {
-    var options gen.RaftOptions
-    ra := args[0].(raftArgs)
-    options.Peers = ra.peers
-    options.Serial = ra.serial
-
-    state := &raftState{
-        data: make(map[string]dataValueSerial),
-    }
-    for i := 0; i < int(ra.serial)+1; i++ {
-        key := keySerials[i]
-        state.data[key] = data[key]
-        state.serials = append(state.serials, key)
-    }
-    process.State = state
-    tr.p <- process
-
-    return options, gen.RaftStatusOK
-}
-
-func (tr *testRaft) HandleQuorum(process *gen.RaftProcess, quorum *gen.RaftQuorum) gen.RaftStatus {
-    //fmt.Println(process.Self(), "QQQ", quorum)
-    if quorum != nil {
-
tr.q <- quorum - } - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleLeader(process *gen.RaftProcess, leader *gen.RaftLeader) gen.RaftStatus { - //fmt.Println(process.Self(), "LLL", leader) - // leader elected within a quorum - q := process.Quorum() - if q == nil { - return gen.RaftStatusOK - } - if leader != nil && q.State == tr.qstate { - tr.l <- leader - } - - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleAppend(process *gen.RaftProcess, ref etf.Ref, serial uint64, key string, value etf.Term) gen.RaftStatus { - //fmt.Println(process.Self(), "HANDLE APPEND member:", process.Quorum().Member, "append", ref, serial, key, value) - - result := raftResult{ - process: process, - ref: ref, - serial: serial, - key: key, - value: value, - } - tr.a <- result - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleGet(process *gen.RaftProcess, serial uint64) (string, etf.Term, gen.RaftStatus) { - var key string - //fmt.Println(process.Self(), "HANDLE GET member:", process.Quorum().Member, "get", serial) - - state := process.State.(*raftState) - if len(state.serials) < int(serial) { - // fmt.Println(process.Self(), "NO DATA for", serial) - return key, nil, gen.RaftStatusOK - } - key = state.serials[int(serial)] - data := state.data[key] - return key, data.value, gen.RaftStatusOK -} - -func (tr *testRaft) HandleSerial(process *gen.RaftProcess, ref etf.Ref, serial uint64, key string, value etf.Term) gen.RaftStatus { - //fmt.Println(process.Self(), "HANDLE SERIAL member:", process.Quorum().Member, "append", ref, serial, key, value) - result := raftResult{ - process: process, - ref: ref, - serial: serial, - key: key, - value: value, - } - s := process.Serial() - if s != serial { - fmt.Println(process.Self(), "ERROR: disordered serial request") - tr.s <- raftResult{} - return gen.RaftStatusOK - } - state := process.State.(*raftState) - state.serials = append(state.serials, key) - state.data[key] = dataValueSerial{ - value: value.(string), - serial: serial, - } - tr.s <- result - return gen.RaftStatusOK -} -func (tr *testRaft) HandleCancel(process *gen.RaftProcess, ref etf.Ref, reason string) gen.RaftStatus { - return gen.RaftStatusOK -} - -func (tr *testRaft) HandleRaftInfo(process *gen.RaftProcess, message etf.Term) gen.ServerStatus { - return gen.ServerStatusOK -} - -func TestRaftLeader(t *testing.T) { - fmt.Printf("\n=== Test GenRaft - build quorum, leader election\n") - for _, c := range cases { - fmt.Printf(" cluster with %2d distributed raft processes. 
", c.n) - if c.n == 2 { - fmt.Printf(c.name) - } else { - f := c.n - int(c.state) - if f == 0 { - fmt.Printf(c.name, c.state) - } else { - fmt.Printf(c.name, c.state, c.n-int(c.state)) - } - } - - server := &testRaft{ - n: c.n, - qstate: c.state, - } - // start distributed raft processes and wait until - // they build a quorum and elect their leader - nodes, rafts, leaderSerial := startRaftCluster("append", server) - ok := true - if c.n > 2 { - ok = false - for _, raft := range rafts { - q := raft.Quorum() - if q == nil { - continue - } - if q.Member == false { - continue - } - - l := raft.Leader() - if l == nil { - continue - } - if l.Serial != leaderSerial { - t.Fatal("wrong leader serial") - } - ok = true - break - } - } - if ok == false { - t.Fatal("no quorum or leader found") - } - fmt.Println("OK") - // stop cluster - for _, node := range nodes { - node.Stop() - } - } - -} - -func startRaftCluster(name string, server *testRaft) ([]node.Node, []*gen.RaftProcess, uint64) { - nodes := make([]node.Node, server.n) - for i := range nodes { - name := fmt.Sprintf("nodeGenRaft-%s-cluster-%02dNode%02d@localhost", name, server.n, i) - node, err := ergo.StartNode(name, "cookies", node.Options{}) - if err != nil { - panic(err) - } - nodes[i] = node - } - - processes := make([]gen.Process, server.n) - server.p = make(chan *gen.RaftProcess, 1000) - server.q = make(chan *gen.RaftQuorum, 1000) - server.l = make(chan *gen.RaftLeader, 1000) - server.a = make(chan raftResult, 1000) - server.s = make(chan raftResult, 1000) - leaderSerial := uint64(0) - var peer gen.ProcessID - for i := range processes { - name := fmt.Sprintf("raft%02d", i+1) - args := raftArgs{ - serial: uint64(rand.Intn(9)), - } - if args.serial > leaderSerial { - leaderSerial = args.serial - } - if i > 0 { - peer.Node = nodes[i-1].Name() - peer.Name = processes[i-1].Name() - args.peers = []gen.ProcessID{peer} - } - p, err := nodes[i].Spawn(name, gen.ProcessOptions{}, server, args) - if err != nil { - panic(err) - } - processes[i] = p - } - - rafts := []*gen.RaftProcess{} - // how many results with 'leader' should be awaiting - resultsL := 0 - // how many results with 'quorum' should be awaiting - resultsQ := 0 - for { - select { - case p := <-server.p: - rafts = append(rafts, p) - - if len(rafts) < server.n { - continue - } - - if server.n == 2 { - // no leader, no quorum - return nodes, rafts, leaderSerial - } - continue - - case q := <-server.q: - - if q.State != server.qstate { - continue - } - - resultsQ++ - if resultsQ < int(server.qstate) { - continue - } - - if resultsL < int(server.qstate) { - continue - } - // all quorum members are received leader election result - return nodes, rafts, leaderSerial - - case l := <-server.l: - if l.State != server.qstate { - continue - } - - resultsL++ - if resultsL < int(server.qstate) { - continue - } - - if resultsQ < server.n { - continue - } - - // all quorum members are received leader election result - return nodes, rafts, leaderSerial - - case <-time.After(30 * time.Second): - panic("can't start raft cluster") - } - } -} diff --git a/tests/saga_cancel_test.go b/tests/saga_cancel_test.go deleted file mode 100644 index 638ae9cc..00000000 --- a/tests/saga_cancel_test.go +++ /dev/null @@ -1,550 +0,0 @@ -package tests - -import ( - "fmt" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -// this test implements cases below -// 1. Single saga cancels Tx -// 2. 
Saga1 -> Tx -> Saga2 -> Tx -> Saga3 -// a) Saga1 cancels Tx -// Saga1 -> cancel -> Saga2 -> cancel -> Saga3 -// b) Saga2 cancels Tx -// Saga1 <- cancel <- Saga2 -> cancel -> Saga3 -// c) Saga3 cancels Tx -// Saga1 <- cancel <- Saga2 <- cancel <- Saga3 -// d) Saga1 sets TrapCancel, Saga2 process/node is going down, Saga1 sends Tx to the Saga4 -// -> Tx -> Saga4 -> Tx -> Saga3 -// / -// Saga1 <- signal Down <- Saga2 (terminates) -> signal Down -> Saga3 - -// -// Case 1 -// - -type taskSagaCancelCase1 struct { - workerRes chan interface{} - sagaRes chan interface{} -} - -type testSagaCancelWorker1 struct { - gen.SagaWorker -} - -func (w *testSagaCancelWorker1) HandleJobStart(process *gen.SagaWorkerProcess, job gen.SagaJob) error { - process.State = job.Value - return nil -} -func (w *testSagaCancelWorker1) HandleJobCancel(process *gen.SagaWorkerProcess, reason string) { - if err := process.SendInterim(1); err != gen.ErrSagaTxCanceled { - panic("shouldn't be able to send interim result") - } - if err := process.SendResult(1); err != gen.ErrSagaTxCanceled { - panic("shouldn't be able to send the result") - } - task := process.State.(taskSagaCancelCase1) - task.workerRes <- "ok" - return -} - -type testSagaCancel1 struct { - gen.Saga -} - -func (gs *testSagaCancel1) InitSaga(process *gen.SagaProcess, args ...etf.Term) (gen.SagaOptions, error) { - worker := &testSagaCancelWorker1{} - opts := gen.SagaOptions{ - Worker: worker, - } - return opts, nil -} - -func (gs *testSagaCancel1) HandleTxNew(process *gen.SagaProcess, id gen.SagaTransactionID, value interface{}) gen.SagaStatus { - process.State = value - task := process.State.(taskSagaCancelCase1) - task.sagaRes <- "startTX" - - _, err := process.StartJob(id, gen.SagaJobOptions{}, value) - if err != nil { - panic(err) - } - task.workerRes <- "startWorker" - if err := process.CancelTransaction(id, "test cancel"); err != nil { - panic(err) - } - - // try to cancel unknown TX - if err := process.CancelTransaction(gen.SagaTransactionID{}, "bla bla"); err != gen.ErrSagaTxUnknown { - panic("must be ErrSagaTxUnknown") - } - task.sagaRes <- "cancelTX" - return gen.SagaStatusOK -} - -func (gs *testSagaCancel1) HandleTxCancel(process *gen.SagaProcess, id gen.SagaTransactionID, reason string) gen.SagaStatus { - task := process.State.(taskSagaCancelCase1) - if reason == "test cancel" { - task.sagaRes <- "ok" - } - return gen.SagaStatusOK -} - -func (gs *testSagaCancel1) HandleTxResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaNextID, result interface{}) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSagaCancel1) HandleSagaDirect(process *gen.SagaProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - - process.StartTransaction(gen.SagaTransactionOptions{}, message) - return nil, nil -} - -func TestSagaCancelSimple(t *testing.T) { - - fmt.Printf("\n=== Test GenSagaCancelSimple\n") - fmt.Printf("Starting node: nodeGenSagaCancelSimple01@localhost...") - - node, _ := ergo.StartNode("nodeGenSagaCancelSimple01@localhost", "cookies", node.Options{}) - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node.Stop() - - fmt.Printf("... 
Starting Saga processes: ") - saga := &testSagaCancel1{} - saga_process, err := node.Spawn("saga", gen.ProcessOptions{MailboxSize: 10000}, saga) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - task := taskSagaCancelCase1{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - _, err = saga_process.Direct(task) - if err != nil { - t.Fatal(err) - } - fmt.Printf("... Start new TX on saga: ") - waitForResultWithValue(t, task.sagaRes, "startTX") - fmt.Printf("... Start new worker on saga: ") - waitForResultWithValue(t, task.workerRes, "startWorker") - fmt.Printf("... Cancel TX on saga: ") - waitForResultWithValue(t, task.sagaRes, "cancelTX") - fmt.Printf("... Saga worker handled TX cancelation: ") - waitForResultWithValue(t, task.workerRes, "ok") - fmt.Printf("... Saga handled TX cancelation: ") - waitForResultWithValue(t, task.sagaRes, "ok") -} - -// -// Case 2.a -// Node1.Saga1 -> Tx -> Node2.Saga2 -> Tx -> Node3.Saga3 -// Node1.Saga1 cancels Tx -// Node1.Saga1 -> cancel -> Node2.Saga2 -> cancel -> Node3Saga3 -// - -type testSagaCancelWorker2 struct { - gen.SagaWorker -} - -func (w *testSagaCancelWorker2) HandleJobStart(process *gen.SagaWorkerProcess, job gen.SagaJob) error { - process.State = job.Value - return nil -} -func (w *testSagaCancelWorker2) HandleJobCancel(process *gen.SagaWorkerProcess, reason string) { - if err := process.SendInterim(1); err != gen.ErrSagaTxCanceled { - panic("shouldn't be able to send interim result") - } - if err := process.SendResult(1); err != gen.ErrSagaTxCanceled { - panic("shouldn't be able to send the result") - } - args := process.State.(testSagaCancel2Args) - args.workerRes <- reason - return -} - -type testSagaCancel2 struct { - gen.Saga -} - -type testSagaCancel2Args struct { - workerRes chan interface{} - sagaRes chan interface{} -} - -func (gs *testSagaCancel2) InitSaga(process *gen.SagaProcess, args ...etf.Term) (gen.SagaOptions, error) { - worker := &testSagaCancelWorker2{} - opts := gen.SagaOptions{ - Worker: worker, - } - process.State = args[0] // testSagaCancel2Args - return opts, nil -} - -func (gs *testSagaCancel2) HandleTxNew(process *gen.SagaProcess, id gen.SagaTransactionID, value interface{}) gen.SagaStatus { - args := process.State.(testSagaCancel2Args) - args.sagaRes <- id - - _, err := process.StartJob(id, gen.SagaJobOptions{}, process.State) - if err != nil { - panic(err) - } - args.workerRes <- id - - next := gen.SagaNext{} - switch process.Name() { - case "saga1": - trapCancel, _ := value.(bool) - if trapCancel { - // case 2.D - next.TrapCancel = true - } - next.Saga = gen.ProcessID{Name: "saga2", Node: "node2GenSagaCancelCases@localhost"} - case "saga2": - next.Saga = gen.ProcessID{Name: "saga3", Node: "node3GenSagaCancelCases@localhost"} - default: - return gen.SagaStatusOK - } - process.Next(id, next) - - return gen.SagaStatusOK -} - -func (gs *testSagaCancel2) HandleTxCancel(process *gen.SagaProcess, id gen.SagaTransactionID, reason string) gen.SagaStatus { - args := process.State.(testSagaCancel2Args) - args.sagaRes <- reason - return gen.SagaStatusOK -} - -func (gs *testSagaCancel2) HandleTxResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaNextID, result interface{}) gen.SagaStatus { - return gen.SagaStatusOK -} - -type testSagaStartTX struct { - TrapCancel bool -} -type testSagaCancelTX struct { - ID gen.SagaTransactionID - Reason string -} - -func (gs *testSagaCancel2) HandleSagaInfo(process *gen.SagaProcess, message etf.Term) gen.ServerStatus { - args := 
process.State.(testSagaCancel2Args) - switch m := message.(type) { - case gen.MessageSagaCancel: - args.sagaRes <- m.Reason - next := gen.SagaNext{} - next.Saga = gen.ProcessID{Name: "saga4", Node: "node4GenSagaCancelCases@localhost"} - process.Next(m.TransactionID, next) - args.sagaRes <- m.TransactionID - } - return gen.ServerStatusOK -} - -func (gs *testSagaCancel2) HandleSagaDirect(process *gen.SagaProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - - switch m := message.(type) { - case testSagaStartTX: - return process.StartTransaction(gen.SagaTransactionOptions{}, m.TrapCancel), nil - case testSagaCancelTX: - return nil, process.CancelTransaction(m.ID, m.Reason) - } - return nil, nil -} - -func TestSagaCancelCases(t *testing.T) { - fmt.Printf("\n=== Test GenSagaCancelCases\n") - - fmt.Printf("Starting node: node1GenSagaCancelCases@localhost...") - node1, _ := ergo.StartNode("node1GenSagaCancelCases@localhost", "cookies", node.Options{}) - - if node1 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node1.Stop() - - fmt.Printf("Starting node: node2GenSagaCancelCases@localhost...") - node2, _ := ergo.StartNode("node2GenSagaCancelCases@localhost", "cookies", node.Options{}) - - if node2 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node2.Stop() - - fmt.Printf("Starting node: node3GenSagaCancelCases@localhost...") - node3, _ := ergo.StartNode("node3GenSagaCancelCases@localhost", "cookies", node.Options{}) - - if node3 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node3.Stop() - - args1 := testSagaCancel2Args{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - - fmt.Printf("Starting saga1 on node1GenSagaCancelCases@localhost...") - saga1 := &testSagaCancel2{} - saga1_process, err := node1.Spawn("saga1", gen.ProcessOptions{MailboxSize: 10000}, saga1, args1) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - args2 := testSagaCancel2Args{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - fmt.Printf("Starting saga2 on node2GenSagaCancelCases@localhost...") - saga2 := &testSagaCancel2{} - saga2_process, err := node2.Spawn("saga2", gen.ProcessOptions{MailboxSize: 10000}, saga2, args2) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - args3 := testSagaCancel2Args{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - fmt.Printf("Starting saga3 on node3GenSagaCancelCases@localhost...") - saga3 := &testSagaCancel2{} - saga3_process, err := node3.Spawn("saga3", gen.ProcessOptions{MailboxSize: 10000}, saga3, args3) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - // - // case 2.A - // - fmt.Println(" Case A (cancel TX on Node1.Saga1): Node1.Saga1 -> cancel -> Node2.Saga2 -> cancel -> Node3.Saga3") - - ValueTXID, err := saga1_process.Direct(testSagaStartTX{}) - if err != nil { - t.Fatal(err) - } - TXID, ok := ValueTXID.(gen.SagaTransactionID) - if !ok { - t.Fatal("not a gen.SagaTransactionID") - } - - fmt.Printf("... Start new TX %v on saga1: ", TXID) - waitForResultWithValue(t, args1.sagaRes, TXID) - fmt.Printf("... Start new worker on saga1 with TX %v: ", TXID) - waitForResultWithValue(t, args1.workerRes, TXID) - - fmt.Printf("... Start new TX %v on saga2: ", TXID) - waitForResultWithValue(t, args2.sagaRes, TXID) - fmt.Printf("... Start new worker on saga2 with TX %v: ", TXID) - waitForResultWithValue(t, args2.workerRes, TXID) - - fmt.Printf("... 
Start new TX %v on saga3: ", TXID) - waitForResultWithValue(t, args3.sagaRes, TXID) - fmt.Printf("... Start new worker on saga3 with TX %v: ", TXID) - waitForResultWithValue(t, args3.workerRes, TXID) - - fmt.Printf("... Cancel TX %v on saga1: ", TXID) - cancelReason := "cancel case1" - _, err = saga1_process.Direct(testSagaCancelTX{ID: TXID, Reason: cancelReason}) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, args1.sagaRes, cancelReason) - fmt.Printf("... saga1 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args1.workerRes, cancelReason) - fmt.Printf("... cancels TX %v on saga2: ", TXID) - waitForResultWithValue(t, args2.sagaRes, cancelReason) - fmt.Printf("... saga2 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args2.workerRes, cancelReason) - fmt.Printf("... cancels TX %v on saga3: ", TXID) - waitForResultWithValue(t, args3.sagaRes, cancelReason) - fmt.Printf("... saga3 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args3.workerRes, cancelReason) - // - // case 2.B - // - - fmt.Println(" Case B (cancel TX on Node.Saga2): Node1.Saga1 <- cancel <- Node2.Saga2 -> cancel -> Node3.Saga3") - - ValueTXID, err = saga1_process.Direct(testSagaStartTX{}) - if err != nil { - t.Fatal(err) - } - TXID, ok = ValueTXID.(gen.SagaTransactionID) - if !ok { - t.Fatal("not a gen.SagaTransactionID") - } - - fmt.Printf("... Start new TX %v on saga1: ", TXID) - waitForResultWithValue(t, args1.sagaRes, TXID) - fmt.Printf("... Start new worker on saga1 with TX %v: ", TXID) - waitForResultWithValue(t, args1.workerRes, TXID) - - fmt.Printf("... Start new TX %v on saga2: ", TXID) - waitForResultWithValue(t, args2.sagaRes, TXID) - fmt.Printf("... Start new worker on saga2 with TX %v: ", TXID) - waitForResultWithValue(t, args2.workerRes, TXID) - - fmt.Printf("... Start new TX %v on saga3: ", TXID) - waitForResultWithValue(t, args3.sagaRes, TXID) - fmt.Printf("... Start new worker on saga3 with TX %v: ", TXID) - waitForResultWithValue(t, args3.workerRes, TXID) - - fmt.Printf("... Cancel TX %v on saga2: ", TXID) - cancelReason = "cancel case2" - _, err = saga2_process.Direct(testSagaCancelTX{ID: TXID, Reason: cancelReason}) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, args2.sagaRes, cancelReason) - fmt.Printf("... saga2 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args2.workerRes, cancelReason) - fmt.Printf("... cancels TX %v on saga1: ", TXID) - waitForResultWithValue(t, args1.sagaRes, cancelReason) - fmt.Printf("... saga1 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args1.workerRes, cancelReason) - fmt.Printf("... cancels TX %v on saga3: ", TXID) - waitForResultWithValue(t, args3.sagaRes, cancelReason) - fmt.Printf("... saga3 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args3.workerRes, cancelReason) - // - // case 2.C - // - fmt.Println(" Case C (cancel TX on Node.Saga3): Node1.Saga1 <- cancel <- Node2.Saga2 <- cancel <- Node3.Saga3") - - ValueTXID, err = saga1_process.Direct(testSagaStartTX{}) - if err != nil { - t.Fatal(err) - } - TXID, ok = ValueTXID.(gen.SagaTransactionID) - if !ok { - t.Fatal("not a gen.SagaTransactionID") - } - - fmt.Printf("... Start new TX %v on saga1: ", TXID) - waitForResultWithValue(t, args1.sagaRes, TXID) - fmt.Printf("... Start new worker on saga1 with TX %v: ", TXID) - waitForResultWithValue(t, args1.workerRes, TXID) - - fmt.Printf("... 
Start new TX %v on saga2: ", TXID) - waitForResultWithValue(t, args2.sagaRes, TXID) - fmt.Printf("... Start new worker on saga2 with TX %v: ", TXID) - waitForResultWithValue(t, args2.workerRes, TXID) - - fmt.Printf("... Start new TX %v on saga3: ", TXID) - waitForResultWithValue(t, args3.sagaRes, TXID) - fmt.Printf("... Start new worker on saga3 with TX %v: ", TXID) - waitForResultWithValue(t, args3.workerRes, TXID) - - fmt.Printf("... Cancel TX %v on saga3: ", TXID) - cancelReason = "cancel case3" - _, err = saga3_process.Direct(testSagaCancelTX{ID: TXID, Reason: cancelReason}) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, args3.sagaRes, cancelReason) - fmt.Printf("... saga3 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args3.workerRes, cancelReason) - fmt.Printf("... cancels TX %v on saga2: ", TXID) - waitForResultWithValue(t, args2.sagaRes, cancelReason) - fmt.Printf("... saga2 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args2.workerRes, cancelReason) - fmt.Printf("... cancels TX %v on saga1: ", TXID) - waitForResultWithValue(t, args1.sagaRes, cancelReason) - fmt.Printf("... saga1 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args1.workerRes, cancelReason) - // - // Case 2.D - // - fmt.Println(" Case D: Saga1 sets TrapCancel, Saga2 process/node is going down, Saga1 sends Tx to the Saga4:") - - fmt.Printf("Starting node: node4GenSagaCancelCases@localhost...") - node4, _ := ergo.StartNode("node4GenSagaCancelCases@localhost", "cookies", node.Options{}) - - if node4 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node4.Stop() - - args4 := testSagaCancel2Args{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - fmt.Printf("Starting saga4 on node4GenSagaCancelCases@localhost...") - saga4 := &testSagaCancel2{} - saga4_process, err := node4.Spawn("saga4", gen.ProcessOptions{MailboxSize: 10000}, saga4, args4) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK", saga4_process.Self()) - - // TrapCancel will be enabled on the Saga1 only - ValueTXID, err = saga1_process.Direct(testSagaStartTX{TrapCancel: true}) - if err != nil { - t.Fatal(err) - } - TXID, ok = ValueTXID.(gen.SagaTransactionID) - if !ok { - t.Fatal("not a gen.SagaTransactionID") - } - - fmt.Printf("... Start new TX %v on saga1: ", TXID) - waitForResultWithValue(t, args1.sagaRes, TXID) - fmt.Printf("... Start new worker on saga1 with TX %v: ", TXID) - waitForResultWithValue(t, args1.workerRes, TXID) - - fmt.Printf("... Start new TX %v on saga2: ", TXID) - waitForResultWithValue(t, args2.sagaRes, TXID) - fmt.Printf("... Start new worker on saga2 with TX %v: ", TXID) - waitForResultWithValue(t, args2.workerRes, TXID) - - fmt.Printf("... Start new TX %v on saga3: ", TXID) - waitForResultWithValue(t, args3.sagaRes, TXID) - fmt.Printf("... Start new worker on saga3 with TX %v: ", TXID) - waitForResultWithValue(t, args3.workerRes, TXID) - - fmt.Printf("... Terminate saga2 process: ") - time.Sleep(200 * time.Millisecond) - saga2_process.Kill() - if err := saga2_process.WaitWithTimeout(2 * time.Second); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... handle trapped cancelation TX %v on saga1: ", TXID) - cancelReason = fmt.Sprintf("next saga %s is down", gen.ProcessID{Name: saga2_process.Name(), Node: node2.Name()}) - waitForResultWithValue(t, args1.sagaRes, cancelReason) - fmt.Printf("... 
cancels TX %v on saga3: ", TXID) - cancelReason = fmt.Sprintf("parent saga %s is down", saga2_process.Self()) - waitForResultWithValue(t, args3.sagaRes, cancelReason) - fmt.Printf("... saga3 cancels TX %v on its worker: ", TXID) - waitForResultWithValue(t, args3.workerRes, cancelReason) - - fmt.Printf("... forward (trapped) canceled TX %v on saga1 to Saga4: ", TXID) - waitForResultWithValue(t, args1.sagaRes, TXID) - fmt.Printf("... Start new TX %v on saga4: ", TXID) - waitForResultWithValue(t, args4.sagaRes, TXID) - fmt.Printf("... Start new worker on saga4 with TX %v: ", TXID) - waitForResultWithValue(t, args4.workerRes, TXID) -} diff --git a/tests/saga_commit_test.go b/tests/saga_commit_test.go deleted file mode 100644 index c07aa008..00000000 --- a/tests/saga_commit_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package tests - -import ( - "fmt" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -type argsSagaCommitArgs struct { - workerRes chan interface{} - sagaRes chan interface{} - testCaseDist bool -} - -type testSagaCommitWorker1 struct { - gen.SagaWorker -} - -func (w *testSagaCommitWorker1) HandleJobStart(process *gen.SagaWorkerProcess, job gen.SagaJob) error { - process.State = job.Value - process.SendResult(123) - args := process.State.(argsSagaCommitArgs) - args.workerRes <- "jobresult" - return nil -} -func (w *testSagaCommitWorker1) HandleJobCommit(process *gen.SagaWorkerProcess, final interface{}) { - args := process.State.(argsSagaCommitArgs) - args.workerRes <- final - return -} -func (w *testSagaCommitWorker1) HandleJobCancel(process *gen.SagaWorkerProcess, reason string) { - return -} - -func (w *testSagaCommitWorker1) HandleWorkerTerminate(process *gen.SagaWorkerProcess, reason string) { - args := process.State.(argsSagaCommitArgs) - args.workerRes <- reason -} - -type testSagaCommit1 struct { - gen.Saga -} - -func (gs *testSagaCommit1) InitSaga(process *gen.SagaProcess, args ...etf.Term) (gen.SagaOptions, error) { - worker := &testSagaCommitWorker1{} - opts := gen.SagaOptions{ - Worker: worker, - } - process.State = args[0] - return opts, nil -} - -func (gs *testSagaCommit1) HandleTxNew(process *gen.SagaProcess, id gen.SagaTransactionID, value interface{}) gen.SagaStatus { - args := process.State.(argsSagaCommitArgs) - args.sagaRes <- "newtx" - - _, err := process.StartJob(id, gen.SagaJobOptions{}, args) - if err != nil { - panic(err) - } - - if args.testCaseDist { - next := gen.SagaNext{ - Saga: gen.ProcessID{Name: "saga2", Node: "nodeGenSagaCommitDist02@localhost"}, - } - process.Next(id, next) - } - - return gen.SagaStatusOK -} - -func (gs *testSagaCommit1) HandleTxDone(process *gen.SagaProcess, id gen.SagaTransactionID, result interface{}) (interface{}, gen.SagaStatus) { - args := process.State.(argsSagaCommitArgs) - args.sagaRes <- "txdone" - return 6.28, gen.SagaStatusOK -} - -func (gs *testSagaCommit1) HandleTxCancel(process *gen.SagaProcess, id gen.SagaTransactionID, reason string) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSagaCommit1) HandleTxCommit(process *gen.SagaProcess, id gen.SagaTransactionID, final interface{}) gen.SagaStatus { - args := process.State.(argsSagaCommitArgs) - args.sagaRes <- final - return gen.SagaStatusOK -} - -func (gs *testSagaCommit1) HandleTxResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaNextID, result interface{}) gen.SagaStatus { - args := process.State.(argsSagaCommitArgs) 
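// Editorial note, not part of the deleted file: HandleTxResult runs on the
// upstream saga when a downstream saga calls SendResult for the shared
// transaction. Forwarding the "txresult" marker below lets the distributed
// test body wait until saga2's result has propagated back to saga1 before
// it tells saga1 to send its own result.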
- args.sagaRes <- "txresult" - return gen.SagaStatusOK -} - -func (gs *testSagaCommit1) HandleJobResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaJobID, result interface{}) gen.SagaStatus { - return gen.SagaStatusOK -} - -type testSagaCommitStartTx struct{} -type testSagaCommitSendRes struct { - id gen.SagaTransactionID -} - -func (gs *testSagaCommit1) HandleSagaDirect(process *gen.SagaProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - - switch m := message.(type) { - case testSagaCommitStartTx: - id := process.StartTransaction(gen.SagaTransactionOptions{TwoPhaseCommit: true}, m) - return id, nil - case testSagaCommitSendRes: - return nil, process.SendResult(m.id, 3.14) - } - return nil, nil -} - -func TestSagaCommitSimple(t *testing.T) { - - fmt.Printf("\n=== Test GenSagaCommitSimple\n") - fmt.Printf("Starting node: nodeGenSagaCommitSimple01@localhost...") - - node, _ := ergo.StartNode("nodeGenSagaCommitSimple01@localhost", "cookies", node.Options{}) - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node.Stop() - - fmt.Printf("... Starting Saga processes: ") - args := argsSagaCommitArgs{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - saga := &testSagaCommit1{} - saga_process, err := node.Spawn("saga", gen.ProcessOptions{MailboxSize: 10000}, saga, args) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - ValueTXID, err := saga_process.Direct(testSagaCommitStartTx{}) - if err != nil { - t.Fatal(err) - } - TXID, ok := ValueTXID.(gen.SagaTransactionID) - if !ok { - t.Fatal("not a gen.SagaTransactionID") - } - fmt.Printf("... Start new TX on saga: ") - waitForResultWithValue(t, args.sagaRes, "newtx") - fmt.Printf("... Start new worker on saga: ") - waitForResultWithValue(t, args.workerRes, "jobresult") - fmt.Printf("... Sending result on saga: ") - if _, err := saga_process.Direct(testSagaCommitSendRes{id: TXID}); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... Handle TX done on saga: ") - waitForResultWithValue(t, args.sagaRes, "txdone") - fmt.Printf("... Handle TX commit with final value on worker: ") - waitForResultWithValue(t, args.workerRes, 6.28) - fmt.Printf("... Worker terminated: ") - waitForResultWithValue(t, args.workerRes, "normal") -} - -func TestSagaCommitDistributed(t *testing.T) { - - fmt.Printf("\n=== Test GenSagaCommitDistributed\n") - - fmt.Printf("Starting node: nodeGenSagaCommitDist01@localhost...") - node1, _ := ergo.StartNode("nodeGenSagaCommitDist01@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node1.Stop() - - fmt.Printf("Starting node: nodeGenSagaCommitDist02@localhost...") - node2, _ := ergo.StartNode("nodeGenSagaCommitDist02@localhost", "cookies", node.Options{}) - if node2 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - defer node2.Stop() - - args1 := argsSagaCommitArgs{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - testCaseDist: true, - } - fmt.Printf("... Starting Saga1 processes on node1: ") - saga1 := &testSagaCommit1{} - saga1_process, err := node1.Spawn("saga1", gen.ProcessOptions{MailboxSize: 10000}, saga1, args1) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - args2 := argsSagaCommitArgs{ - workerRes: make(chan interface{}, 2), - sagaRes: make(chan interface{}, 2), - } - fmt.Printf("... 
Starting Saga2 processes on node2: ") - saga2 := &testSagaCommit1{} - saga2_process, err := node2.Spawn("saga2", gen.ProcessOptions{MailboxSize: 10000}, saga2, args2) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - ValueTXID, err := saga1_process.Direct(testSagaCommitStartTx{}) - if err != nil { - t.Fatal(err) - } - TXID, ok := ValueTXID.(gen.SagaTransactionID) - if !ok { - t.Fatal("not a gen.SagaTransactionID") - } - fmt.Printf("... Start new TX on saga1: ") - waitForResultWithValue(t, args1.sagaRes, "newtx") - fmt.Printf("... Start new worker on saga1: ") - waitForResultWithValue(t, args1.workerRes, "jobresult") - fmt.Printf("... Start new TX on saga2: ") - waitForResultWithValue(t, args2.sagaRes, "newtx") - fmt.Printf("... Start new worker on saga2: ") - waitForResultWithValue(t, args2.workerRes, "jobresult") - fmt.Printf("... Try to send the result on saga1 (must be error ErrSagaTxInProgress): ") - if _, err := saga1_process.Direct(testSagaCommitSendRes{id: TXID}); err != gen.ErrSagaTxInProgress { - t.Fatal("must be error here") - } - fmt.Println("OK") - - fmt.Printf("... Send the result on saga2 : ") - if _, err := saga2_process.Direct(testSagaCommitSendRes{id: TXID}); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... Handle TX result on saga1 (from saga2): ") - waitForResultWithValue(t, args1.sagaRes, "txresult") - fmt.Printf("... Send the result on saga1 : ") - if _, err := saga1_process.Direct(testSagaCommitSendRes{id: TXID}); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... Handle TX done on saga1: ") - waitForResultWithValue(t, args1.sagaRes, "txdone") - fmt.Printf("... Handle TX commit with final value on saga1 worker: ") - waitForResultWithValue(t, args1.workerRes, 6.28) - fmt.Printf("... Handle TX commit on saga2: ") - waitForResultWithValue(t, args2.sagaRes, 6.28) - fmt.Printf("... Handle TX commit with final value on saga2 worker: ") - waitForResultWithValue(t, args2.workerRes, 6.28) -} diff --git a/tests/saga_dist_test.go b/tests/saga_dist_test.go deleted file mode 100644 index cf6ae902..00000000 --- a/tests/saga_dist_test.go +++ /dev/null @@ -1,332 +0,0 @@ -package tests - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" - "github.com/ergo-services/ergo/proto/dist" -) - -// this test implements a distributed case of computing the sum of the given -// slice of int numbers -// -// -> Saga2 (run workers 1..n) -// / -// Saga1 -> -// \ -// -> Saga3 (run workers 1..n) -// -// Saga1 creates a slice of int numbers, splits it, and sends the parts to Saga2 and Saga3. -// Each saga runs on a separate node.
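The scatter-gather computation this deleted test exercised does not depend on the saga machinery itself, so for readers following the v3.0.0 removal it can be illustrated in plain Go. A minimal editorial sketch (not code from this patch; it mirrors the shape of the deleted splitSlice helper and stands in goroutines for the saga workers):

package main

import "fmt"

// splitSlice cuts a slice into chunks of at most size elements,
// the same shape as the helper deleted in this patch.
func splitSlice(slice []int, size int) [][]int {
	var chunks [][]int
	for i := 0; i < len(slice); i += size {
		end := i + size
		if end > len(slice) {
			end = len(slice)
		}
		chunks = append(chunks, slice[i:end])
	}
	return chunks
}

func main() {
	values := make([]int, 1000)
	for i := range values {
		values[i] = i
	}

	// scatter: one goroutine per chunk computes a partial sum,
	// standing in for the workers that Saga2/Saga3 spawned per job
	chunks := splitSlice(values, len(values)/2+1)
	partial := make(chan int, len(chunks))
	for _, chunk := range chunks {
		go func(c []int) {
			sum := 0
			for _, v := range c {
				sum += v
			}
			partial <- sum
		}(chunk)
	}

	// gather: combine the partial sums, as Saga1 did in HandleTxResult
	// before reporting the total from HandleTxDone
	total := 0
	for range chunks {
		total += <-partial
	}
	fmt.Println(total) // prints 499500 for 0..999
}

The deleted test performed the same arithmetic: Saga1 accumulated the partial results from Saga2 and Saga3 in HandleTxResult and pushed the grand total to the test channel from HandleTxDone.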
- -// -// Saga1 -// -type testSaga1 struct { - gen.Saga - res chan interface{} - result int -} - -type testSaga1State struct { - txs map[gen.SagaTransactionID]*txvalue -} -type txvalue struct { - res int - nexts map[gen.SagaNextID]bool -} - -func (gs *testSaga1) InitSaga(process *gen.SagaProcess, args ...etf.Term) (gen.SagaOptions, error) { - opts := gen.SagaOptions{} - gs.res = make(chan interface{}, 2) - process.State = &testSaga1State{ - txs: make(map[gen.SagaTransactionID]*txvalue), - } - return opts, nil -} - -func (gs *testSaga1) HandleTxNew(process *gen.SagaProcess, id gen.SagaTransactionID, value interface{}) gen.SagaStatus { - state := process.State.(*testSaga1State) - task := value.(taskTX) - - txval := &txvalue{ - nexts: make(map[gen.SagaNextID]bool), - } - // split it into two parts (two sagas) - values := splitSlice(task.value, len(task.value)/2+1) - - // send to the saga2 - next := gen.SagaNext{ - Saga: saga2_process, - Value: values[0], - } - next_id, err := process.Next(id, next) - if err != nil { - panic(err) - } - txval.nexts[next_id] = true - - // send to the saga3 - if len(values) > 1 { - next = gen.SagaNext{ - Saga: saga3_process, - Value: values[1], - } - next_id, err := process.Next(id, next) - if err != nil { - panic(err) - } - txval.nexts[next_id] = true - } - - state.txs[id] = txval - return gen.SagaStatusOK -} - -func (gs *testSaga1) HandleTxCancel(process *gen.SagaProcess, id gen.SagaTransactionID, reason string) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSaga1) HandleTxResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaNextID, result interface{}) gen.SagaStatus { - state := process.State.(*testSaga1State) - txval := state.txs[id] - switch r := result.(type) { - case int: - txval.res += r - case int64: - txval.res += int(r) - } - delete(txval.nexts, from) - if len(txval.nexts) == 0 { - process.SendResult(id, txval.res) - } - return gen.SagaStatusOK -} - -func (gs *testSaga1) HandleTxDone(process *gen.SagaProcess, id gen.SagaTransactionID, result interface{}) (interface{}, gen.SagaStatus) { - state := process.State.(*testSaga1State) - txval := state.txs[id] - delete(state.txs, id) - gs.result += txval.res - if len(state.txs) == 0 { - gs.res <- gs.result - } - - return nil, gen.SagaStatusOK -} -func (gs *testSaga1) HandleSagaDirect(process *gen.SagaProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case task: - values := splitSlice(m.value, m.split) - fmt.Printf(" process %d txs with %v value(s) each distributing them on Saga2 and Saga3: ", len(values), m.split) - for i := range values { - txValue := taskTX{ - value: values[i], - chunks: m.chunks, - } - process.StartTransaction(gen.SagaTransactionOptions{}, txValue) - } - - return nil, nil - } - - return nil, fmt.Errorf("unknown request %#v", message) -} - -// -// Saga2/Saga3 -// - -var ( - saga2_process = gen.ProcessID{ - Name: "saga2", - Node: "nodeGenSagaDist02@localhost", - } - - saga3_process = gen.ProcessID{ - Name: "saga3", - Node: "nodeGenSagaDist03@localhost", - } -) - -type testSagaN struct { - gen.Saga -} - -type testSagaNState struct { - txs map[gen.SagaTransactionID]*txjobs -} - -func (gs *testSagaN) InitSaga(process *gen.SagaProcess, args ...etf.Term) (gen.SagaOptions, error) { - opts := gen.SagaOptions{ - Worker: &testSagaWorkerN{}, - } - process.State = &testSagaState{ - txs: make(map[gen.SagaTransactionID]*txjobs), - } - return opts, nil -} -func (gs *testSagaN) HandleTxNew(process *gen.SagaProcess, 
id gen.SagaTransactionID, value interface{}) gen.SagaStatus { - var vv []int - if err := etf.TermIntoStruct(value, &vv); err != nil { - panic(err) - } - state := process.State.(*testSagaState) - j := txjobs{ - jobs: make(map[gen.SagaJobID]bool), - } - - values := splitSlice(vv, 5) - for i := range values { - job_id, err := process.StartJob(id, gen.SagaJobOptions{}, values[i]) - if err != nil { - return err - } - j.jobs[job_id] = true - } - state.txs[id] = &j - return gen.SagaStatusOK -} - -func (gs *testSagaN) HandleTxCancel(process *gen.SagaProcess, id gen.SagaTransactionID, reason string) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSagaN) HandleTxResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaNextID, result interface{}) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSagaN) HandleJobResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaJobID, result interface{}) gen.SagaStatus { - state := process.State.(*testSagaState) - j := state.txs[id] - j.result += result.(int) - delete(j.jobs, from) - - if len(j.jobs) == 0 { - process.SendResult(id, j.result) - } - - return gen.SagaStatusOK -} - -// -// SagaWorkerN -// -type testSagaWorkerN struct { - gen.SagaWorker -} - -func (w *testSagaWorkerN) HandleJobStart(process *gen.SagaWorkerProcess, job gen.SagaJob) error { - values := job.Value.([]int) - result := sumSlice(values) - if err := process.SendResult(result); err != nil { - panic(err) - } - return nil -} - -func (w *testSagaWorkerN) HandleJobCancel(process *gen.SagaWorkerProcess, reason string) { - return -} - -func TestSagaDist(t *testing.T) { - fmt.Printf("\n=== Test GenSagaDist\n") - - fmt.Printf("Starting node: nodeGenSagaDist01@localhost...") - opts1 := node.Options{} - protoOptions := node.DefaultProtoOptions() - protoOptions.NumHandlers = 2 - opts1.Proto = dist.CreateProto(protoOptions) - node1, _ := ergo.StartNode("nodeGenSagaDist01@localhost", "cookies", opts1) - if node1 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - fmt.Printf("Starting node: nodeGenSagaDist02@localhost...") - opts2 := node.Options{} - opts2.Proto = dist.CreateProto(protoOptions) - node2, _ := ergo.StartNode("nodeGenSagaDist02@localhost", "cookies", opts2) - if node2 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - fmt.Printf("Starting node: nodeGenSagaDist03@localhost...") - opts3 := node.Options{} - opts3.Proto = dist.CreateProto(protoOptions) - fmt.Printf("Starting node: nodeGenSagaDist02@localhost...") - node3, _ := ergo.StartNode("nodeGenSagaDist03@localhost", "cookies", opts3) - if node3 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - fmt.Printf("... Starting Saga1 processes (on node1): ") - saga1 := &testSaga1{} - saga1_process, err := node1.Spawn("saga1", gen.ProcessOptions{MailboxSize: 10000}, saga1) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... Starting Saga2 processes (on node2): ") - saga2 := &testSagaN{} - _, err = node2.Spawn("saga2", gen.ProcessOptions{MailboxSize: 10000}, saga2) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("... 
Starting Saga3 processes (on node3): ") - saga3 := &testSagaN{} - _, err = node3.Spawn("saga3", gen.ProcessOptions{MailboxSize: 10000}, saga3) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - rand.Seed(time.Now().Unix()) - - slice1 := rand.Perm(1000) - sum1 := sumSlice(slice1) - saga1.result = 0 - startTask1 := task{ - value: slice1, - split: 1, - } - saga1_process.Direct(startTask1) - waitForResultWithValue(t, saga1.res, sum1) - - slice2 := rand.Perm(1000) - sum2 := sumSlice(slice2) - saga1.result = 0 - startTask2 := task{ - value: slice2, - split: 100, - } - saga1_process.Direct(startTask2) - waitForResultWithValue(t, saga1.res, sum2) - - slice3 := rand.Perm(1000) - sum3 := sumSlice(slice3) - saga1.result = 0 - startTask3 := task{ - value: slice3, - split: 1000, - } - saga1_process.Direct(startTask3) - waitForResultWithValue(t, saga1.res, sum3) - - // stop all nodes - node3.Stop() - node2.Stop() - node1.Stop() -} diff --git a/tests/saga_test.go b/tests/saga_test.go deleted file mode 100644 index 1876fb21..00000000 --- a/tests/saga_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package tests - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -// -// Worker -// -type testSagaWorker struct { - gen.SagaWorker -} - -func (w *testSagaWorker) HandleJobStart(process *gen.SagaWorkerProcess, job gen.SagaJob) error { - values := job.Value.([]int) - result := sumSlice(values) - if err := process.SendResult(result); err != nil { - panic(err) - } - return nil -} -func (w *testSagaWorker) HandleJobCancel(process *gen.SagaWorkerProcess, reason string) { - return -} - -// -// Saga -// -type testSaga struct { - gen.Saga - res chan interface{} - result int -} - -type testSagaState struct { - txs map[gen.SagaTransactionID]*txjobs -} - -type txjobs struct { - result int - jobs map[gen.SagaJobID]bool -} - -func (gs *testSaga) InitSaga(process *gen.SagaProcess, args ...etf.Term) (gen.SagaOptions, error) { - opts := gen.SagaOptions{ - Worker: &testSagaWorker{}, - } - gs.res = make(chan interface{}, 2) - process.State = &testSagaState{ - txs: make(map[gen.SagaTransactionID]*txjobs), - } - return opts, nil -} - -func (gs *testSaga) HandleTxNew(process *gen.SagaProcess, id gen.SagaTransactionID, value interface{}) gen.SagaStatus { - task := value.(taskTX) - values := splitSlice(task.value, task.chunks) - state := process.State.(*testSagaState) - j := txjobs{ - jobs: make(map[gen.SagaJobID]bool), - } - for i := range values { - job_id, err := process.StartJob(id, gen.SagaJobOptions{}, values[i]) - if err != nil { - return err - } - j.jobs[job_id] = true - } - state.txs[id] = &j - return gen.SagaStatusOK -} - -func (gs *testSaga) HandleTxDone(process *gen.SagaProcess, id gen.SagaTransactionID, result interface{}) (interface{}, gen.SagaStatus) { - state := process.State.(*testSagaState) - - gs.result += result.(int) - delete(state.txs, id) - if len(state.txs) == 0 { - gs.res <- gs.result - } - return nil, gen.SagaStatusOK -} - -func (gs *testSaga) HandleTxCancel(process *gen.SagaProcess, id gen.SagaTransactionID, reason string) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSaga) HandleTxResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaNextID, result interface{}) gen.SagaStatus { - return gen.SagaStatusOK -} - -func (gs *testSaga) HandleTxInterim(process *gen.SagaProcess, id gen.SagaTransactionID, from 
gen.SagaNextID, interim interface{}) gen.SagaStatus { - - return gen.SagaStatusOK -} - -func (gs *testSaga) HandleJobResult(process *gen.SagaProcess, id gen.SagaTransactionID, from gen.SagaJobID, result interface{}) gen.SagaStatus { - state := process.State.(*testSagaState) - j := state.txs[id] - j.result += result.(int) - delete(j.jobs, from) - - if len(j.jobs) == 0 { - process.SendResult(id, j.result) - } - return gen.SagaStatusOK -} - -type task struct { - value []int - split int - chunks int -} - -type taskTX struct { - value []int - chunks int -} - -func (gs *testSaga) HandleSagaDirect(process *gen.SagaProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case task: - values := splitSlice(m.value, m.split) - fmt.Printf(" process %v txs with %v value(s) each and chunk size %v: ", len(values), m.split, m.chunks) - for i := range values { - txValue := taskTX{ - value: values[i], - chunks: m.chunks, - } - process.StartTransaction(gen.SagaTransactionOptions{}, txValue) - } - - return nil, gen.DirectStatusOK - } - - return nil, fmt.Errorf("unknown request %#v", message) -} - -func TestSagaSimple(t *testing.T) { - fmt.Printf("\n=== Test GenSagaSimple\n") - fmt.Printf("Starting node: nodeGenSagaSimple01@localhost...") - - node, _ := ergo.StartNode("nodeGenSagaSimple01@localhost", "cookies", node.Options{}) - - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - fmt.Printf("... Starting Saga processes: ") - saga := &testSaga{} - saga_process, err := node.Spawn("saga", gen.ProcessOptions{MailboxSize: 10000}, saga) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - rand.Seed(time.Now().Unix()) - - slice1 := rand.Perm(1000) - sum1 := sumSlice(slice1) - startTask1 := task{ - value: slice1, - split: 10, // 10 items per tx - chunks: 5, // size of slice for worker - } - _, err = saga_process.Direct(startTask1) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, saga.res, sum1) - - saga.result = 0 - slice2 := rand.Perm(100) - sum2 := sumSlice(slice2) - startTask2 := task{ - value: slice2, - split: 1, // 1 items per tx - chunks: 5, // size of slice for worker - } - _, err = saga_process.Direct(startTask2) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, saga.res, sum2) - - saga.result = 0 - slice3 := rand.Perm(100) - sum3 := sumSlice(slice3) - startTask3 := task{ - value: slice3, - split: 100, // 100 items per tx - chunks: 5, // size of slice for worker - } - _, err = saga_process.Direct(startTask3) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, saga.res, sum3) - - saga.result = 0 - slice4 := rand.Perm(10000) - sum4 := sumSlice(slice4) - startTask4 := task{ - value: slice4, - split: 100, // 100 items per tx - chunks: 5, // size of slice for worker - } - _, err = saga_process.Direct(startTask4) - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, saga.res, sum4) - node.Stop() - node.Wait() -} - -func splitSlice(slice []int, size int) [][]int { - var chunks [][]int - for i := 0; i < len(slice); i += size { - end := i + size - if end > len(slice) { - end = len(slice) - } - chunks = append(chunks, slice[i:end]) - } - return chunks -} - -func sumSlice(slice []int) int { - var result int - for i := range slice { - result += slice[i] - } - return result -} diff --git a/tests/server_test.go b/tests/server_test.go deleted file mode 100644 index b6099c56..00000000 --- a/tests/server_test.go +++ /dev/null @@ -1,943 +0,0 @@ -package tests - -import ( - "fmt" - 
"reflect" - "sync" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type testServer struct { - gen.Server - res chan interface{} -} - -func (tgs *testServer) Init(process *gen.ServerProcess, args ...etf.Term) error { - tgs.res <- nil - return nil -} -func (tgs *testServer) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - tgs.res <- message - return gen.ServerStatusOK -} -func (tgs *testServer) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - return message, gen.ServerStatusOK -} -func (tgs *testServer) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - tgs.res <- message - return gen.ServerStatusOK -} - -func (tgs *testServer) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case makeCall: - return process.Call(m.to, m.message) - case makeCast: - return nil, process.Cast(m.to, m.message) - - } - return nil, lib.ErrUnsupportedRequest -} -func (tgs *testServer) Terminate(process *gen.ServerProcess, reason string) { - tgs.res <- reason -} - -type testServerDirect struct { - gen.Server - err chan error -} - -func (tgsd *testServerDirect) Init(process *gen.ServerProcess, args ...etf.Term) error { - tgsd.err <- nil - return nil -} -func (tgsd *testServerDirect) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case asyncDirect: - m.ref = ref - process.Cast(process.Self(), m) - return nil, gen.DirectStatusIgnore - case syncDirect: - return m.val, gen.DirectStatusOK - } - return message, gen.DirectStatusOK -} -func (tgsd *testServerDirect) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - switch m := message.(type) { - case asyncDirect: - process.Reply(m.ref, m.val, nil) - } - return gen.ServerStatusOK -} - -func TestServer(t *testing.T) { - fmt.Printf("\n=== Test Server\n") - fmt.Printf("Starting nodes: nodeGS1@localhost, nodeGS2@localhost: ") - node1, _ := ergo.StartNode("nodeGS1@localhost", "cookies", node.Options{}) - node2, _ := ergo.StartNode("nodeGS2@localhost", "cookies", node.Options{}) - if node1 == nil || node2 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs1 := &testServer{ - res: make(chan interface{}, 2), - } - gs2 := &testServer{ - res: make(chan interface{}, 2), - } - gs3 := &testServer{ - res: make(chan interface{}, 2), - } - gsDirect := &testServerDirect{ - err: make(chan error, 2), - } - - fmt.Printf(" wait for start of gs1 on %#v: ", node1.Name()) - node1gs1, _ := node1.Spawn("gs1", gen.ProcessOptions{}, gs1, nil) - waitForResultWithValue(t, gs1.res, nil) - - fmt.Printf(" wait for start of gs2 on %#v: ", node1.Name()) - node1gs2, _ := node1.Spawn("gs2", gen.ProcessOptions{}, gs2, nil) - waitForResultWithValue(t, gs2.res, nil) - - fmt.Printf(" wait for start of gs3 on %#v: ", node2.Name()) - node2gs3, _ := node2.Spawn("gs3", gen.ProcessOptions{}, gs3, nil) - waitForResultWithValue(t, gs3.res, nil) - - fmt.Printf(" wait for start of gsDirect on %#v: ", node2.Name()) - node2gsDirect, _ := node2.Spawn("gsDirect", gen.ProcessOptions{}, gsDirect, nil) - waitForResult(t, gsDirect.err) - - fmt.Println("Testing Server process:") - - fmt.Printf(" process.Send (by 
Pid) local (gs1) -> local (gs2) : ") - node1gs1.Send(node1gs2.Self(), etf.Atom("hi")) - waitForResultWithValue(t, gs2.res, etf.Atom("hi")) - - cast := makeCast{ - to: node1gs2.Self(), - message: etf.Atom("hi cast"), - } - node1gs1.Direct(cast) - fmt.Printf(" process.Cast (by Pid) local (gs1) -> local (gs2) : ") - waitForResultWithValue(t, gs2.res, etf.Atom("hi cast")) - - fmt.Printf(" process.Call (by Pid) local (gs1) -> local (gs2): ") - v := etf.Atom("hi call") - call := makeCall{ - to: node1gs2.Self(), - message: v, - } - if v1, err := node1gs1.Direct(call); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - - fmt.Printf(" process.Send (by Name) local (gs1) -> local (gs2) : ") - node1gs1.Send(etf.Atom("gs2"), etf.Atom("hi")) - waitForResultWithValue(t, gs2.res, etf.Atom("hi")) - - cast = makeCast{ - to: etf.Atom("gs2"), - message: etf.Atom("hi cast"), - } - node1gs1.Direct(cast) - fmt.Printf(" process.Cast (by Name) local (gs1) -> local (gs2) : ") - waitForResultWithValue(t, gs2.res, etf.Atom("hi cast")) - - fmt.Printf(" process.Call (by Name) local (gs1) -> local (gs2): ") - call = makeCall{ - to: etf.Atom("gs2"), - message: v, - } - if v1, err := node1gs1.Direct(call); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - alias, err := node1gs2.CreateAlias() - if err != nil { - t.Fatal(err) - } - fmt.Printf(" process.Send (by Alias) local (gs1) -> local (gs2) : ") - node1gs1.Send(alias, etf.Atom("hi")) - waitForResultWithValue(t, gs2.res, etf.Atom("hi")) - - cast = makeCast{ - to: alias, - message: etf.Atom("hi cast"), - } - node1gs1.Direct(cast) - fmt.Printf(" process.Cast (by Alias) local (gs1) -> local (gs2) : ") - waitForResultWithValue(t, gs2.res, etf.Atom("hi cast")) - - fmt.Printf(" process.Call (by Alias) local (gs1) -> local (gs2): ") - call = makeCall{ - to: alias, - message: v, - } - if v1, err := node1gs1.Direct(call); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - - fmt.Printf(" process.Send (by Pid) local (gs1) -> remote (gs3) : ") - node1gs1.Send(node2gs3.Self(), etf.Atom("hi")) - waitForResultWithValue(t, gs3.res, etf.Atom("hi")) - - cast = makeCast{ - to: node2gs3.Self(), - message: etf.Atom("hi cast"), - } - node1gs1.Direct(cast) - fmt.Printf(" process.Cast (by Pid) local (gs1) -> remote (gs3) : ") - waitForResultWithValue(t, gs3.res, etf.Atom("hi cast")) - - fmt.Printf(" process.Call (by Pid) local (gs1) -> remote (gs3): ") - call = makeCall{ - to: node2gs3.Self(), - message: v, - } - if v1, err := node1gs1.Direct(call); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - - fmt.Printf(" process.Send (by Name) local (gs1) -> remote (gs3) : ") - processName := gen.ProcessID{Name: "gs3", Node: node2.Name()} - node1gs1.Send(processName, etf.Atom("hi")) - waitForResultWithValue(t, gs3.res, etf.Atom("hi")) - - cast = makeCast{ - to: processName, - message: etf.Atom("hi cast"), - } - node1gs1.Direct(cast) - fmt.Printf(" process.Cast (by Name) local (gs1) -> remote (gs3) : ") - waitForResultWithValue(t, gs3.res, etf.Atom("hi cast")) - - fmt.Printf(" process.Call (by Name) local (gs1) -> remote (gs3): ") - call = makeCall{ - to: processName, - 
message: v, - } - if v1, err := node1gs1.Direct(call); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - - fmt.Printf(" process.Send (by Alias) local (gs1) -> remote (gs3) : ") - alias, err = node2gs3.CreateAlias() - if err != nil { - t.Fatal(err) - } - - node1gs1.Send(alias, etf.Atom("hi")) - waitForResultWithValue(t, gs3.res, etf.Atom("hi")) - - cast = makeCast{ - to: alias, - message: etf.Atom("hi cast"), - } - node1gs1.Direct(cast) - fmt.Printf(" process.Cast (by Alias) local (gs1) -> remote (gs3) : ") - waitForResultWithValue(t, gs3.res, etf.Atom("hi cast")) - - fmt.Printf(" process.Call (by Alias) local (gs1) -> remote (gs3): ") - call = makeCall{ - to: alias, - message: v, - } - if v1, err := node1gs1.Direct(call); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - - fmt.Printf(" process.Direct (without HandleDirect implementation): ") - if _, err := node1gs1.Direct(nil); err != lib.ErrUnsupportedRequest { - t.Fatal("must be ErrUnsupportedRequest") - } else { - fmt.Println("OK") - } - fmt.Printf(" process.Direct (with HandleDirect implementation): ") - if v1, err := node2gsDirect.Direct(v); err != nil { - t.Fatal(err) - } else { - if v == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - fmt.Printf(" process.Direct (with HandleDirect implementation with async reply): ") - - av := etf.Atom("async direct") - if v1, err := node2gsDirect.Direct(asyncDirect{val: av}); err != nil { - t.Fatal(err) - } else { - if av == v1 { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", av, v1) - t.Fatal(e) - } - } - - fmt.Printf(" process.SetTrapExit(true) and call process.Exit() gs2: ") - node1gs2.SetTrapExit(true) - node1gs2.Exit("test trap") - waitForResultWithValue(t, gs2.res, gen.MessageExit{Pid: node1gs2.Self(), Reason: "test trap"}) - fmt.Printf(" check process.IsAlive gs2 (must be alive): ") - if !node1gs2.IsAlive() { - t.Fatal("should be alive") - } - fmt.Println("OK") - - fmt.Printf(" process.SetTrapExit(false) and call process.Exit() gs2: ") - node1gs2.SetTrapExit(false) - node1gs2.Exit("test trap") - waitForResultWithValue(t, gs2.res, "test trap") - - fmt.Printf(" check process.IsAlive gs2 (must be died): ") - if node1gs2.IsAlive() { - t.Fatal("shouldn't be alive") - } - fmt.Println("OK") - - fmt.Printf("Stopping nodes: %v, %v\n", node1.Name(), node2.Name()) - node1.Stop() - node2.Stop() -} -func TestServerDirect(t *testing.T) { - fmt.Printf("\n=== Test Server Direct\n") - fmt.Printf("Starting node: nodeGS1Direct@localhost: ") - node1, _ := ergo.StartNode("nodeGS1Direct@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - defer node1.Stop() - - gsDirect := &testServerDirect{ - err: make(chan error, 2), - } - - fmt.Printf(" wait for start of gsDirect on %#v: ", node1.Name()) - node1gsDirect, _ := node1.Spawn("gsDirect", gen.ProcessOptions{}, gsDirect, nil) - waitForResult(t, gsDirect.err) - - var wg sync.WaitGroup - - fmt.Println(" process.Direct with 1000 goroutines:") - direct := func() { - v := etf.Atom("sync direct") - defer wg.Done() - repeat: - if v1, err := node1gsDirect.Direct(syncDirect{val: v}); err != nil { - if err == lib.ErrProcessBusy { - goto repeat - } - t.Fatal(err) - } else { - if v != v1 { - e := 
fmt.Errorf("expected: %#v , got: %#v", v, v1) - t.Fatal(e) - } - } - } - n := 1000 - for i := 0; i < n; i++ { - wg.Add(1) - go direct() - } - - wg.Wait() - fmt.Println("OK") -} - -type messageOrderGS struct { - gen.Server - n int - res chan interface{} -} - -type testCase1 struct { - n int -} - -type testCase2 struct { - n int -} -type testCase3 struct { - n int -} - -func (gs *messageOrderGS) Init(process *gen.ServerProcess, args ...etf.Term) error { - gs.res <- nil - return nil -} - -func (gs *messageOrderGS) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - switch m := message.(type) { - case testCase1: - if gs.n+1 != m.n { - panic(fmt.Sprintf("Disordered messages on %d (awaited: %d)", m.n, gs.n+1)) - } - gs.n = m.n - - if gs.n == 100 { - gs.res <- 1000 - } - return gen.ServerStatusOK - - case testCase2: - if gs.n != m.n { - panic(fmt.Sprintf("Disordered messages on %d (awaited: %d)", m.n, gs.n+1)) - } - gs.n = m.n - 1 - value, err := process.Call("gs3order", message) - if err != nil { - panic(err) - } - if value.(string) != "ok" { - panic("wrong result") - } - - if gs.n == 0 { - gs.res <- 123 - } - return gen.ServerStatusOK - } - - return gen.ServerStatusStop -} - -func (gs *messageOrderGS) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - switch message.(type) { - case testCase2: - return "ok", gen.ServerStatusOK - case testCase3: - return "ok", gen.ServerStatusOK - } - return nil, fmt.Errorf("incorrect call") -} - -func (gs *messageOrderGS) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case testCase3: - for i := 0; i < m.n; i++ { - value, err := process.Call("gs3order", message) - if err != nil { - panic(err) - } - if value.(string) != "ok" { - panic("wrong result") - } - } - return nil, gen.DirectStatusOK - - } - return nil, fmt.Errorf("incorrect direct call") -} - -type GSCallPanic struct { - gen.Server -} - -func (gs *GSCallPanic) Init(process *gen.ServerProcess, args ...etf.Term) error { - return nil -} - -func (gs *GSCallPanic) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - m := message.(string) - if m == "panic" { - panic("test") - } - - return "ok", gen.ServerStatusOK -} - -func (gs *GSCallPanic) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - - pids, ok := message.([]etf.Pid) - if !ok { - return nil, fmt.Errorf("not a pid") - } - fmt.Printf(" making a call p1node1 -> p1node2 (panic): ") - if _, err := process.CallWithTimeout(pids[0], "panic", 1); err == nil { - return nil, fmt.Errorf("must be error here") - } else { - fmt.Println("OK") - } - fmt.Printf(" making a call p1node1 -> p2node2: ") - v, err := process.Call(pids[1], "test") - if err != nil { - return nil, err - } - if v.(string) != "ok" { - return nil, fmt.Errorf("wrong result %#v", v) - } - fmt.Println("OK") - - return nil, gen.DirectStatusOK -} - -func TestServerCallServerWithPanic(t *testing.T) { - fmt.Printf("\n=== Test Server. 
Making a Call to Server with panic (issue 86) \n") - fmt.Printf("Starting node: nodeGSCallWithPanic1@localhost: ") - node1, err1 := ergo.StartNode("nodeGSCallWithPanic1@localhost", "cookies", node.Options{}) - if err1 != nil { - t.Fatal("can't start node", err1) - } else { - fmt.Println("OK") - } - fmt.Printf("Starting node: nodeGSCallWithPanic2@localhost: ") - node2, err2 := ergo.StartNode("nodeGSCallWithPanic2@localhost", "cookies", node.Options{}) - if err2 != nil { - t.Fatal("can't start node", err2) - } else { - fmt.Println("OK") - } - - p1n1, err := node1.Spawn("p1node1", gen.ProcessOptions{}, &GSCallPanic{}) - if err != nil { - t.Fatal(err) - } - p1n2, err := node2.Spawn("p1node2", gen.ProcessOptions{}, &GSCallPanic{}) - if err != nil { - t.Fatal(err) - } - p2n2, err := node2.Spawn("2node2", gen.ProcessOptions{}, &GSCallPanic{}) - if err != nil { - t.Fatal(err) - } - - pids := []etf.Pid{p1n2.Self(), p2n2.Self()} - - if _, err := p1n1.Direct(pids); err != nil { - t.Fatal(err) - } -} - -func TestServerMessageOrder(t *testing.T) { - fmt.Printf("\n=== Test Server message order\n") - fmt.Printf("Starting node: nodeGS1MessageOrder@localhost: ") - node1, _ := ergo.StartNode("nodeGS1MessageOrder@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs1 := &messageOrderGS{ - res: make(chan interface{}, 2), - } - - gs2 := &messageOrderGS{ - res: make(chan interface{}, 2), - } - - gs3 := &messageOrderGS{ - res: make(chan interface{}, 2), - } - - fmt.Printf(" wait for start of gs1order on %#v: ", node1.Name()) - node1gs1, err1 := node1.Spawn("gs1order", gen.ProcessOptions{}, gs1, nil) - if err1 != nil { - panic(err1) - } - waitForResultWithValue(t, gs1.res, nil) - - fmt.Printf(" wait for start of gs2order on %#v: ", node1.Name()) - node1gs2, err2 := node1.Spawn("gs2order", gen.ProcessOptions{}, gs2, nil) - if err2 != nil { - panic(err2) - } - waitForResultWithValue(t, gs2.res, nil) - - fmt.Printf(" wait for start of gs3order on %#v: ", node1.Name()) - node1gs3, err3 := node1.Spawn("gs3order", gen.ProcessOptions{}, gs3, nil) - if err3 != nil { - panic(err3) - } - waitForResultWithValue(t, gs3.res, nil) - - fmt.Printf(" sending 100 messages from gs1 to gs2. 
checking the order: ") - for i := 1; i < 101; i++ { - err := node1gs1.Send(node1gs2.Self(), testCase1{n: i}) - if err != nil { - t.Fatal(err) - } - } - waitForResultWithValue(t, gs2.res, 1000) - - fmt.Printf(" making Direct call with making a call from gs2 to gs3 1 time: ") - _, err := node1gs2.Direct(testCase3{n: 1}) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf(" making Direct call with making a call from gs2 to gs3 100 times: ") - _, err = node1gs2.Direct(testCase3{n: 100}) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - gs2.n = 100 - - fmt.Printf(" sending 100 messages from gs1 to gs2 with making a call from gs2 to gs3: ") - for i := gs2.n; i > 0; i-- { - err := node1gs1.Send(node1gs2.Self(), testCase2{n: i}) - if err != nil { - t.Fatal(err) - } - } - waitForResultWithValue(t, gs2.res, 123) - node1gs3.Exit("normal") - node1.Stop() - node1.Wait() -} - -type messageFloodSourceGS struct { - gen.Server - id int - res chan interface{} -} - -type messageFlood struct { - id int - i int -} - -func (fl *messageFloodSourceGS) Init(process *gen.ServerProcess, args ...etf.Term) error { - fl.res <- nil - return nil -} - -func (fl *messageFloodSourceGS) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - max := message.(int) - - for i := 1; i < max+1; i++ { - if err := process.Send("gsdest", messageFlood{id: fl.id - 1, i: i}); err != nil { - panic(fmt.Sprintf("err on making a send: %s", err)) - } - if err := process.Cast("gsdest", messageFlood{id: fl.id - 1, i: i}); err != nil { - panic(fmt.Sprintf("err on making a cast: %s", err)) - } - if _, err := process.Call("gsdest", messageFlood{id: fl.id - 1, i: i}); err != nil { - panic(fmt.Sprintf("err on making a call: %s", err)) - } - - } - - return gen.ServerStatusStop -} - -type messageFloodDestGS struct { - gen.Server - max int - info [5]int - cast [5]int - call [5]int - done int - res chan interface{} -} - -func (fl *messageFloodDestGS) Init(process *gen.ServerProcess, args ...etf.Term) error { - - fl.res <- nil - return nil -} - -func (fl *messageFloodDestGS) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - switch m := message.(type) { - case messageFlood: - if fl.call[m.id]+1 != m.i { - panic("wrong order") - } - fl.call[m.id] = m.i - if fl.call[m.id] == fl.max { - fl.done++ - } - if fl.done != len(fl.info)*3 { - return nil, gen.ServerStatusOK - } - default: - return nil, gen.ServerStatusStop - } - - fl.res <- nil - return nil, gen.ServerStatusOK -} - -func (fl *messageFloodDestGS) HandleCast(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - switch m := message.(type) { - case messageFlood: - if fl.cast[m.id]+1 != m.i { - panic("wrong order") - } - fl.cast[m.id] = m.i - if fl.cast[m.id] == fl.max { - fl.done++ - } - if fl.done != len(fl.info)*3 { - return gen.ServerStatusOK - } - default: - return gen.ServerStatusStop - } - - fl.res <- nil - return gen.ServerStatusOK -} - -func (fl *messageFloodDestGS) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - switch m := message.(type) { - case messageFlood: - if fl.info[m.id]+1 != m.i { - panic("wrong order") - } - fl.info[m.id] = m.i - if fl.info[m.id] == fl.max { - fl.done++ - } - if fl.done != len(fl.info)*3 { - return gen.ServerStatusOK - } - default: - return gen.ServerStatusStop - } - - fl.res <- nil - return gen.ServerStatusOK -} - -type testCaseFlood struct { - id int -} - -func TestServerMessageFlood(t *testing.T) { - 
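// Editorial note, not part of the deleted file: five source processes each
// push 1000 numbered messages to a single destination through Send
// (HandleInfo), Cast (HandleCast), and Call (HandleCall) concurrently. The
// destination keeps a per-source counter for each delivery path and panics
// on any gap or reordering, so the test asserts per-sender FIFO ordering
// across all three mechanisms; fl.done reaches len(fl.info)*3 == 15 once
// every stream has delivered fl.max messages.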
fmt.Printf("\n=== Test Server message flood \n") - fmt.Printf("Starting node: nodeGS1MessageFlood@localhost: ") - node1, _ := ergo.StartNode("nodeGS1MessageFlood@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start nodes") - } else { - fmt.Println("OK") - } - - gs1source := &messageFloodSourceGS{ - id: 1, - res: make(chan interface{}, 2), - } - gs2source := &messageFloodSourceGS{ - id: 2, - res: make(chan interface{}, 2), - } - gs3source := &messageFloodSourceGS{ - id: 3, - res: make(chan interface{}, 2), - } - gs4source := &messageFloodSourceGS{ - id: 4, - res: make(chan interface{}, 2), - } - gs5source := &messageFloodSourceGS{ - id: 5, - res: make(chan interface{}, 2), - } - - gsdest := &messageFloodDestGS{ - res: make(chan interface{}, 2), - } - fmt.Printf(" wait for start of gs1source on %#v: ", node1.Name()) - gs1sourceProcess, _ := node1.Spawn("gs1source", gen.ProcessOptions{}, gs1source, nil) - waitForResultWithValue(t, gs1source.res, nil) - - fmt.Printf(" wait for start of gs2source on %#v: ", node1.Name()) - gs2sourceProcess, _ := node1.Spawn("gs2source", gen.ProcessOptions{}, gs2source, nil) - waitForResultWithValue(t, gs2source.res, nil) - - fmt.Printf(" wait for start of gs3source on %#v: ", node1.Name()) - gs3sourceProcess, _ := node1.Spawn("gs3source", gen.ProcessOptions{}, gs3source, nil) - waitForResultWithValue(t, gs3source.res, nil) - - fmt.Printf(" wait for start of gs4source on %#v: ", node1.Name()) - gs4sourceProcess, _ := node1.Spawn("gs4source", gen.ProcessOptions{}, gs4source, nil) - waitForResultWithValue(t, gs4source.res, nil) - - fmt.Printf(" wait for start of gs5source on %#v: ", node1.Name()) - gs5sourceProcess, _ := node1.Spawn("gs5source", gen.ProcessOptions{}, gs5source, nil) - waitForResultWithValue(t, gs5source.res, nil) - - fmt.Printf(" wait for start of gsdest on %#v: ", node1.Name()) - node1.Spawn("gsdest", gen.ProcessOptions{}, gsdest, nil) - waitForResultWithValue(t, gsdest.res, nil) - - gsdest.max = 1000 - // start flood - gs1sourceProcess.Send(gs1sourceProcess.Self(), gsdest.max) - gs2sourceProcess.Send(gs2sourceProcess.Self(), gsdest.max) - gs3sourceProcess.Send(gs3sourceProcess.Self(), gsdest.max) - gs4sourceProcess.Send(gs4sourceProcess.Self(), gsdest.max) - gs5sourceProcess.Send(gs5sourceProcess.Self(), gsdest.max) - - waitForResultWithValue(t, gsdest.res, nil) -} - -func waitForResult(t *testing.T, w chan error) { - select { - case e := <-w: - if e == nil { - fmt.Println("OK") - return - } - - t.Fatal(e) - - case <-time.After(time.Second * time.Duration(1)): - t.Fatal("result timeout") - } -} - -func waitForResultWithMultiValue(t *testing.T, w chan interface{}, values etf.List) { - - select { - case v := <-w: - found := false - i := 0 - for { - if reflect.DeepEqual(v, values[i]) { - found = true - values[i] = values[0] - values = values[1:] - if len(values) == 0 { - return - } - // i dont care about stack growing since 'values' - // usually short - waitForResultWithMultiValue(t, w, values) - break - } - i++ - if i+1 > len(values) { - break - } - } - - if !found { - e := fmt.Errorf("got unexpected value: %#v", v) - t.Fatal(e) - } - - case <-time.After(time.Second * time.Duration(2)): - t.Fatal("result timeout") - } - fmt.Println("OK") -} - -func waitForResultWithValue(t *testing.T, w chan interface{}, value interface{}) { - select { - case v := <-w: - if reflect.DeepEqual(v, value) { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected: %#v , got: %#v", value, v) - t.Fatal(e) - } - - case 
<-time.After(time.Second * time.Duration(2)): - t.Fatal("result timeout") - } -} - -func waitForResultWithValueReturnError(t *testing.T, w chan interface{}, value interface{}) error { - select { - case v := <-w: - if reflect.DeepEqual(v, value) { - return nil - } else { - return fmt.Errorf("expected: %#v , got: %#v", value, v) - } - - case <-time.After(time.Second * time.Duration(2)): - return fmt.Errorf("result timeout") - } -} - -func waitForResultWithValueOrValue(t *testing.T, w chan interface{}, value1, value2 interface{}) { - select { - case v := <-w: - if reflect.DeepEqual(v, value1) { - fmt.Println("OK") - } else { - if reflect.DeepEqual(v, value2) { - fmt.Println("OK") - } else { - e := fmt.Errorf("expected another value, but got: %#v", v) - t.Fatal(e) - } - } - - case <-time.After(time.Second * time.Duration(2)): - t.Fatal("result timeout") - } -} - -func waitForTimeout(t *testing.T, w chan interface{}) { - select { - case v := <-w: - e := fmt.Errorf("got value we shouldn't receive: %#v", v) - t.Fatal(e) - - case <-time.After(time.Millisecond * time.Duration(300)): - return - } -} diff --git a/tests/stage_test.go b/tests/stage_test.go deleted file mode 100644 index 14baa028..00000000 --- a/tests/stage_test.go +++ /dev/null @@ -1,973 +0,0 @@ -package tests - -import ( - "fmt" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -const ( - defaultDispatcherBufferSize = 10000 - defaultAutoDemandCount = 3 -) - -type StageProducerTest struct { - gen.Stage - value chan interface{} - dispatcher gen.StageDispatcherBehavior -} - -type sendEvents struct { - events etf.List -} -type cancelSubscription struct { - subscription gen.StageSubscription - reason string -} - -type demandRequest struct { - subscription gen.StageSubscription - count uint -} -type newSubscription struct { - producer etf.Term - opts gen.StageSubscribeOptions -} -type demandHandle struct { - enable bool -} -type autoDemand struct { - subscription gen.StageSubscription - enable bool -} - -// -// a simple Stage Producer -// - -func (gs *StageProducerTest) InitStage(process *gen.StageProcess, args ...etf.Term) (gen.StageOptions, error) { - opts := gen.StageOptions{ - Dispatcher: gs.dispatcher, - } - return opts, nil -} -func (gs *StageProducerTest) HandleDemand(process *gen.StageProcess, subscription gen.StageSubscription, count uint) (etf.List, gen.StageStatus) { - gs.value <- etf.Tuple{subscription, count} - return nil, gen.StageStatusOK -} - -func (gs *StageProducerTest) HandleSubscribe(process *gen.StageProcess, subscription gen.StageSubscription, options gen.StageSubscribeOptions) gen.StageStatus { - gs.value <- subscription - return gen.StageStatusOK -} -func (gs *StageProducerTest) HandleCancel(process *gen.StageProcess, subscription gen.StageSubscription, reason string) gen.StageStatus { - gs.value <- etf.Tuple{"cancel", subscription} - return gen.StageStatusOK -} - -// add this callback only for the 'not a producer' case -func (gs *StageProducerTest) HandleCanceled(process *gen.StageProcess, subscription gen.StageSubscription, reason string) gen.StageStatus { - gs.value <- etf.Tuple{"canceled", subscription, reason} - return gen.StageStatusOK -} - -func (s *StageProducerTest) SendEvents(p gen.Process, events etf.List) error { - message := sendEvents{ - events: events, - } - _, err := p.Direct(message) - return err -} - -func (s *StageProducerTest) Cancel(p 
gen.Process, subscription gen.StageSubscription, reason string) error { - message := cancelSubscription{ - subscription: subscription, - reason: reason, - } - _, err := p.Direct(message) - return err -} -func (gs *StageProducerTest) Subscribe(p gen.Process, producer etf.Term, opts gen.StageSubscribeOptions) (gen.StageSubscription, error) { - message := newSubscription{ - producer: producer, - opts: opts, - } - s, err := p.Direct(message) - if err != nil { - return gen.StageSubscription{}, err - } - return s.(gen.StageSubscription), nil -} - -func (gs *StageProducerTest) SetDemandHandle(p gen.Process, enable bool) error { - message := demandHandle{ - enable: enable, - } - _, err := p.Direct(message) - if err != nil { - return err - } - return nil -} -func (gs *StageProducerTest) SetAutoDemand(p gen.Process, subscription gen.StageSubscription, enable bool) error { - message := autoDemand{ - subscription: subscription, - enable: enable, - } - _, err := p.Direct(message) - if err != nil { - return err - } - return nil -} - -func (s *StageProducerTest) HandleStageDirect(process *gen.StageProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case demandHandle: - process.SetDemandHandle(m.enable) - return nil, nil - case newSubscription: - return process.Subscribe(m.producer, m.opts) - case sendEvents: - process.SendEvents(m.events) - return nil, nil - - case cancelSubscription: - err := process.Cancel(m.subscription, m.reason) - return nil, err - case makeCall: - return process.Call(m.to, m.message) - case makeCast: - return nil, process.Cast(m.to, m.message) - - default: - return nil, lib.ErrUnsupportedRequest - } -} - -// -// a simple Stage Consumer -// -type StageConsumerTest struct { - gen.Stage - value chan interface{} -} - -func (gs *StageConsumerTest) InitStage(process *gen.StageProcess, args ...etf.Term) (gen.StageOptions, error) { - return gen.StageOptions{}, nil -} -func (gs *StageConsumerTest) HandleEvents(process *gen.StageProcess, subscription gen.StageSubscription, events etf.List) gen.StageStatus { - gs.value <- etf.Tuple{"events", subscription, events} - return gen.StageStatusOK -} -func (gs *StageConsumerTest) HandleSubscribed(process *gen.StageProcess, subscription gen.StageSubscription, opts gen.StageSubscribeOptions) (bool, gen.StageStatus) { - gs.value <- subscription - return opts.ManualDemand, gen.StageStatusOK -} -func (gs *StageConsumerTest) HandleCanceled(process *gen.StageProcess, subscription gen.StageSubscription, reason string) gen.StageStatus { - gs.value <- etf.Tuple{"canceled", subscription, reason} - return gen.StageStatusOK -} -func (gs *StageConsumerTest) HandleStageCall(process *gen.StageProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - gs.value <- message - return "ok", gen.ServerStatusOK -} -func (gs *StageConsumerTest) HandleStageCast(process *gen.StageProcess, message etf.Term) gen.ServerStatus { - gs.value <- message - return gen.ServerStatusOK -} -func (gs *StageConsumerTest) HandleStageInfo(process *gen.StageProcess, message etf.Term) gen.ServerStatus { - gs.value <- message - return gen.ServerStatusOK -} - -func (s *StageConsumerTest) HandleStageDirect(p *gen.StageProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case newSubscription: - return p.Subscribe(m.producer, m.opts) - case demandRequest: - err := p.Ask(m.subscription, m.count) - return nil, err - case autoDemand: - err := 
p.SetAutoDemand(m.subscription, m.enable) - return nil, err - - case cancelSubscription: - err := p.Cancel(m.subscription, m.reason) - return nil, err - case makeCall: - return p.Call(m.to, m.message) - case makeCast: - return nil, p.Cast(m.to, m.message) - } - return nil, lib.ErrUnsupportedRequest -} - -func (s *StageConsumerTest) HandleStageTerminate(p *gen.StageProcess, reason string) { - //fmt.Println("StageConsumerTest process terminated with reason", reason) -} - -func (gs *StageConsumerTest) SetAutoDemand(p gen.Process, subscription gen.StageSubscription, enable bool) error { - message := autoDemand{ - subscription: subscription, - enable: enable, - } - _, err := p.Direct(message) - if err != nil { - return err - } - return nil -} - -func (gs *StageConsumerTest) Subscribe(p gen.Process, producer etf.Term, opts gen.StageSubscribeOptions) (gen.StageSubscription, error) { - message := newSubscription{ - producer: producer, - opts: opts, - } - s, err := p.Direct(message) - if err != nil { - return gen.StageSubscription{}, err - } - return s.(gen.StageSubscription), nil -} - -func (s *StageConsumerTest) Cancel(p gen.Process, subscription gen.StageSubscription, reason string) error { - message := cancelSubscription{ - subscription: subscription, - reason: reason, - } - _, err := p.Direct(message) - return err -} - -func (s *StageConsumerTest) Ask(p gen.Process, subscription gen.StageSubscription, count uint) error { - message := demandRequest{ - subscription: subscription, - count: count, - } - _, err := p.Direct(message) - return err -} - -func TestStageSimple(t *testing.T) { - - fmt.Printf("\n=== Test StageSimple\n") - fmt.Printf("Starting node: nodeStageSimple01@localhost...") - - node, _ := ergo.StartNode("nodeStageSimple01@localhost", "cookies", node.Options{}) - - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - producer := &StageProducerTest{ - value: make(chan interface{}, 2), - } - consumer := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - // producerProcess, _ := - fmt.Printf("... starting Producer and Consumer processes: ") - producerProcess, errP := node.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP != nil { - t.Fatal(errP) - } - consumerProcess, errC := node.Spawn("stageConsumer", gen.ProcessOptions{}, consumer, nil) - if errC != nil { - t.Fatal(errC) - } - fmt.Println("OK") - - subOpts := gen.StageSubscribeOptions{ - MinDemand: 4, - MaxDemand: 5, - ManualDemand: true, - // use Temporary to keep this process running - Cancel: gen.StageCancelTemporary, - } - - // case 1: subscribe - fmt.Println("Subscribing/resubscribing/cancelation:") - sub, _ := consumer.Subscribe(consumerProcess, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer: ") - waitForResultWithValue(t, producer.value, sub) - fmt.Printf("... Consumer handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, sub) - // case 2: subscribe one more time (prev subscription should be canceled automatically) - fmt.Printf("... Consumer subscribes to Producer without cancelation previous subscription: ") - sub1, _ := consumer.Subscribe(consumerProcess, "stageProducer", subOpts) - fmt.Println("OK") - // previous subscription should be canceled - fmt.Printf("... Producer canceled previous subscription and handled the new subscription request from Consumer: ") - waitForResultWithValue(t, producer.value, sub1) - fmt.Printf("... 
Consumer handled cancelation and subscription confirmation from Producer: ") - waitForResultWithMultiValue(t, consumer.value, etf.List{sub1, etf.Tuple{"canceled", sub, "resubscribed"}}) - - fmt.Printf("... Consumer invokes Cancel subscription explicitly:") - if err := consumer.Cancel(consumerProcess, sub1, "normal"); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... Producer handled cancelation request from Consumer: ") - waitForResultWithValue(t, producer.value, etf.Tuple{"cancel", sub1}) - fmt.Printf("... Consumer handled cancelation confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, etf.Tuple{"canceled", sub1, "normal"}) - - fmt.Println("make another subscription for the testing of explicit cancelation from the Producer side") - sub2, _ := consumer.Subscribe(consumerProcess, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer: ") - waitForResultWithValue(t, producer.value, sub2) - fmt.Printf("... Consumer handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, sub2) - if err := producer.Cancel(producerProcess, sub2, "normal"); err != nil { - t.Fatal(err) - } - fmt.Printf("... Producer handled cancelation request from itself: ") - waitForResultWithValue(t, producer.value, etf.Tuple{"cancel", sub2}) - fmt.Printf("... Consumer handled cancelation confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, etf.Tuple{"canceled", sub2, "normal"}) - waitForTimeout(t, producer.value) - waitForTimeout(t, consumer.value) - - // case 3: - fmt.Printf("... trying to subscribe on Consumer (should fail with error 'not a producer'): ") - // let's subscribe using Pid instead of registered name "stageConsumer" - sub3, _ := producer.Subscribe(producerProcess, consumerProcess.Self(), subOpts) - waitForResultWithValue(t, producer.value, etf.Tuple{"canceled", sub3, "not a producer"}) - - // case 4: invoking Server callbacks - fmt.Printf("... Invoking Server's callback handlers HandleStageCall: ") - call := makeCall{ - to: "stageConsumer", - message: "test call", - } - if _, err := producerProcess.Direct(call); err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, consumer.value, "test call") - fmt.Printf("... Invoking Server's callback handlers HandleStageCast: ") - cast := makeCast{ - to: "stageConsumer", - message: "test cast", - } - producerProcess.Direct(cast) - waitForResultWithValue(t, consumer.value, "test cast") - fmt.Printf("... Invoking Server's callback handlers HandleStageInfo: ") - producerProcess.Send("stageConsumer", "test info") - waitForResultWithValue(t, consumer.value, "test info") - - // case 5: - // - subscribe, ask and send events - // - resubscribe with updated min/max, ask and send events - // keep this subscription for the next case - subOpts.MinDemand = 2 - subOpts.MaxDemand = 4 - subOpts.ManualDemand = true - fmt.Println("make yet another subscription for the testing subscribe/ask/send_events") - sub4, _ := consumer.Subscribe(consumerProcess, producerProcess.Self(), subOpts) - fmt.Printf("... Producer handled subscription request from Consumer: ") - waitForResultWithValue(t, producer.value, sub4) - fmt.Printf("... Consumer handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, sub4) - fmt.Printf("... 
Consumer sent 'Ask' request with count = 1 (min 2, max 4) : ") - consumer.Ask(consumerProcess, sub4, 1) - waitForResultWithValue(t, producer.value, etf.Tuple{sub4, uint(1)}) - waitForTimeout(t, consumer.value) // shouldn't receive anything here - events := etf.List{ - "a", "b", "c", "d", "e", - } - fmt.Printf("... Consumer sent 'Ask' request with count = 4 (min 2, max 4) : ") - consumer.Ask(consumerProcess, sub4, 4) - waitForResultWithValue(t, producer.value, etf.Tuple{sub4, uint(4)}) - fmt.Printf("... Producer sent 5 events. Consumer should receive 4: ") - producer.SendEvents(producerProcess, events) - expected := etf.Tuple{"events", sub4, events[0:4]} - waitForResultWithValue(t, consumer.value, expected) - events = etf.List{ - "f", - } - fmt.Printf("... Producer sent 1 event. Consumer shouldn't receive anything until it makes yet another 'Ask' request: ") - producer.SendEvents(producerProcess, events) - waitForTimeout(t, consumer.value) // shouldn't receive anything here - fmt.Println("OK") - fmt.Printf("... Consumer sent 'Ask' request with count = 1 (min 2, max 4) : ") - if err := consumer.Ask(consumerProcess, sub4, 1); err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, producer.value, etf.Tuple{sub4, uint(1)}) - fmt.Printf("... Consumer should receive 2 events: ") - expected = etf.Tuple{"events", sub4, etf.List{"e", "f"}} - waitForResultWithValue(t, consumer.value, expected) - - // case 6: - // - ask, disable forwarding, send events, ask, enable forwarding - // keep this subscription for the next case - - fmt.Printf("... Disable handling demand on Producer: ") - if err := producer.SetDemandHandle(producerProcess, false); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... Consumer sent 'Ask' request with count = 1 (min 2, max 4). Stage shouldn't call HandleDemand : ") - consumer.Ask(consumerProcess, sub4, 1) - waitForTimeout(t, producer.value) // shouldn't receive anything here - fmt.Println("OK") - - events = etf.List{ - "a", "b", "c", "d", "e", "f", "g", "h", "1", "2", - } - producer.SendEvents(producerProcess, events) - fmt.Printf("... Producer sent 10 events. Dispatcher should keep them all since demand was buffered: ") - waitForTimeout(t, consumer.value) // shouldn't receive anything here - fmt.Println("OK") - fmt.Printf("... Consumer sent yet another 'Ask'. Stage shouldn't call HandleDemand as well: ") - consumer.Ask(consumerProcess, sub4, 9) - waitForTimeout(t, producer.value) // shouldn't receive anything here - fmt.Println("OK") - - fmt.Printf("... Enable handling demand on Producer: ") - if err := producer.SetDemandHandle(producerProcess, true); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - fmt.Printf("... Producer should receive demands count = 1 and count = 9: ") - expected1 := etf.Tuple{sub4, uint(1)} - expected2 := etf.Tuple{sub4, uint(9)} - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... 
Consumer should receive 3 messages with 4, 4 and 2 events: ") - expected1 = etf.Tuple{"events", sub4, etf.List{"a", "b", "c", "d"}} - expected2 = etf.Tuple{"events", sub4, etf.List{"e", "f", "g", "h"}} - expected3 := etf.Tuple{"events", sub4, etf.List{"1", "2"}} - waitForResultWithMultiValue(t, consumer.value, etf.List{expected1, expected2, expected3}) - - waitForTimeout(t, consumer.value) // shouldn't receive anything here - waitForTimeout(t, producer.value) // shouldn't receive anything here - - // case 7: - // - enable auto demand, send events, try to ask (should get error), disable auto demands - fmt.Printf("... Enable AutoDemand on the Producer (should fail): ") - if err := producer.SetAutoDemand(producerProcess, sub4, true); err == nil { - t.Fatal("should fail here") - } - fmt.Println("OK") - fmt.Printf("... Enable AutoDemand on the Consumer. Producer should receive demand with count %d: ", subOpts.MaxDemand) - if err := consumer.SetAutoDemand(consumerProcess, sub4, true); err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, producer.value, etf.Tuple{sub4, subOpts.MaxDemand}) - fmt.Printf("... Consumer sent 'Ask' request (should fail): ") - if err := consumer.Ask(consumerProcess, sub4, 2); err == nil { - t.Fatal("should fail here") - } - fmt.Println("OK") - events = etf.List{ - "a", "b", "c", "d", "e", - } - producer.SendEvents(producerProcess, events) - fmt.Printf("... Producer sent 5 events. Consumer should receive 4. Demands counter = 2 now: ") - expected = etf.Tuple{"events", sub4, etf.List{"a", "b", "c", "d"}} - waitForResultWithValue(t, consumer.value, expected) - fmt.Printf("... Consumer should send auto demand with count %d: ", subOpts.MaxDemand) - waitForResultWithValue(t, producer.value, etf.Tuple{sub4, subOpts.MaxDemand}) - - node.Stop() -} - -func TestStageDistributed(t *testing.T) { - fmt.Printf("\n=== Test StageDistributed\n") - fmt.Printf("Starting node: nodeStageDistributed01@localhost...") - node1, _ := ergo.StartNode("nodeStageDistributed01@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - fmt.Printf("Starting node: nodeStageDistributed02@localhost...") - node2, _ := ergo.StartNode("nodeStageDistributed02@localhost", "cookies", node.Options{}) - if node2 == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - producer := &StageProducerTest{ - value: make(chan interface{}, 2), - } - consumer := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - fmt.Printf("... starting Producer and Consumer processes: ") - producerProcess, errP := node1.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP != nil { - t.Fatal(errP) - } - consumerProcess, errC := node2.Spawn("stageConsumer", gen.ProcessOptions{}, consumer, nil) - if errC != nil { - t.Fatal(errC) - } - fmt.Println("OK") - subOpts := gen.StageSubscribeOptions{ - MinDemand: 2, - MaxDemand: 4, - ManualDemand: true, - // use Temporary to keep this process running - Cancel: gen.StageCancelTemporary, - } - fmt.Println("Consumer@node2 subscribes on Producer@node1: ") - sub, _ := consumer.Subscribe(consumerProcess, gen.ProcessID{Name: "stageProducer", Node: "nodeStageDistributed01@localhost"}, subOpts) - fmt.Printf("... Producer@node1 handled subscription request from Consumer@node2: ") - waitForResultWithValue(t, producer.value, sub) - fmt.Printf("... Consumer@node2 handled subscription confirmation from Producer@node1: ") - waitForResultWithValue(t, consumer.value, sub) - - fmt.Printf("... 
Consumer@node2 sent 'Ask' request with count = 4 (min 2, max 4) : ") - if err := consumer.Ask(consumerProcess, sub, 4); err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, producer.value, etf.Tuple{sub, uint(4)}) - waitForTimeout(t, consumer.value) // shouldn't receive anything here - events := etf.List{ - "a", "b", "c", "d", "e", - } - fmt.Printf("... Producer@node1 sent 5 events. Consumer@node2 should receive 4: ") - producer.SendEvents(producerProcess, events) - expected := etf.Tuple{"events", sub, events[0:4]} - waitForResultWithValue(t, consumer.value, expected) - - producerProcess.Kill() - fmt.Printf("... Producer process killed. Consumer should receive 'canceled' with reason 'kill': ") - waitForResultWithValue(t, consumer.value, etf.Tuple{"canceled", sub, "kill"}) - if !consumerProcess.IsAlive() { - t.Fatal("Consumer process should be alive here") - } - - // case 2: StageCancelTransient - - fmt.Printf("... Starting Producer process (test StageCancelTransient) : ") - producerProcess1, errP1 := node1.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP1 != nil { - t.Fatal(errP1) - } - fmt.Println("OK") - - subOpts.Cancel = gen.StageCancelTransient - sub1, _ := consumer.Subscribe(consumerProcess, gen.ProcessID{Name: "stageProducer", Node: "nodeStageDistributed01@localhost"}, subOpts) - fmt.Printf("... Producer@node1 handled subscription request from Consumer@node2: ") - waitForResultWithValue(t, producer.value, sub1) - fmt.Printf("... Consumer@node2 handled subscription confirmation from Producer@node1 (StageCancelTransient): ") - waitForResultWithValue(t, consumer.value, sub1) - - producerProcess1.Kill() - if err := producerProcess1.WaitWithTimeout(500 * time.Millisecond); err != nil { - t.Fatal(err) - } - fmt.Printf("... Producer process killed. Consumer should receive 'canceled' with reason 'kill': ") - waitForResultWithValue(t, consumer.value, etf.Tuple{"canceled", sub1, "kill"}) - fmt.Printf("... Consumer process should be terminated due to reason 'kill': ") - if err := consumerProcess.WaitWithTimeout(500 * time.Millisecond); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - // case 3: StageCancelPermanent - - fmt.Printf("... Starting Producer process (test StageCancelPermanent) : ") - producerProcess2, errP2 := node1.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP2 != nil { - t.Fatal(errP2) - } - consumerProcess1, errC := node2.Spawn("stageConsumer", gen.ProcessOptions{}, consumer, nil) - if errC != nil { - t.Fatal(errC) - } - fmt.Println("OK") - - subOpts.Cancel = gen.StageCancelPermanent - sub2, _ := consumer.Subscribe(consumerProcess1, gen.ProcessID{Name: "stageProducer", Node: "nodeStageDistributed01@localhost"}, subOpts) - fmt.Printf("... Producer@node1 handled subscription request from Consumer@node2: ") - waitForResultWithValue(t, producer.value, sub2) - fmt.Printf("... Consumer@node2 handled subscription confirmation from Producer@node1 (StageCancelPermanent): ") - waitForResultWithValue(t, consumer.value, sub2) - - producerProcess2.Exit("normal") - if err := producerProcess2.WaitWithTimeout(500 * time.Millisecond); err != nil { - t.Fatal(err) - } - fmt.Printf("... Producer process terminated normally. Consumer should receive 'canceled' with reason 'normal': ") - waitForResultWithValue(t, consumer.value, etf.Tuple{"canceled", sub2, "normal"}) - fmt.Printf("... 
Consumer process should be terminated due to StageCancelPermanent mode: ") - if err := consumerProcess1.WaitWithTimeout(500 * time.Millisecond); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - // case 4: Cancel on producer's node termination - fmt.Printf("... Starting Producer process (test cancel on node termination) : ") - _, errP3 := node1.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP3 != nil { - t.Fatal(errP3) - } - consumerProcess2, errC := node2.Spawn("stageConsumer", gen.ProcessOptions{}, consumer, nil) - if errC != nil { - t.Fatal(errC) - } - fmt.Println("OK") - subOpts.Cancel = gen.StageCancelTemporary - sub3, err := consumer.Subscribe(consumerProcess2, gen.ProcessID{Name: "stageProducer", Node: "nodeStageDistributed01@localhost"}, subOpts) - if err != nil { - t.Fatal(err) - } - fmt.Printf("... Producer@node1 handled subscription request from Consumer@node2: ") - waitForResultWithValue(t, producer.value, sub3) - fmt.Printf("... Consumer@node2 handled subscription confirmation from Producer@node1 (StageCancelTemporary): ") - waitForResultWithValue(t, consumer.value, sub3) - - node2.Disconnect(node1.Name()) - node1.Stop() - fmt.Printf("... Stopping node1: ") - if err := node1.WaitWithTimeout(1000 * time.Millisecond); err != nil { - t.Fatal(err) - } - fmt.Println("OK") - waitForResultWithValue(t, consumer.value, etf.Tuple{"canceled", sub3, "noconnection"}) - - node2.Stop() -} - -func TestStageDispatcherDemand(t *testing.T) { - fmt.Printf("\n=== Test StageDispatcherDemand\n") - fmt.Printf("Starting node: StageDispatcherDemand@localhost...") - node, _ := ergo.StartNode("StageDispatcherDemand@localhost", "cookies", node.Options{}) - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - subOpts := gen.StageSubscribeOptions{ - MinDemand: 4, - MaxDemand: 4, - } - - producer := &StageProducerTest{ - value: make(chan interface{}, 2), - // StageDispatcherDemand - this is default dispatcher, so we shouldn't create it explicitly - // dispatcher: CreateStageDispatcherDemand(), - } - consumer := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - fmt.Printf("... starting Producer (StageDispatcherDemand): ") - producerProcess, errP := node.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP != nil { - t.Fatal(errP) - } - fmt.Println("OK") - fmt.Printf("... starting Consumer1: ") - consumer1Process, errC1 := node.Spawn("stageConsumer1", gen.ProcessOptions{}, consumer, nil) - if errC1 != nil { - t.Fatal(errC1) - } - fmt.Println("OK") - sub1, _ := consumer.Subscribe(consumer1Process, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer1: ") - expected1 := etf.Tuple{sub1, uint(subOpts.MaxDemand)} - expected2 := sub1 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - - fmt.Printf("... Consumer1 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, sub1) - - fmt.Printf("... starting Consumer2: ") - consumer2Process, errC2 := node.Spawn("stageConsumer2", gen.ProcessOptions{}, consumer, nil) - if errC2 != nil { - t.Fatal(errC2) - } - fmt.Println("OK") - sub2, _ := consumer.Subscribe(consumer2Process, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer2: ") - expected1 = etf.Tuple{sub2, uint(subOpts.MaxDemand)} - expected2 = sub2 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... 
Consumer2 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, sub2) - - fmt.Printf("... starting Consumer3: ") - consumer3Process, errC3 := node.Spawn("stageConsumer3", gen.ProcessOptions{}, consumer, nil) - if errC3 != nil { - t.Fatal(errC3) - } - fmt.Println("OK") - sub3, _ := consumer.Subscribe(consumer3Process, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer3: ") - expected1 = etf.Tuple{sub3, uint(subOpts.MaxDemand)} - expected2 = sub3 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... Consumer3 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer.value, sub3) - - fmt.Printf("... Producer sent 5 events. Consumer1 should receive 4: ") - events := etf.List{ - "a", "b", "c", "d", "e", - } - producer.SendEvents(producerProcess, events) - expected := etf.Tuple{"events", sub1, events[0:4]} - waitForResultWithValue(t, consumer.value, expected) - - fmt.Printf("... Producer sent 5 events. Consumer2 should receive 4: ") - events = etf.List{ - "f", "g", "h", "1", "2", - } - producer.SendEvents(producerProcess, events) - expected = etf.Tuple{"events", sub2, etf.List{"e", "f", "g", "h"}} - waitForResultWithValue(t, consumer.value, expected) - - fmt.Printf("... Producer sent 2 events. Consumer3 should receive 4: ") - events = etf.List{ - "3", "4", - } - producer.SendEvents(producerProcess, events) - expected = etf.Tuple{"events", sub3, etf.List{"1", "2", "3", "4"}} - waitForResultWithValue(t, consumer.value, expected) - - node.Stop() -} - -func TestStageDispatcherBroadcast(t *testing.T) { - fmt.Printf("\n=== Test StageDispatcherBroadcast\n") - fmt.Printf("Starting node: StageDispatcherBroadcast@localhost...") - node, _ := ergo.StartNode("StageDispatcherBroadcast@localhost", "cookies", node.Options{}) - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - subOpts := gen.StageSubscribeOptions{ - MinDemand: 4, - MaxDemand: 4, - } - - producer := &StageProducerTest{ - value: make(chan interface{}, 2), - dispatcher: gen.CreateStageDispatcherBroadcast(), - } - consumer1 := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - consumer2 := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - consumer3 := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - fmt.Printf("... starting Producer (StageDispatcherBroadcast): ") - producerProcess, errP := node.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP != nil { - t.Fatal(errP) - } - fmt.Println("OK") - fmt.Printf("... starting Consumer1: ") - consumer1Process, errC1 := node.Spawn("stageConsumer1", gen.ProcessOptions{}, consumer1, nil) - if errC1 != nil { - t.Fatal(errC1) - } - fmt.Println("OK") - sub1, _ := consumer1.Subscribe(consumer1Process, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer1: ") - expected1 := etf.Tuple{sub1, uint(subOpts.MaxDemand)} - expected2 := sub1 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - - fmt.Printf("... Consumer1 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer1.value, sub1) - - fmt.Printf("... 
starting Consumer2: ") - consumer2Process, errC2 := node.Spawn("stageConsumer2", gen.ProcessOptions{}, consumer2, nil) - if errC2 != nil { - t.Fatal(errC2) - } - fmt.Println("OK") - sub2, _ := consumer2.Subscribe(consumer2Process, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer2: ") - expected1 = etf.Tuple{sub2, uint(subOpts.MaxDemand)} - expected2 = sub2 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... Consumer2 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer2.value, sub2) - - fmt.Printf("... starting Consumer3: ") - consumer3Process, errC3 := node.Spawn("stageConsumer3", gen.ProcessOptions{}, consumer3, nil) - if errC3 != nil { - t.Fatal(errC3) - } - fmt.Println("OK") - sub3, _ := consumer3.Subscribe(consumer3Process, "stageProducer", subOpts) - fmt.Printf("... Producer handled subscription request from Consumer3: ") - expected1 = etf.Tuple{sub3, uint(subOpts.MaxDemand)} - expected2 = sub3 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... Consumer3 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer3.value, sub3) - - fmt.Printf("... Producer sent 5 events: ") - events := etf.List{ - "a", "b", "c", "d", "e", - } - producer.SendEvents(producerProcess, events) - fmt.Println("OK") - exp1 := etf.Tuple{"events", sub1, events[0:4]} - fmt.Printf("... Consumer1 should receive 4: ") - waitForResultWithValue(t, consumer1.value, exp1) - exp2 := etf.Tuple{"events", sub2, events[0:4]} - fmt.Printf("... Consumer2 should receive 4: ") - waitForResultWithValue(t, consumer2.value, exp2) - exp3 := etf.Tuple{"events", sub3, events[0:4]} - fmt.Printf("... Consumer3 should receive 4: ") - waitForResultWithValue(t, consumer3.value, exp3) - - node.Stop() - -} - -func TestStageDispatcherPartition(t *testing.T) { - fmt.Printf("\n=== Test StageDispatcherPartition\n") - fmt.Printf("Starting node: StageDispatcherPartition@localhost...") - node, _ := ergo.StartNode("StageDispatcherPartition@localhost", "cookies", node.Options{}) - if node == nil { - t.Fatal("can't start node") - return - } - fmt.Println("OK") - - subOpts1 := gen.StageSubscribeOptions{ - MinDemand: 3, - MaxDemand: 4, - Partition: 0, - } - subOpts2 := gen.StageSubscribeOptions{ - MinDemand: 3, - MaxDemand: 4, - Partition: 1, - } - subOpts3 := gen.StageSubscribeOptions{ - MinDemand: 3, - MaxDemand: 4, - Partition: 2, - } - - hash := func(t etf.Term) int { - i, ok := t.(int) - if !ok { - // filtering out - return -1 - } - - if i > 1000 { - return 2 - } - if i > 100 { - return 1 - } - return 0 - } - - producer := &StageProducerTest{ - value: make(chan interface{}, 2), - dispatcher: gen.CreateStageDispatcherPartition(3, hash), - } - consumer1 := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - consumer2 := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - consumer3 := &StageConsumerTest{ - value: make(chan interface{}, 2), - } - - fmt.Printf("... starting Producer (StageDispatcherPartition): ") - producerProcess, errP := node.Spawn("stageProducer", gen.ProcessOptions{}, producer, nil) - if errP != nil { - t.Fatal(errP) - } - fmt.Println("OK") - fmt.Printf("... 
starting Consumer1: ") - consumer1Process, errC1 := node.Spawn("stageConsumer1", gen.ProcessOptions{}, consumer1, nil) - if errC1 != nil { - t.Fatal(errC1) - } - fmt.Println("OK") - sub1, _ := consumer1.Subscribe(consumer1Process, "stageProducer", subOpts1) - fmt.Printf("... Producer handled subscription request from Consumer1: ") - expected1 := etf.Tuple{sub1, uint(subOpts1.MaxDemand)} - expected2 := sub1 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - - fmt.Printf("... Consumer1 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer1.value, sub1) - - fmt.Printf("... starting Consumer2: ") - consumer2Process, errC2 := node.Spawn("stageConsumer2", gen.ProcessOptions{}, consumer2, nil) - if errC2 != nil { - t.Fatal(errC2) - } - fmt.Println("OK") - sub2, _ := consumer2.Subscribe(consumer2Process, "stageProducer", subOpts2) - fmt.Printf("... Producer handled subscription request from Consumer2: ") - expected1 = etf.Tuple{sub2, uint(subOpts2.MaxDemand)} - expected2 = sub2 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... Consumer2 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer2.value, sub2) - - fmt.Printf("... starting Consumer3: ") - consumer3Process, errC3 := node.Spawn("stageConsumer3", gen.ProcessOptions{}, consumer3, nil) - if errC3 != nil { - t.Fatal(errC3) - } - fmt.Println("OK") - sub3, _ := consumer3.Subscribe(consumer3Process, "stageProducer", subOpts3) - fmt.Printf("... Producer handled subscription request from Consumer3: ") - expected1 = etf.Tuple{sub3, uint(subOpts3.MaxDemand)} - expected2 = sub3 - waitForResultWithMultiValue(t, producer.value, etf.List{expected1, expected2}) - fmt.Printf("... Consumer3 handled subscription confirmation from Producer: ") - waitForResultWithValue(t, consumer3.value, sub3) - - fmt.Printf("... Producer sent 15 events. 3 of them should be discarded by 'hash' function: ") - events := etf.List{ - 1, 2000, 200, "a", 90, "b", 80, 3000, 600, 9000, "c", 5, 1000, 100, 30, - } - producer.SendEvents(producerProcess, events) - fmt.Println("OK") - expEvents1 := etf.List{ - 1, 90, 80, 5, // left in the queue: 100, 30, - } - exp1 := etf.Tuple{"events", sub1, expEvents1} - fmt.Printf("... Consumer1 should receive 4: ") - waitForResultWithValue(t, consumer1.value, exp1) - expEvents2 := etf.List{ - 200, 600, 1000, - } - exp2 := etf.Tuple{"events", sub2, expEvents2} - fmt.Printf("... Consumer2 should receive 3: ") - waitForResultWithValue(t, consumer2.value, exp2) - expEvents3 := etf.List{ - 2000, 3000, 9000, - } - exp3 := etf.Tuple{"events", sub3, expEvents3} - fmt.Printf("... 
Consumer3 should receive 3: ") - waitForResultWithValue(t, consumer3.value, exp3) - - node.Stop() - -} diff --git a/tests/supervisor_ofa_test.go b/tests/supervisor_ofa_test.go deleted file mode 100644 index afc3a900..00000000 --- a/tests/supervisor_ofa_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package tests - -// - Supervisor - -// - one for all (permanent) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs1.stop(normal) (sv1 stopping gs1) -// (sv1 stopping gs2,gs3) -// (sv1 starting gs1,gs2,gs3) -// gs2.stop(shutdown) (sv1 stopping gs2) -// (sv1 stopping gs1,gs3) -// (sv1 starting gs1,gs2,gs3) -// gs3.stop(panic) (sv1 stopping gs3) -// (sv1 stopping gs1,gs2) -// (sv1 starting gs1,gs2,gs3) - -// - one for all (transient) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs3.stop(panic) (sv1 stopping gs3) -// (sv1 stopping gs1, gs2) -// (sv1 starting gs1, gs2, gs3) - -// gs1.stop(normal) (sv1 stopping gs1) -// ( gs2, gs3 - still working) -// gs2.stop(shutdown) (sv1 stopping gs2) -// (gs3 - still working) - -// - one for all (temporary) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 - -// gs3.stop(panic) (sv1 stopping gs3) -// (sv1 stopping gs1, gs2) - -// start again gs1, gs2, gs3 via sv1 -// gs1.stop(normal) (sv1 stopping gs1) -// (gs2, gs3 are still running) -// gs2.stop(shutdown) (sv1 stopping gs2) -// (gs3 is still running) - -import ( - "fmt" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -type testSupervisorOneForAll struct { - gen.Supervisor - ch chan interface{} -} - -type ChildrenTestCase struct { - reason string - statuses []string - events int -} - -func TestSupervisorOneForAll(t *testing.T) { - fmt.Printf("\n=== Test Supervisor - one for all\n") - fmt.Printf("Starting node nodeSvOneForAll@localhost: ") - node1, _ := ergo.StartNode("nodeSvOneForAll@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - - // =================================================================================================== - // test SupervisorStrategyRestartPermanent - fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - sv := &testSupervisorOneForAll{ - ch: make(chan interface{}, 10), - } - processSV, err := node1.Spawn("testSupervisorPermanent", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartPermanent, sv.ch) - if err != nil { - t.Fatal(err) - } - children := make([]etf.Pid, 3) - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - // testing permanent - testCases := []ChildrenTestCase{ - { - reason: "normal", - statuses: []string{"new", "new", "new"}, - events: 6, // waiting for 3 terminating and 3 starting - }, - { - reason: "abnormal", - statuses: []string{"new", "new", "new"}, - events: 6, - }, - { - reason: "shutdown", - statuses: []string{"new", "new", "new"}, - events: 6, - }, - } - for i := range children { - fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting all of them ... 
", i+1, testCases[i].reason) - processSV.Send(children[i], testCases[i].reason) // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children, children1, testCases[i].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[i].statuses, children, children1) - t.Fatal(e) - } - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // =================================================================================================== - // test SupervisorStrategyRestartTransient - fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... ", gen.SupervisorStrategyRestartTransient) - sv = &testSupervisorOneForAll{ - ch: make(chan interface{}, 10), - } - processSV, _ = node1.Spawn("testSupervisorTransient", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTransient, sv.ch) - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - // testing transient - testCases = []ChildrenTestCase{ - { - reason: "normal", - statuses: []string{"empty", "old", "old"}, - events: 1, // waiting for 1 terminate - }, - { - reason: "abnormal", - statuses: []string{"empty", "new", "new"}, - events: 4, // waiting for 2 terminates and 2 starts - }, - { - reason: "shutdown", - statuses: []string{"empty", "old", "empty"}, - events: 1, // waiting for 1 terminate - }, - } - for i := range children { - fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting all of them if reason != normal ... ", i+1, testCases[i].reason) - processSV.Send(children[i], testCases[i].reason) // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[i].statuses, children, children1) - t.Fatal(e) - } - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorTransient' (%s)... ", gen.SupervisorStrategyRestartTransient) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, 1, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). 
old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // =================================================================================================== - // test SupervisorStrategyRestartTemporary - - // testing temporary - // A temporary child process is never restarted (even when the supervisor's - // restart strategy is rest_for_one or one_for_all and a sibling's death - // causes the temporary process to be terminated). - testCases = []ChildrenTestCase{ - { - reason: "normal", - statuses: []string{"empty", "old", "old"}, - events: 1, // waiting for 1 terminate - }, - { - reason: "abnormal", - statuses: []string{"old", "empty", "old"}, - events: 1, // waiting for 1 terminate - }, - { - reason: "shutdown", - statuses: []string{"old", "old", "empty"}, - events: 1, // waiting for 1 terminate - }, - } - - for i := range testCases { - fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", gen.SupervisorStrategyRestartTemporary) - sv = &testSupervisorOneForAll{ - ch: make(chan interface{}, 10), - } - processSV, _ = node1.Spawn("testSupervisorTemporary", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTemporary, sv.ch) - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - fmt.Printf("... stopping child %d with '%s' reason and no one should be restarted ... ", i+1, testCases[i].reason) - processSV.Send(children[i], testCases[i].reason) // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[i].statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (%s)... ", gen.SupervisorStrategyRestartTemporary) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, len(children)-testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). 
old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - } - -} - -func (ts *testSupervisorOneForAll) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - restart := args[0].(string) - ch := args[1].(chan interface{}) - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - { - Name: "testGS1", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 0}, - }, - { - Name: "testGS2", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 1}, - }, - { - Name: "testGS3", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 2}, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategyOneForAll, - Intensity: 10, - Period: 5, - Restart: restart, - }, - }, nil -} diff --git a/tests/supervisor_ofo_test.go b/tests/supervisor_ofo_test.go deleted file mode 100644 index 56b1ce11..00000000 --- a/tests/supervisor_ofo_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package tests - -// - Supervisor - -// - one for one (permanent) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs1.stop(normal) (sv1 restarting gs1) -// gs2.stop(shutdown) (sv1 restarting gs2) -// gs3.stop(panic) (sv1 restarting gs3) - -// - one for one (transient) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs1.stop(normal) (sv1 won't restart gs1) -// gs2.stop(shutdown) (sv1 won't restart gs2) -// gs3.stop(panic) (sv1 restarting gs3 only) - -// - one for one (temporary) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs1.stop(normal) (sv1 won't restart gs1) -// gs2.stop(shutdown) (sv1 won't restart gs2) -// gs3.stop(panic) (sv1 won't restart gs3) - -import ( - "fmt" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/lib" - "github.com/ergo-services/ergo/node" -) - -type testSupervisorOneForOne struct { - gen.Supervisor - ch chan interface{} -} - -type testSupervisorGenServer struct { - gen.Server -} - -type testMessageStarted struct { - pid etf.Pid - name string - order int -} - -type testMessageTerminated struct { - name string - order int - pid etf.Pid -} - -type testSupervisorGenServerState struct { - ch chan interface{} - order int -} - -func (tsv *testSupervisorGenServer) Init(process *gen.ServerProcess, args ...etf.Term) error { - st := &testSupervisorGenServerState{ - ch: args[0].(chan interface{}), - order: args[1].(int), - } - process.State = st - - st.ch <- testMessageStarted{ - pid: process.Self(), - name: process.Name(), - order: st.order, - } - - return nil -} -func (tsv *testSupervisorGenServer) HandleInfo(process *gen.ServerProcess, message etf.Term) gen.ServerStatus { - // message has the stop reason - - return gen.ServerStatusStopWithReason(message.(string)) -} -func (tsv *testSupervisorGenServer) HandleCall(process *gen.ServerProcess, from gen.ServerFrom, message etf.Term) (etf.Term, gen.ServerStatus) { - return message, gen.ServerStatusOK -} - -func (tsv *testSupervisorGenServer) HandleDirect(process *gen.ServerProcess, ref etf.Ref, message interface{}) (interface{}, gen.DirectStatus) { - switch m := message.(type) { - case makeCall: - return process.Call(m.to, m.message) - case makeCast: - return nil, process.Cast(m.to, m.message) - } - return nil, lib.ErrUnsupportedRequest -} - -func (tsv *testSupervisorGenServer) Terminate(process *gen.ServerProcess, reason string) { - st := process.State.(*testSupervisorGenServerState) - st.ch <- testMessageTerminated{ - name: process.Name(), - pid: 
process.Self(), - order: st.order, - } -} - -func TestSupervisorOneForOne(t *testing.T) { - var err error - - fmt.Printf("\n=== Test Supervisor - one for one\n") - fmt.Printf("Starting node nodeSvOneForOne@localhost: ") - node1, _ := ergo.StartNode("nodeSvOneForOne@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - - // =================================================================================================== - // test SupervisorStrategyRestartPermanent - fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - sv := &testSupervisorOneForOne{ - ch: make(chan interface{}, 10), - } - processSV, err := node1.Spawn("testSupervisorPermanent", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartPermanent, sv.ch) - children := make([]etf.Pid, 3) - - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - fmt.Printf("... stopping children with 'normal' reason and waiting for their starting ... ") - for i := range children { - processSV.Send(children[i], "normal") // stopping child with reason "normal" - } - - time.Sleep(1 * time.Second) - if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { // waiting for 3 terminates and 3 starts - t.Fatal(err) - } else { - statuses := []string{"new", "new", "new"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // =================================================================================================== - // test SupervisorStrategyRestartTransient - fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... ", gen.SupervisorStrategyRestartTransient) - sv = &testSupervisorOneForOne{ - ch: make(chan interface{}, 10), - } - processSV, _ = node1.Spawn("testSupervisorTransient", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTransient, sv.ch) - children = make([]etf.Pid, 3) - - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - fmt.Printf("... stopping children with 'abnormal' reason and waiting for their starting ... ") - for i := range children { - processSV.Send(children[i], "abnormal") // stopping child - } - - if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { // waiting for 3 terminates and 3 starts - t.Fatal(err) - } else { - statuses := []string{"new", "new", "new"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). 
old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("... stopping children with 'normal' reason and they shouldn't be restarted ... ") - for i := range children { - processSV.Send(children[i], "normal") // stopping child - } - - if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - fmt.Printf("Stopping supervisor 'testSupervisorTransient' (%s)... ", gen.SupervisorStrategyRestartTransient) - processSV.Exit("x") - fmt.Println("OK") - - // =================================================================================================== - // test SupervisorStrategyRestartTemporary - - fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", gen.SupervisorStrategyRestartTemporary) - sv = &testSupervisorOneForOne{ - ch: make(chan interface{}, 10), - } - processSV, _ = node1.Spawn("testSupervisorTemporary", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTemporary, sv.ch) - children = make([]etf.Pid, 3) - - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - fmt.Printf("... stopping children with 'normal', 'abnormal', 'shutdown' reasons and they shouldn't be restarted ... ") - processSV.Send(children[0], "normal") // stopping child - processSV.Send(children[1], "abnormal") // stopping child - processSV.Send(children[2], "shutdown") // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (%s)... 
", gen.SupervisorStrategyRestartTemporary) - processSV.Exit("x") - fmt.Println("OK") - -} - -func (ts *testSupervisorOneForOne) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - restart := args[0].(string) - ch := args[1].(chan interface{}) - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - { - Name: "testGS1", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 0}, - }, - { - Name: "testGS2", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 1}, - }, - { - Name: "testGS3", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 2}, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategyOneForOne, - Intensity: 10, - Period: 5, - Restart: restart, - }, - }, nil -} - -func waitNeventsSupervisorChildren(ch chan interface{}, n int, children []etf.Pid) ([]etf.Pid, error) { - // n - number of events that have to be awaited - // start for-loop with 'n+1' to handle exceeded number of events - childrenNew := make([]etf.Pid, len(children)) - copy(childrenNew, children) - for i := 0; i < n+1; i++ { - select { - case c := <-ch: - switch child := c.(type) { - case testMessageTerminated: - childrenNew[child.order] = etf.Pid{} // set empty pid - case testMessageStarted: - childrenNew[child.order] = child.pid - } - - case <-time.After(200 * time.Millisecond): - if i == n { - return childrenNew, nil - } - if i < n { - return childrenNew, fmt.Errorf("expected %d events, but got %d. TIMEOUT", n, i) - } - - } - } - return childrenNew, fmt.Errorf("expected %d events, but got %d. ", n, n+1) -} - -func checkExpectedChildrenStatus(children, children1 []etf.Pid, statuses []string) bool { - empty := etf.Pid{} - for i := 0; i < len(statuses); i++ { - switch statuses[i] { - case "new": - if children1[i] == empty { // is the empty pid (child has been stopped) - return false - } - if children[i] == children1[i] { // this value has to be different - return false - } - - case "empty": - if children1[i] != empty { - return false - } - - case "old": - if children[i] != children1[i] { // this pid has to remain unchanged - return false - } - } - - } - return true -} diff --git a/tests/supervisor_rfo_test.go b/tests/supervisor_rfo_test.go deleted file mode 100644 index 86699f04..00000000 --- a/tests/supervisor_rfo_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package tests - -// - Supervisor - -// - rest for one (permanent) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs1.stop(normal) (sv1 stopping gs1) -// (sv1 stopping gs2,gs3) -// (sv1 starting gs1,gs2,gs3) -// gs2.stop(shutdown) (sv1 stopping gs2) -// (sv1 stopping gs1,gs3) -// (sv1 starting gs1,gs2,gs3) -// gs3.stop(panic) (sv1 stopping gs3) -// (sv1 stopping gs1,gs2) -// (sv1 starting gs1,gs2,gs3) -// -// - rest for one (transient) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// gs3.stop(panic) (sv1 stopping gs3) -// (sv1 stopping gs1, gs2) -// (sv1 starting gs1, gs2, gs3) - -// gs1.stop(normal) (sv1 stopping gs1) -// ( gs2, gs3 - still working) -// gs2.stop(shutdown) (sv1 stopping gs2) -// (gs3 - still working) -// -// - rest for one (temporary) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 - -// gs3.stop(panic) (sv1 stopping gs3) -// (sv1 stopping gs1, gs2) - -// start again gs1, gs2, gs3 via sv1 -// gs1.stop(normal) (sv1 stopping gs1) -// (gs2, gs3 are still running) -// gs2.stop(shutdown) (sv1 stopping gs2) -// (gs3 is still running) - -import ( - "fmt" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - 
"github.com/ergo-services/ergo/node" - // "time" - // "github.com/ergo-services/ergo/etf" -) - -type testSupervisorRestForOne struct { - gen.Supervisor - ch chan interface{} -} - -func TestSupervisorRestForOne(t *testing.T) { - var err error - fmt.Printf("\n=== Test Supervisor - rest for one\n") - fmt.Printf("Starting node nodeSvRestForOne@localhost: ") - node1, _ := ergo.StartNode("nodeSvRestForOne@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - - // =================================================================================================== - // test SupervisorStrategyRestartPermanent - fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - sv := &testSupervisorRestForOne{ - ch: make(chan interface{}, 10), - } - processSV, err := node1.Spawn("testSupervisorPermanent", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartPermanent, sv.ch) - if err != nil { - t.Fatal(err) - } - children := make([]etf.Pid, 3) - - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - // testing permanent - testCases := []ChildrenTestCase{ - { - reason: "normal", - statuses: []string{"new", "new", "new"}, - events: 6, // waiting for 3 terminates and 3 starts - }, - { - reason: "abnormal", - statuses: []string{"old", "new", "new"}, - events: 4, // waiting for 2 terminates and 2 starts - }, - { - reason: "shutdown", - statuses: []string{"old", "old", "new"}, - events: 2, // waiting for 1 terminates and 1 starts - }, - } - for i := range children { - fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting rest of them ... ", i+1, testCases[i].reason) - processSV.Send(children[i], testCases[i].reason) // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[i].statuses, children, children1) - t.Fatal(e) - } - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, 3, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // =================================================================================================== - // test SupervisorStrategyRestartTransient - fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... 
", gen.SupervisorStrategyRestartTransient) - sv = &testSupervisorRestForOne{ - ch: make(chan interface{}, 10), - } - processSV, err = node1.Spawn("testSupervisorTransient", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTransient, sv.ch) - if err != nil { - t.Fatal(err) - } - children = make([]etf.Pid, 3) - - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - // testing transient - testCases = []ChildrenTestCase{ - { - reason: "abnormal", - statuses: []string{"new", "new", "new"}, - events: 6, // waiting for 3 terminates and 3 starts - }, - { - reason: "normal", - statuses: []string{"old", "empty", "old"}, - events: 1, // waiting for 1 terminate - }, - { - reason: "shutdown", - statuses: []string{"old", "empty", "empty"}, - events: 1, // waiting for 1 terminates - }, - } - for i := range children { - fmt.Printf("... stopping child %d with '%s' reason and waiting for restarting if reason != normal ... ", i+1, testCases[i].reason) - processSV.Send(children[i], testCases[i].reason) // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[i].statuses, children, children1) - t.Fatal(e) - } - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorTransient' (%s)... ", gen.SupervisorStrategyRestartTransient) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, 1, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // =================================================================================================== - // test SupervisorStrategyRestartTemporary - - // testing temporary - // A temporary child process is never restarted (even when the supervisor's - // restart strategy is rest_for_one or one_for_all and a sibling's death - // causes the temporary process to be terminated). - testCases = []ChildrenTestCase{ - { - reason: "normal", - statuses: []string{"empty", "old", "old"}, - events: 1, // waiting for 1 terminate - }, - { - reason: "abnormal", - statuses: []string{"old", "empty", "old"}, - events: 1, // waiting for 1 terminate - }, - { - reason: "shutdown", - statuses: []string{"old", "old", "empty"}, - events: 1, // waiting for 1 terminate - }, - } - - for i := range testCases { - fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", gen.SupervisorStrategyRestartTemporary) - sv = &testSupervisorRestForOne{ - ch: make(chan interface{}, 10), - } - processSV, _ = node1.Spawn("testSupervisorTemporary", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTemporary, sv.ch) - children = make([]etf.Pid, 3) - - children, err = waitNeventsSupervisorChildren(sv.ch, 3, children) - if err != nil { - t.Fatal(err) - } else { - fmt.Println("OK") - } - - fmt.Printf("... stopping child %d with '%s' reason and without restarting ... 
", i+1, testCases[i].reason) - processSV.Send(children[i], testCases[i].reason) // stopping child - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children[:], children1[:], testCases[i].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[i].statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (%s)... ", gen.SupervisorStrategyRestartTemporary) - processSV.Exit("x") - if children1, err := waitNeventsSupervisorChildren(sv.ch, 3-testCases[i].events, children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty"} - if checkExpectedChildrenStatus(children[:], children1[:], statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - } - -} - -func (ts *testSupervisorRestForOne) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - restart := args[0].(string) - ch := args[1].(chan interface{}) - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - { - Name: "testGS1", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 0}, - }, - { - Name: "testGS2", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 1}, - }, - { - Name: "testGS3", - Child: &testSupervisorGenServer{}, - Args: []etf.Term{ch, 2}, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategyRestForOne, - Intensity: 10, - Period: 5, - Restart: restart, - }, - }, nil -} diff --git a/tests/supervisor_sofo_test.go b/tests/supervisor_sofo_test.go deleted file mode 100644 index 3ffafea8..00000000 --- a/tests/supervisor_sofo_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package tests - -// - Supervisor - -// - simple one for one (permanent) -// start node1 -// start supevisor sv1 with genservers gs1,gs2,gs3 -// .... 
diff --git a/tests/supervisor_sofo_test.go b/tests/supervisor_sofo_test.go deleted file mode 100644 index 3ffafea8..00000000 --- a/tests/supervisor_sofo_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package tests - -// - Supervisor - -// - simple one for one (permanent) -// start node1 -// start supervisor sv1 with genservers gs1,gs2,gs3 -// .... TODO: describe - -import ( - "fmt" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -type testSupervisorSimpleOneForOne struct { - gen.Supervisor - ch chan interface{} -} - -func TestSupervisorSimpleOneForOne(t *testing.T) { - fmt.Printf("\n=== Test Supervisor - simple one for one\n") - fmt.Printf("Starting node nodeSvSimpleOneForOne@localhost: ") - node1, _ := ergo.StartNode("nodeSvSimpleOneForOne@localhost", "cookies", node.Options{}) - if node1 == nil { - t.Fatal("can't start node") - } else { - fmt.Println("OK") - } - sv := &testSupervisorSimpleOneForOne{ - ch: make(chan interface{}, 10), - } - - // =================================================================================================== - // test SupervisorStrategyRestartPermanent - testCases := []ChildrenTestCase{ - { - reason: "abnormal", - statuses: []string{"new", "new", "new", "new", "new", "new"}, - events: 12, // waiting for 6 terminates and 6 restarts - }, - { - reason: "normal", - statuses: []string{"new", "new", "new", "new", "new", "new"}, - events: 12, // waiting for 6 terminates and 6 restarts - }, - { - reason: "shutdown", - statuses: []string{"new", "new", "new", "new", "new", "new"}, - events: 12, // waiting for 6 terminates and 6 restarts - }, - } - - for c := range testCases { - fmt.Printf("Starting supervisor 'testSupervisorPermanent' (%s)... ", gen.SupervisorStrategyRestartPermanent) - processSV, err := node1.Spawn("testSupervisorPermanent", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartPermanent) - if err != nil { - t.Fatal(err) - } - - children := make([]etf.Pid, 6) - // starting the supervisor shouldn't start its children - children1, err := waitNeventsSupervisorChildren(sv.ch, 0, children) - if err != nil { - t.Fatal(err) - } else { - // must be equal - statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - fmt.Printf("... starting 6 children ... ") - - // start children - for i := 0; i < 6; i = i + 2 { - p, e := sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i) - if e != nil { - t.Fatal(e) - } - children[i] = p.Self() - // start twice. must be able to start any number of child processes - // as it doesn't register this process with the given name. - p, e = sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i+1) - if e != nil { - t.Fatal(e) - } - children[i+1] = p.Self() - } - if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { - t.Fatal(err) - } else { - // they should be equal after start - statuses := []string{"old", "old", "old", "old", "old", "old"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - }
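The "start twice" comment in the loop above is the defining property of simple_one_for_one: child specs act purely as templates, nothing is started together with the supervisor, and every StartChild call spawns one more anonymous instance of the named spec. A toy model of that bookkeeping (hypothetical types for illustration; the real supervisor behavior lives in the gen package):

package main

import "fmt"

// sofoSupervisor is a toy stand-in for a simple_one_for_one supervisor:
// specs are templates, and startChild mints a fresh unregistered "pid"
// for the same spec name on every call.
type sofoSupervisor struct {
	templates map[string]bool
	nextPid   int
}

func (s *sofoSupervisor) startChild(name string) (int, error) {
	if !s.templates[name] {
		return 0, fmt.Errorf("unknown child spec %q", name)
	}
	s.nextPid++
	return s.nextPid, nil
}

func main() {
	sup := &sofoSupervisor{templates: map[string]bool{"testGS1": true}}
	p1, _ := sup.startChild("testGS1")
	p2, _ := sup.startChild("testGS1") // same template, second instance
	fmt.Println(p1, p2, p1 != p2)      // 1 2 true
}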
", testCases[c].reason) - - for k := range children { - processSV.Send(children[k], testCases[c].reason) - } - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children, children1, testCases[c].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[c].statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("Stopping supervisor 'testSupervisor' (reason: %s)... ", testCases[c].reason) - processSV.Exit(testCases[c].reason) - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events-len(children), children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - } - - // =================================================================================================== - // test SupervisorStrategyRestartTransient - testCases = []ChildrenTestCase{ - { - reason: "abnormal", - statuses: []string{"new", "new", "new", "new", "new", "new"}, - events: 12, // waiting for 6 terminates and 6 restarts - }, - { - reason: "normal", - statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"}, - events: 6, // waiting for 6 terminates - }, - { - reason: "shutdown", - statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"}, - events: 6, // waiting for 6 terminates - }, - } - - for c := range testCases { - fmt.Printf("Starting supervisor 'testSupervisorTransient' (%s)... ", gen.SupervisorStrategyRestartTransient) - processSV, err := node1.Spawn("testSupervisorTransient", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTransient) - if err != nil { - t.Fatal(err) - } - - children := make([]etf.Pid, 6) - // starting supervisor shouldn't cause start its children - children1, err := waitNeventsSupervisorChildren(sv.ch, 0, children) - if err != nil { - t.Fatal(err) - } else { - // must be equal - statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - fmt.Printf("... starting 6 children ... ") - - // start children - for i := 0; i < 6; i = i + 2 { - p, e := sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i) - if e != nil { - t.Fatal(e) - } - children[i] = p.Self() - // start twice. must be able to start any number of child processes - // as it doesn't register this process with the given name. - p, e = sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i+1) - if e != nil { - t.Fatal(e) - } - children[i+1] = p.Self() - } - if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { - t.Fatal(err) - } else { - // they should be equal after start - statuses := []string{"old", "old", "old", "old", "old", "old"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - } else { - e := fmt.Errorf("got something else except we expected (%v). 
old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // kill them all with reason = testCases[c].reason - fmt.Printf("... stopping children with '%s' reason and waiting for some of them to restart ... ", testCases[c].reason) - - for k := range children { - processSV.Send(children[k], testCases[c].reason) - } - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children, children1, testCases[c].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[c].statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorTransient' (reason: %s)... ", testCases[c].reason) - processSV.Exit(testCases[c].reason) - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events-len(children), children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - } - - // =================================================================================================== - // test SupervisorStrategyRestartTemporary - testCases = []ChildrenTestCase{ - { - reason: "abnormal", - statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"}, - events: 6, // waiting for 6 terminates - }, - { - reason: "normal", - statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"}, - events: 6, // waiting for 6 terminates - }, - { - reason: "shutdown", - statuses: []string{"empty", "empty", "empty", "empty", "empty", "empty"}, - events: 6, // waiting for 6 terminates - }, - } - - for c := range testCases { - fmt.Printf("Starting supervisor 'testSupervisorTemporary' (%s)... ", gen.SupervisorStrategyRestartTemporary) - processSV, err := node1.Spawn("testSupervisorTemporary", gen.ProcessOptions{}, sv, gen.SupervisorStrategyRestartTemporary) - if err != nil { - t.Fatal(err) - } - - children := make([]etf.Pid, 6) - // starting the supervisor shouldn't start its children - children1, err := waitNeventsSupervisorChildren(sv.ch, 0, children) - if err != nil { - t.Fatal(err) - } else { - // must be equal - statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - fmt.Printf("... starting 6 children ... ") - - // start children - for i := 0; i < 6; i = i + 2 { - p, e := sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i) - if e != nil { - t.Fatal(e) - } - children[i] = p.Self() - // start twice. must be able to start any number of child processes - // as it doesn't register this process with the given name.
- p, e = sv.StartChild(processSV, fmt.Sprintf("testGS%d", i/2+1), sv.ch, i+1) - if e != nil { - t.Fatal(e) - } - children[i+1] = p.Self() - } - if children1, err := waitNeventsSupervisorChildren(sv.ch, 6, children); err != nil { - t.Fatal(err) - } else { - // they should be equal after start - statuses := []string{"old", "old", "old", "old", "old", "old"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - // kill them all with reason = testCases[c].reason - fmt.Printf("... stopping children with '%s' reason and waiting for all of them to exit ... ", testCases[c].reason) - - for k := range children { - processSV.Send(children[k], testCases[c].reason) - } - - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events, children); err != nil { - t.Fatal(err) - } else { - if checkExpectedChildrenStatus(children, children1, testCases[c].statuses) { - fmt.Println("OK") - children = children1 - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", testCases[c].statuses, children, children1) - t.Fatal(e) - } - } - - fmt.Printf("Stopping supervisor 'testSupervisorTemporary' (reason: %s)... ", testCases[c].reason) - processSV.Exit(testCases[c].reason) - if children1, err := waitNeventsSupervisorChildren(sv.ch, testCases[c].events-len(children), children); err != nil { - t.Fatal(err) - } else { - statuses := []string{"empty", "empty", "empty", "empty", "empty", "empty"} - if checkExpectedChildrenStatus(children, children1, statuses) { - fmt.Println("OK") - } else { - e := fmt.Errorf("got something else except we expected (%v). old: %v new: %v", statuses, children, children1) - t.Fatal(e) - } - } - - } -} - -func (ts *testSupervisorSimpleOneForOne) Init(args ...etf.Term) (gen.SupervisorSpec, error) { - restart := args[0].(string) - return gen.SupervisorSpec{ - Children: []gen.SupervisorChildSpec{ - { - Name: "testGS1", - Child: &testSupervisorGenServer{}, - }, - { - Name: "testGS2", - Child: &testSupervisorGenServer{}, - }, - { - Name: "testGS3", - Child: &testSupervisorGenServer{}, - }, - }, - Strategy: gen.SupervisorStrategy{ - Type: gen.SupervisorStrategySimpleOneForOne, - Intensity: 10, - Period: 5, - Restart: restart, - }, - }, nil -} diff --git a/tests/tcp_test.go b/tests/tcp_test.go deleted file mode 100644 index 13b09e56..00000000 --- a/tests/tcp_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package tests - -import ( - "fmt" - "net" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -var ( - resChan = make(chan interface{}, 2) -) - -type testTCPHandler struct { - gen.TCPHandler -} - -type messageTestTCPConnect struct{} -type messageTestTCPStatusNext struct { - left int - await int -} - -func (r *testTCPHandler) HandleConnect(process *gen.TCPHandlerProcess, conn *gen.TCPConnection) gen.TCPHandlerStatus { - resChan <- messageTestTCPConnect{} - return gen.TCPHandlerStatusOK -} - -func (r *testTCPHandler) HandlePacket(process *gen.TCPHandlerProcess, packet []byte, conn *gen.TCPConnection) (int, int, gen.TCPHandlerStatus) { - l := len(packet) - //fmt.Println("GOT", process.Self(), packet, l, "bytes", l%10) - if l < 10 { - resChan <- messageTestTCPStatusNext{ - left: l, - await: 10 - l, - } - return l, 10 - l, gen.TCPHandlerStatusOK - } - if l > 10 { - resChan <- messageTestTCPStatusNext{ - left: l % 10, - await: 10 - (l % 10), - } - return l % 10, 10 - (l % 10), gen.TCPHandlerStatusOK - } - resChan <- packet - return 0, 0, gen.TCPHandlerStatusOK -}
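HandlePacket above implements fixed 10-byte framing through its two integer return values: how many buffered bytes are left unconsumed and how many more to await before the next callback. The pure function below (an illustrative sketch, not the gen.TCPHandler API) reproduces the exact left/await pairs the test asserts: a 7-byte buffer waits for 3 more bytes, a 12-byte buffer yields one frame plus a 2-byte carry-over, and exactly 10 bytes form a complete dataframe.

package main

import "fmt"

// frame10 mirrors the framing arithmetic of HandlePacket for a given
// accumulated buffer length l.
func frame10(l int) (left, await int) {
	switch {
	case l < 10:
		return l, 10 - l
	case l > 10:
		return l % 10, 10 - l%10
	default:
		return 0, 0 // exactly one complete dataframe
	}
}

func main() {
	for _, l := range []int{10, 7, 12, 10} {
		left, await := frame10(l)
		fmt.Printf("buffer %2d bytes -> left %d, await %d\n", l, left, await)
	}
}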
- -type testTCPServer struct { - gen.TCP -} - -func (ts *testTCPServer) InitTCP(process *gen.TCPProcess, args ...etf.Term) (gen.TCPOptions, error) { - var options gen.TCPOptions - options.Handler = &testTCPHandler{} - options.Port = 10101 - - return options, nil -} - -func TestTCP(t *testing.T) { - fmt.Printf("\n=== Test TCP Server\n") - fmt.Printf("Starting node nodeTCP1@localhost: ") - node1, err := ergo.StartNode("nodeTCP1@localhost", "cookies", node.Options{}) - if err != nil { - t.Fatal("can't start node", err) - } else { - fmt.Println("OK") - } - defer node1.Stop() // deferred after the error check: node1 is nil on failure - - fmt.Printf("...starting process (gen.TCP): ") - tcpProcess, err := node1.Spawn("tcp", gen.ProcessOptions{}, &testTCPServer{}) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("...making a new connection: ") - conn, err := net.Dial("tcp", "localhost:10101") - if err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, resChan, messageTestTCPConnect{}) - - fmt.Printf("...send/recv data (10 bytes as 1 logical dataframe): ") - testData1 := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} - if _, err := conn.Write(testData1); err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, resChan, testData1) - - fmt.Printf("...send/recv data (7 bytes as a part of a logical dataframe): ") - testData2 := []byte{11, 12, 13, 14, 15, 16, 17} - if _, err := conn.Write(testData2); err != nil { - t.Fatal(err) - } - - value := messageTestTCPStatusNext{ - left: 7, - await: 3, - } - - waitForResultWithValue(t, resChan, value) - - fmt.Printf("...send/recv data (5 bytes, must be 1 logical dataframe + extra 2 bytes): ") - testData2 = []byte{18, 19, 20, 21, 22} - if _, err := conn.Write(testData2); err != nil { - t.Fatal(err) - } - value = messageTestTCPStatusNext{ - left: 2, - await: 8, - } - waitForResultWithValue(t, resChan, value) - - fmt.Printf("...send/recv data (8 bytes, must be 1 logical dataframe): ") - testData2 = []byte{23, 24, 25, 26, 27, 28, 29, 30} - if _, err := conn.Write(testData2); err != nil { - t.Fatal(err) - } - waitForResultWithValue(t, resChan, []byte{21, 22, 23, 24, 25, 26, 27, 28, 29, 30}) - - tcpProcess.Kill() - tcpProcess.Wait() - - fmt.Printf("...stopping process (gen.TCP): ") - if _, err := net.Dial("tcp", "localhost:10101"); err == nil { - t.Fatal("expected a connection error here") - } - fmt.Println("OK") -} diff --git a/tests/udp_test.go b/tests/udp_test.go deleted file mode 100644 index e974ae07..00000000 --- a/tests/udp_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package tests - -import ( - "fmt" - "net" - "testing" - "time" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -var ( - resUDPChan = make(chan interface{}, 2) -) - -type testUDPHandler struct { - gen.UDPHandler -} - -func (r *testUDPHandler) HandlePacket(process *gen.UDPHandlerProcess, data []byte, packet gen.UDPPacket) { - resUDPChan <- data -} - -type testUDPServer struct { - gen.UDP -} - -func (ts *testUDPServer) InitUDP(process *gen.UDPProcess, args ...etf.Term) (gen.UDPOptions, error) { - var options gen.UDPOptions - options.Handler = &testUDPHandler{} - options.Port = 10101 - - return options, nil -} - -func TestUDP(t *testing.T) { - fmt.Printf("\n=== Test UDP Server\n") - fmt.Printf("Starting node nodeUDP1@localhost: ") - node1, err := 
ergo.StartNode("nodeUDP1@localhost", "cookies", node.Options{}) - defer node1.Stop() - if err != nil { - t.Fatal("can't start node", err) - } else { - fmt.Println("OK") - } - - fmt.Printf("...starting process (gen.UDP): ") - udpProcess, err := node1.Spawn("udp", gen.ProcessOptions{}, &testUDPServer{}) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("...send/receive data: ") - c, err := net.Dial("udp", "localhost:10101") - if err != nil { - t.Fatal(err) - } - defer c.Close() - data := []byte{1, 2, 3, 4, 5} - c.Write(data) - waitForResultWithValue(t, resUDPChan, data) - - fmt.Printf("...stopping process (gen.UDP): ") - udpProcess.Kill() - if err := udpProcess.WaitWithTimeout(time.Second); err != nil { - t.Fatal(err) - } - fmt.Println("OK") -} diff --git a/tests/web_test.go b/tests/web_test.go deleted file mode 100644 index ce014996..00000000 --- a/tests/web_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package tests - -import ( - "fmt" - "io/ioutil" - "net/http" - "testing" - - "github.com/ergo-services/ergo" - "github.com/ergo-services/ergo/etf" - "github.com/ergo-services/ergo/gen" - "github.com/ergo-services/ergo/node" -) - -var ( - testWebString = "hello world" -) - -type testWebHandler struct { - gen.WebHandler -} - -func (r *testWebHandler) HandleRequest(process *gen.WebHandlerProcess, request gen.WebMessageRequest) gen.WebHandlerStatus { - request.Response.Write([]byte(testWebString)) - return gen.WebHandlerStatusDone -} - -type testWebServer struct { - gen.Web -} - -func (w *testWebServer) InitWeb(process *gen.WebProcess, args ...etf.Term) (gen.WebOptions, error) { - var options gen.WebOptions - - mux := http.NewServeMux() - webHandler := process.StartWebHandler(&testWebHandler{}, gen.WebHandlerOptions{}) - mux.Handle("/", webHandler) - options.Handler = mux - - return options, nil -} - -func TestWeb(t *testing.T) { - fmt.Printf("\n=== Test Web Server\n") - fmt.Printf("Starting nodes: nodeWeb1@localhost: ") - node1, err := ergo.StartNode("nodeWeb1@localhost", "cookies", node.Options{}) - defer node1.Stop() - if err != nil { - t.Fatal("can't start node", err) - } else { - fmt.Println("OK") - } - - fmt.Printf("...starting process (gen.Web): ") - _, err = node1.Spawn("web", gen.ProcessOptions{}, &testWebServer{}) - if err != nil { - t.Fatal(err) - } - fmt.Println("OK") - - fmt.Printf("...making simple GET request: ") - res, err := http.Get("http://localhost:8080") - if err != nil { - t.Fatal(err) - } - out, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(out) != testWebString { - t.Fatal("mismatch result") - } - fmt.Println("OK") -} diff --git a/version.go b/version.go index 991c1ae9..062080b0 100644 --- a/version.go +++ b/version.go @@ -1,7 +1,11 @@ package ergo -const ( - Version = "2.2.4" // Ergo Framework version - VersionPrefix = "ergo" // Prefix using for the full version name - VersionOTP int = 25 // Erlang version support +import "ergo.services/ergo/gen" + +var ( + FrameworkVersion = gen.Version{ + Name: "Ergo Framework", + Release: "3.0.0", + License: gen.LicenseMIT, + } )