From 158901694ef7fb7d65b0153e287d196b5ec6f410 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Wed, 6 Dec 2023 18:02:44 +0200 Subject: [PATCH 01/16] add changes of NATS 2.9.22 --- conf/lex.go | 5 +- conf/lex_test.go | 78 +- conf/parse.go | 8 +- conf/parse_test.go | 349 ++- conf/simple.conf | 6 + go.mod | 8 +- go.sum | 18 +- internal/testhelper/logging.go | 3 + logger/log.go | 47 +- logger/log_test.go | 2 +- logger/syslog.go | 2 + logger/syslog_windows.go | 4 + main.go | 22 +- server/accounts.go | 288 +- server/accounts_test.go | 93 +- server/auth.go | 9 +- server/auth_test.go | 1 + server/certidp/certidp.go | 297 ++ server/certidp/messages.go | 106 + server/certidp/ocsp_responder.go | 83 + server/certstore/certstore.go | 102 + server/certstore/certstore_other.go | 46 + server/certstore/certstore_windows.go | 827 ++++++ server/certstore/errors.go | 73 + server/certstore_windows_test.go | 230 ++ server/ciphersuites.go | 3 + server/client.go | 485 ++-- server/client_test.go | 158 +- server/config_check_test.go | 27 +- .../configs/certs/tls/benchmark-ca-cert.pem | 23 + server/configs/certs/tls/benchmark-ca-key.pem | 28 + .../tls/benchmark-server-cert-ed25519.pem | 19 + .../tls/benchmark-server-cert-rsa-1024.pem | 22 + .../tls/benchmark-server-cert-rsa-2048.pem | 24 + .../tls/benchmark-server-cert-rsa-4096.pem | 30 + .../tls/benchmark-server-key-ed25519.pem | 3 + .../tls/benchmark-server-key-rsa-1024.pem | 16 + .../tls/benchmark-server-key-rsa-2048.pem | 28 + .../tls/benchmark-server-key-rsa-4096.pem | 52 + server/configs/reload/reload.conf | 1 + server/configs/tls/tls-ed25519.conf | 10 + server/configs/tls/tls-none.conf | 4 + server/configs/tls/tls-rsa-1024.conf | 10 + server/configs/tls/tls-rsa-2048.conf | 10 + server/configs/tls/tls-rsa-4096.conf | 10 + server/const.go | 16 +- server/consumer.go | 472 +++- server/core_benchmarks_test.go | 251 ++ server/dirstore.go | 7 + server/disk_avail.go | 2 +- server/errors_gen.go | 14 +- server/events.go | 511 +++- server/events_test.go | 49 +- server/filestore.go | 848 ++++-- server/filestore_test.go | 524 +++- server/gateway.go | 19 +- server/ipqueue.go | 37 +- server/ipqueue_test.go | 8 +- server/jetstream.go | 285 +- server/jetstream_api.go | 88 +- server/jetstream_benchmark_consume_test.go | 404 --- server/jetstream_benchmark_kv_test.go | 265 -- server/jetstream_benchmark_publish_test.go | 274 -- server/jetstream_benchmark_test.go | 1376 +++++++++ server/jetstream_chaos_cluster_test.go | 76 - server/jetstream_chaos_consumer_test.go | 615 ---- server/jetstream_chaos_helpers_test.go | 177 -- server/jetstream_chaos_kv_test.go | 456 --- server/jetstream_chaos_test.go | 1285 +++++++++ server/jetstream_cluster.go | 912 ++++-- server/jetstream_cluster_1_test.go | 27 +- server/jetstream_cluster_2_test.go | 25 +- server/jetstream_cluster_3_test.go | 2479 ++++++++++++++++- server/jetstream_errors.go | 12 - server/jetstream_errors_generated.go | 13 - server/jetstream_errors_test.go | 12 - server/jetstream_helpers_test.go | 124 +- server/jetstream_leafnode_test.go | 48 +- server/jetstream_super_cluster_test.go | 94 + server/jetstream_test.go | 804 +++++- server/jwt_test.go | 197 +- server/leafnode.go | 155 +- server/leafnode_test.go | 725 ++++- server/log.go | 17 +- server/log_test.go | 2 +- server/memstore.go | 109 +- server/monitor.go | 359 ++- server/monitor_sort_opts.go | 18 +- server/monitor_test.go | 520 +++- server/mqtt.go | 8 +- server/mqtt_test.go | 47 +- server/nkey.go | 2 +- server/norace_test.go | 1721 +++++++++++- server/ocsp.go | 276 +-
server/ocsp_peer.go | 405 +++ server/ocsp_responsecache.go | 636 +++++ server/opts.go | 196 +- server/opts_test.go | 22 +- server/parser.go | 13 +- server/raft.go | 451 ++- server/raft_helpers_test.go | 276 ++ server/raft_test.go | 49 +- server/reload.go | 176 +- server/reload_test.go | 222 +- server/route.go | 53 +- server/routes_test.go | 88 +- server/sendq.go | 37 +- server/server.go | 288 +- server/server_test.go | 2 +- server/signal_test.go | 2 +- server/split_test.go | 1 + server/store.go | 5 +- server/stream.go | 616 ++-- server/sublist.go | 33 +- server/sublist_test.go | 40 +- server/test_test.go | 24 +- server/util.go | 3 + server/util_test.go | 1 + server/websocket.go | 57 +- server/websocket_test.go | 15 +- 120 files changed, 19212 insertions(+), 4934 deletions(-) create mode 100644 conf/simple.conf create mode 100644 server/certidp/certidp.go create mode 100644 server/certidp/messages.go create mode 100644 server/certidp/ocsp_responder.go create mode 100644 server/certstore/certstore.go create mode 100644 server/certstore/certstore_other.go create mode 100644 server/certstore/certstore_windows.go create mode 100644 server/certstore/errors.go create mode 100644 server/certstore_windows_test.go create mode 100644 server/configs/certs/tls/benchmark-ca-cert.pem create mode 100644 server/configs/certs/tls/benchmark-ca-key.pem create mode 100644 server/configs/certs/tls/benchmark-server-cert-ed25519.pem create mode 100644 server/configs/certs/tls/benchmark-server-cert-rsa-1024.pem create mode 100644 server/configs/certs/tls/benchmark-server-cert-rsa-2048.pem create mode 100644 server/configs/certs/tls/benchmark-server-cert-rsa-4096.pem create mode 100644 server/configs/certs/tls/benchmark-server-key-ed25519.pem create mode 100644 server/configs/certs/tls/benchmark-server-key-rsa-1024.pem create mode 100644 server/configs/certs/tls/benchmark-server-key-rsa-2048.pem create mode 100644 server/configs/certs/tls/benchmark-server-key-rsa-4096.pem create mode 100644 server/configs/tls/tls-ed25519.conf create mode 100644 server/configs/tls/tls-none.conf create mode 100644 server/configs/tls/tls-rsa-1024.conf create mode 100644 server/configs/tls/tls-rsa-2048.conf create mode 100644 server/configs/tls/tls-rsa-4096.conf create mode 100644 server/core_benchmarks_test.go delete mode 100644 server/jetstream_benchmark_consume_test.go delete mode 100644 server/jetstream_benchmark_kv_test.go delete mode 100644 server/jetstream_benchmark_publish_test.go create mode 100644 server/jetstream_benchmark_test.go delete mode 100644 server/jetstream_chaos_cluster_test.go delete mode 100644 server/jetstream_chaos_consumer_test.go delete mode 100644 server/jetstream_chaos_helpers_test.go delete mode 100644 server/jetstream_chaos_kv_test.go create mode 100644 server/jetstream_chaos_test.go create mode 100644 server/ocsp_peer.go create mode 100644 server/ocsp_responsecache.go create mode 100644 server/raft_helpers_test.go diff --git a/conf/lex.go b/conf/lex.go index be990a610..b1d68bda1 100644 --- a/conf/lex.go +++ b/conf/lex.go @@ -78,6 +78,7 @@ const ( topOptTerm = '}' blockStart = '(' blockEnd = ')' + mapEndString = string(mapEnd) ) type stateFn func(lx *lexer) stateFn @@ -681,7 +682,7 @@ func lexMapQuotedKey(lx *lexer) stateFn { return lexMapQuotedKey } -// lexMapQuotedKey consumes the text of a key between quotes. +// lexMapDubQuotedKey consumes the text of a key between quotes. 
func lexMapDubQuotedKey(lx *lexer) stateFn { if r := lx.peek(); r == eof { return lx.errorf("Unexpected EOF processing double quoted map key.") @@ -1061,7 +1062,7 @@ func lexNegNumberStart(lx *lexer) stateFn { return lexNegNumber } -// lexNumber consumes a negative integer or a float after seeing the first digit. +// lexNegNumber consumes a negative integer or a float after seeing the first digit. func lexNegNumber(lx *lexer) stateFn { r := lx.next() switch { diff --git a/conf/lex_test.go b/conf/lex_test.go index a91fdb674..2d2bec12d 100644 --- a/conf/lex_test.go +++ b/conf/lex_test.go @@ -1,15 +1,3 @@ -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. package conf import "testing" @@ -1483,6 +1471,8 @@ func TestJSONCompat(t *testing.T) { expected: []item{ {itemKey, "http_port", 3, 28}, {itemInteger, "8223", 3, 40}, + {itemKey, "}", 4, 25}, + {itemEOF, "", 0, 0}, }, }, { @@ -1490,14 +1480,16 @@ func TestJSONCompat(t *testing.T) { input: ` { "http_port": 8223, - "port": 4223 + "port": 6667 } `, expected: []item{ {itemKey, "http_port", 3, 28}, {itemInteger, "8223", 3, 40}, {itemKey, "port", 4, 28}, - {itemInteger, "4223", 4, 35}, + {itemInteger, "6667", 4, 35}, + {itemKey, "}", 5, 25}, + {itemEOF, "", 0, 0}, }, }, { @@ -1505,7 +1497,7 @@ func TestJSONCompat(t *testing.T) { input: ` { "http_port": 8223, - "port": 4223, + "port": 6667, "max_payload": "5MB", "debug": true, "max_control_line": 1024 @@ -1515,24 +1507,27 @@ func TestJSONCompat(t *testing.T) { {itemKey, "http_port", 3, 28}, {itemInteger, "8223", 3, 40}, {itemKey, "port", 4, 28}, - {itemInteger, "4223", 4, 35}, + {itemInteger, "6667", 4, 35}, {itemKey, "max_payload", 5, 28}, {itemString, "5MB", 5, 43}, {itemKey, "debug", 6, 28}, {itemBool, "true", 6, 36}, {itemKey, "max_control_line", 7, 28}, {itemInteger, "1024", 7, 47}, + {itemKey, "}", 8, 25}, + {itemEOF, "", 0, 0}, }, }, { name: "should support JSON not prettified", - input: `{"http_port": 8224,"port": 4224} + input: `{"http_port": 8224,"port": 6668} `, expected: []item{ {itemKey, "http_port", 1, 2}, {itemInteger, "8224", 1, 14}, {itemKey, "port", 1, 20}, - {itemInteger, "4224", 1, 27}, + {itemInteger, "6668", 1, 27}, + {itemEOF, "", 0, 0}, }, }, { @@ -1545,11 +1540,13 @@ func TestJSONCompat(t *testing.T) { {itemInteger, "8225", 1, 14}, {itemKey, "port", 1, 20}, {itemInteger, "4225", 1, 27}, + {itemKey, "}", 2, 25}, + {itemEOF, "", 0, 0}, }, }, { name: "should support uglified JSON with inner blocks", - input: `{"http_port": 8227,"port": 4227,"write_deadline": "1h","cluster": {"port": 6222,"routes": ["nats://127.0.0.1:6666","nats://127.0.0.1:4223","nats://127.0.0.1:4224"]}} + input: `{"http_port": 8227,"port": 4227,"write_deadline": "1h","cluster": {"port": 6222,"routes": ["nats://127.0.0.1:6666","nats://127.0.0.1:6667","nats://127.0.0.1:6668"]}} `, expected: []item{ {itemKey, "http_port", 1, 2}, @@ -1565,10 +1562,12 @@ func TestJSONCompat(t *testing.T) { {itemKey, "routes", 1, 81}, {itemArrayStart, "", 1, 91}, {itemString, 
"nats://127.0.0.1:6666", 1, 92}, - {itemString, "nats://127.0.0.1:4223", 1, 116}, - {itemString, "nats://127.0.0.1:4224", 1, 140}, + {itemString, "nats://127.0.0.1:6667", 1, 116}, + {itemString, "nats://127.0.0.1:6667", 1, 140}, {itemArrayEnd, "", 1, 163}, {itemMapEnd, "", 1, 164}, + {itemKey, "}", 14, 25}, + {itemEOF, "", 0, 0}, }, }, { @@ -1582,8 +1581,8 @@ func TestJSONCompat(t *testing.T) { "port": 6222, "routes": [ "nats://127.0.0.1:6666", - "nats://127.0.0.1:4223", - "nats://127.0.0.1:4224" + "nats://127.0.0.1:6667", + "nats://127.0.0.1:6668" ] } } @@ -1602,10 +1601,39 @@ func TestJSONCompat(t *testing.T) { {itemKey, "routes", 8, 30}, {itemArrayStart, "", 8, 40}, {itemString, "nats://127.0.0.1:6666", 9, 32}, - {itemString, "nats://127.0.0.1:4223", 10, 32}, - {itemString, "nats://127.0.0.1:4224", 11, 32}, + {itemString, "nats://127.0.0.1:6667", 10, 32}, + {itemString, "nats://127.0.0.1:6668", 11, 32}, {itemArrayEnd, "", 12, 30}, {itemMapEnd, "", 13, 28}, + {itemKey, "}", 14, 25}, + {itemEOF, "", 0, 0}, + }, + }, + { + name: "should support JSON with blocks", + input: `{ + "jetstream": { + "store_dir": "/tmp/nats" + "max_mem": 1000000, + }, + "port": 6666, + "server_name": "nats1" + } + `, + expected: []item{ + {itemKey, "jetstream", 2, 28}, + {itemMapStart, "", 2, 41}, + {itemKey, "store_dir", 3, 30}, + {itemString, "/tmp/nats", 3, 43}, + {itemKey, "max_mem", 4, 30}, + {itemInteger, "1000000", 4, 40}, + {itemMapEnd, "", 5, 28}, + {itemKey, "port", 6, 28}, + {itemInteger, "6666", 6, 35}, + {itemKey, "server_name", 7, 28}, + {itemString, "nats1", 7, 43}, + {itemKey, "}", 8, 25}, + {itemEOF, "", 0, 0}, }, }, } { diff --git a/conf/parse.go b/conf/parse.go index 23d082f54..649ff9850 100644 --- a/conf/parse.go +++ b/conf/parse.go @@ -147,16 +147,22 @@ func parse(data, fp string, pedantic bool) (p *parser, err error) { } p.pushContext(p.mapping) + var prevItem item for { it := p.next() if it.typ == itemEOF { + // Here we allow the final character to be a bracket '}' + // in order to support JSON like configurations. + if prevItem.typ == itemKey && prevItem.val != mapEndString { + return nil, fmt.Errorf("config is invalid (%s:%d:%d)", fp, it.line, it.pos) + } break } + prevItem = it if err := p.processItem(it, fp); err != nil { return nil, err } } - return p, nil } diff --git a/conf/parse_test.go b/conf/parse_test.go index b50cc8b80..5bb1662f4 100644 --- a/conf/parse_test.go +++ b/conf/parse_test.go @@ -1,20 +1,9 @@ -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
package conf import ( "fmt" "os" + "path/filepath" "reflect" "strings" "testing" @@ -415,3 +404,339 @@ func TestParserNoInfiniteLoop(t *testing.T) { } } } + +func TestParseWithNoValuesAreInvalid(t *testing.T) { + for _, test := range []struct { + name string + conf string + err string + }{ + { + "invalid key without values", + `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "config is invalid (:1:41)", + }, + { + "invalid untrimmed key without values", + ` aaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "config is invalid (:1:41)", + }, + { + "invalid untrimmed key without values", + ` aaaaaaaaaaaaaaaaaaaaaaaaaaa `, + "config is invalid (:1:41)", + }, + { + "invalid keys after comments", + ` + # with comments and no spaces to create key values + # is also an invalid config. + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + `, + "config is invalid (:5:25)", + }, + { + "comma separated without values are invalid", + ` + a,a,a,a,a,a,a,a,a,a,a + `, + "config is invalid (:3:25)", + }, + } { + t.Run(test.name, func(t *testing.T) { + if _, err := parse(test.conf, "", true); err == nil { + t.Error("expected an error") + } else if !strings.Contains(err.Error(), test.err) { + t.Errorf("expected invalid conf error, got: %v", err) + } + }) + } +} + +func TestParseWithNoValuesEmptyConfigsAreValid(t *testing.T) { + for _, test := range []struct { + name string + conf string + }{ + { + "empty conf", + "", + }, + { + "empty conf with line breaks", + ` + + + `, + }, + { + "just comments with no values", + ` + # just comments with no values + # is still valid. + `, + }, + } { + t.Run(test.name, func(t *testing.T) { + if _, err := parse(test.conf, "", true); err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestParseWithTrailingBracketsAreValid(t *testing.T) { + for _, test := range []struct { + name string + conf string + }{ + { + "empty conf", + "{}", + }, + { + "just comments with no values", + ` + { + # comments in the body + } + `, + }, + { + // trailing brackets can accidentally become keys; + // this is valid since it's needed to support JSON-like configs. + "trailing brackets after config", + ` + accounts { users = [{}]} + } + `, + }, + { + "wrapped in brackets", + `{ + accounts { users = [{}]} + } + `, + }, + } { + t.Run(test.name, func(t *testing.T) { + if _, err := parse(test.conf, "", true); err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestParseWithNoValuesIncludes(t *testing.T) { + for _, test := range []struct { + input string + includes map[string]string + err string + linepos string + }{ + { + `# includes + accounts { + foo { include 'foo.conf'} + bar { users = [{user = "bar"}] } + quux { include 'quux.conf'} + } + `, + map[string]string{ + "foo.conf": ``, + "quux.conf": `?????????????`, + }, + "error parsing include file 'quux.conf', config is invalid", + "quux.conf:1:1", + }, + { + `# includes + accounts { + foo { include 'foo.conf'} + bar { include 'bar.conf'} + quux { include 'quux.conf'} + } + `, + map[string]string{ + "foo.conf": ``, // Empty configs are ok + "bar.conf": `AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA`, + "quux.conf": ` + # just some comments, + # and no key values also ok.
+ `, + }, + "error parsing include file 'bar.conf', config is invalid", + "bar.conf:1:34", + }, + } { + t.Run("", func(t *testing.T) { + sdir := t.TempDir() + f, err := os.CreateTemp(sdir, "nats.conf-") + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(f.Name(), []byte(test.input), 066); err != nil { + t.Error(err) + } + if test.includes != nil { + for includeFile, contents := range test.includes { + inf, err := os.Create(filepath.Join(sdir, includeFile)) + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(inf.Name(), []byte(contents), 066); err != nil { + t.Error(err) + } + } + } + if _, err := parse(test.input, f.Name(), true); err == nil { + t.Error("expected an error") + } else if !strings.Contains(err.Error(), test.err) || !strings.Contains(err.Error(), test.linepos) { + t.Errorf("expected invalid conf error, got: %v", err) + } + }) + } +} + +func TestJSONParseCompat(t *testing.T) { + for _, test := range []struct { + name string + input string + includes map[string]string + expected map[string]interface{} + }{ + { + "JSON with nested blocks", + ` + { + "http_port": 8227, + "port": 4227, + "write_deadline": "1h", + "cluster": { + "port": 6222, + "routes": [ + "nats://127.0.0.1:6666", + "nats://127.0.0.1:6667", + "nats://127.0.0.1:6668" + ] + } + } + `, + nil, + map[string]interface{}{ + "http_port": int64(8227), + "port": int64(4227), + "write_deadline": "1h", + "cluster": map[string]interface{}{ + "port": int64(6222), + "routes": []interface{}{ + "nats://127.0.0.1:6666", + "nats://127.0.0.1:6667", + "nats://127.0.0.1:6668", + }, + }, + }, + }, + { + "JSON with nested blocks", + `{ + "jetstream": { + "store_dir": "/tmp/nats" + "max_mem": 1000000, + }, + "port": 6666, + "server_name": "nats1" + } + `, + nil, + map[string]interface{}{ + "jetstream": map[string]interface{}{ + "store_dir": "/tmp/nats", + "max_mem": int64(1_000_000), + }, + "port": int64(6666), + "server_name": "nats1", + }, + }, + { + "JSON empty object in one line", + `{}`, + nil, + map[string]interface{}{}, + }, + { + "JSON empty object with line breaks", + ` + { + } + `, + nil, + map[string]interface{}{}, + }, + { + "JSON includes", + ` + accounts { + foo { include 'foo.json' } + bar { include 'bar.json' } + quux { include 'quux.json' } + } + `, + map[string]string{ + "foo.json": `{ "users": [ {"user": "foo"} ] }`, + "bar.json": `{ + "users": [ {"user": "bar"} ] + }`, + "quux.json": `{}`, + }, + map[string]interface{}{ + "accounts": map[string]interface{}{ + "foo": map[string]interface{}{ + "users": []interface{}{ + map[string]interface{}{ + "user": "foo", + }, + }, + }, + "bar": map[string]interface{}{ + "users": []interface{}{ + map[string]interface{}{ + "user": "bar", + }, + }, + }, + "quux": map[string]interface{}{}, + }, + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + sdir := t.TempDir() + f, err := os.CreateTemp(sdir, "nats.conf-") + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(f.Name(), []byte(test.input), 066); err != nil { + t.Error(err) + } + if test.includes != nil { + for includeFile, contents := range test.includes { + inf, err := os.Create(filepath.Join(sdir, includeFile)) + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(inf.Name(), []byte(contents), 066); err != nil { + t.Error(err) + } + } + } + m, err := ParseFile(f.Name()) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(m, test.expected) { + t.Fatalf("Not Equal:\nReceived: '%+v'\nExpected: '%+v'\n", m, test.expected) + } + }) + } +} diff --git 
a/conf/simple.conf b/conf/simple.conf new file mode 100644 index 000000000..8f75d73ad --- /dev/null +++ b/conf/simple.conf @@ -0,0 +1,6 @@ +listen: 127.0.0.1:4222 + +authorization { + include 'includes/users.conf' # Pull in from file + timeout: 0.5 +} diff --git a/go.mod b/go.mod index 50eeb729d..d27980bf9 100644 --- a/go.mod +++ b/go.mod @@ -9,14 +9,14 @@ require ( github.com/go-playground/validator/v10 v10.14.0 github.com/gofrs/uuid v4.4.0+incompatible github.com/jhump/protoreflect v1.13.0 - github.com/klauspost/compress v1.16.0 + github.com/klauspost/compress v1.16.7 github.com/minio/highwayhash v1.0.2 - github.com/nats-io/jwt/v2 v2.3.0 - github.com/nats-io/nats.go v1.25.0 + github.com/nats-io/jwt/v2 v2.5.0 + github.com/nats-io/nats.go v1.28.0 github.com/nats-io/nkeys v0.4.4 github.com/nats-io/nuid v1.0.1 github.com/tkanos/gonfig v0.0.0-20210106201359-53e13348de2f - go.uber.org/automaxprocs v1.5.1 + go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.14.0 golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 diff --git a/go.sum b/go.sum index d26ed40d5..7dcbca35f 100644 --- a/go.sum +++ b/go.sum @@ -241,8 +241,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= -github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= @@ -294,12 +294,11 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI= -github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak= +github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= github.com/nats-io/nats-server/v2 v2.9.5 h1:TlduKZ9YGoM0n34Lhm6AN0zRFOt/G3jTy9mPxXnE6dU= -github.com/nats-io/nats.go v1.25.0 h1:t5/wCPGciR7X3Mu8QOi4jiJaXaWM8qtkLu4lzGZvYHE= -github.com/nats-io/nats.go v1.25.0/go.mod h1:D2WALIhz7V8M0pH8Scx8JZXlg6Oqz5VG+nQkK8nJdvg= -github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c= +github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc= github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA= github.com/nats-io/nkeys v0.4.4/go.mod 
h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -377,8 +376,8 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/automaxprocs v1.5.1 h1:e1YG66Lrk73dn4qhg8WFSvhF0JuFQF0ERIp4rpuV8Qk= -go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -396,7 +395,6 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= diff --git a/internal/testhelper/logging.go b/internal/testhelper/logging.go index ddabbd70d..d0382bb58 100644 --- a/internal/testhelper/logging.go +++ b/internal/testhelper/logging.go @@ -82,12 +82,15 @@ func (l *DummyLogger) Tracef(format string, v ...interface{}) { l.Msg = fmt.Sprintf(format, v...) l.aggregate() } + +// ** added by Memphis func (l *DummyLogger) Systemf(format string, v ...interface{}) { l.Lock() defer l.Unlock() l.Msg = fmt.Sprintf(format, v...) l.aggregate() } +// ** added by Memphis // NewDummyLogger creates a dummy logger and allows to ask for logs to be // retained instead of just keeping the most recent. Use retain to provide an diff --git a/logger/log.go b/logger/log.go index d8e8367a1..6afe9b499 100644 --- a/logger/log.go +++ b/logger/log.go @@ -10,6 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + +// Package logger provides logging facilities for the NATS server package logger import ( @@ -33,17 +35,41 @@ type Logger struct { fatalLabel string debugLabel string traceLabel string - systemLabel string + systemLabel string // ** added by Memphis fl *fileLogger } -// NewStdLogger creates a logger with output directed to Stderr -func NewStdLogger(time, debug, trace, colors, pid bool) *Logger { +type LogOption interface { + isLoggerOption() +} + +// LogUTC controls whether timestamps in the log output should be UTC or local time. 
+type LogUTC bool + +func (l LogUTC) isLoggerOption() {} + +func logFlags(time bool, opts ...LogOption) int { flags := 0 if time { flags = log.LstdFlags | log.Lmicroseconds } + for _, opt := range opts { switch v := opt.(type) { case LogUTC: if time && bool(v) { flags |= log.LUTC } } } + + return flags +} + +// NewStdLogger creates a logger with output directed to Stderr +func NewStdLogger(time, debug, trace, colors, pid bool, opts ...LogOption) *Logger { + flags := logFlags(time, opts...) + pre := "" if pid { pre = pidPrefix() } @@ -64,6 +90,7 @@ func NewStdLogger(time, debug, trace, colors, pid bool) *Logger { return l } +// ** added by Memphis // HybridLogPublishFunc is a function used to publish logs type HybridLogPublishFunc func(string, []byte) @@ -140,13 +167,11 @@ func NewMemphisLogger(publishFunc HybridLogPublishFunc, fallbackPublishFunc Hybr hsl.canPublishMu.Unlock() } } +// ** added by Memphis // NewFileLogger creates a logger with output directed to a file -func NewFileLogger(filename string, time, debug, trace, pid bool) *Logger { - flags := 0 - if time { - flags = log.LstdFlags | log.Lmicroseconds - } +func NewFileLogger(filename string, time, debug, trace, pid bool, opts ...LogOption) *Logger { + flags := logFlags(time, opts...) pre := "" if pid { @@ -350,7 +375,7 @@ func setPlainLabelFormats(l *Logger) { l.errorLabel = "[ERR] " l.fatalLabel = "[FTL] " l.traceLabel = "[TRC] " - l.systemLabel = "[SYS] " + l.systemLabel = "[SYS] " // ** added by Memphis } func setColoredLabelFormats(l *Logger) { @@ -358,7 +383,7 @@ func setColoredLabelFormats(l *Logger) { l.infoLabel = fmt.Sprintf(colorFormat, "32", "INF") l.debugLabel = fmt.Sprintf(colorFormat, "36", "DBG") l.warnLabel = fmt.Sprintf(colorFormat, "0;93", "WRN") - l.systemLabel = fmt.Sprintf(colorFormat, "32", "SYS") + l.systemLabel = fmt.Sprintf(colorFormat, "32", "SYS") // ** added by Memphis l.errorLabel = fmt.Sprintf(colorFormat, "31", "ERR") l.fatalLabel = fmt.Sprintf(colorFormat, "31", "FTL") l.traceLabel = fmt.Sprintf(colorFormat, "33", "TRC") @@ -379,10 +404,12 @@ func (l *Logger) Errorf(format string, v ...interface{}) { l.logger.Printf(l.errorLabel+format, v...) } +// ** added by Memphis // Systemf logs a system statement func (l *Logger) Systemf(format string, v ...interface{}) { l.logger.Printf(l.systemLabel+format, v...) } +// ** added by Memphis // Fatalf logs a fatal error func (l *Logger) Fatalf(format string, v ...interface{}) { diff --git a/logger/log_test.go b/logger/log_test.go index a8d13f39f..f49c07e64 100644 --- a/logger/log_test.go +++ b/logger/log_test.go @@ -125,7 +125,7 @@ func TestFileLogger(t *testing.T) { file = createFileAtDir(t, tmpDir, "nats-server:log_") file.Close() - logger = NewFileLogger(file.Name(), true, true, true, true) + logger = NewFileLogger(file.Name(), true, false, true, true) defer logger.Close() logger.Errorf("foo") diff --git a/logger/syslog.go b/logger/syslog.go index 7d7183f27..b535a6593 100644 --- a/logger/syslog.go +++ b/logger/syslog.go @@ -132,7 +132,9 @@ func (l *SysLogger) Tracef(format string, v ...interface{}) { } } +// ** added by Memphis // Systemf logs a system statement func (l *SysLogger) Systemf(format string, v ...interface{}) { l.writer.Notice(fmt.Sprintf(format, v...)) } +// ** added by Memphis diff --git a/logger/syslog_windows.go b/logger/syslog_windows.go index 35de340a7..af277a1ce 100644 --- a/logger/syslog_windows.go +++ b/logger/syslog_windows.go @@ -10,6 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. + +// Package logger logs to the windows event log package logger import ( @@ -109,9 +111,11 @@ func (l *SysLogger) Tracef(format string, v ...interface{}) { } } +// ** added by Memphis // Systemf logs a system statement func (l *SysLogger) Systemf(format string, v ...interface{}) { if l.trace { l.writer.Info(4, formatMsg("SYSTEM", format, v...)) } } +// ** added by Memphis diff --git a/main.go b/main.go index bea9e9512..16975a416 100644 --- a/main.go +++ b/main.go @@ -35,8 +35,9 @@ Usage: nats-server [options] Server Options: -a, --addr, --net Bind to host address (default: 0.0.0.0) - -p, --port Use port for clients (default: 6666) - -n, --name --server_name Server name (default: auto) + -p, --port Use port for clients (default: 4222) + -n, --name + --server_name Server name (default: auto) -P, --pid File to store PID -m, --http_port Use port for http monitoring -ms,--https_port Use port for https monitoring @@ -44,8 +45,8 @@ Server Options: -t Test configuration and exit -sl,--signal <signal>[=<pid>] Send signal to nats-server process (ldm, stop, quit, term, reopen, reload) <pid> can be either a PID (e.g. 1) or the path to a PID file (e.g. /var/run/nats-server.pid) - --client_advertise Client URL to advertise to other servers - --ports_file_dir Creates a ports file in the specified directory (<executable_name>_<pid>.ports). + --client_advertise Client URL to advertise to other servers + --ports_file_dir Creates a ports file in the specified directory (<executable_name>_<pid>.ports). Logging Options: -l, --log File to redirect log output @@ -57,12 +58,12 @@ Logging Options: -VV Verbose trace (traces system account as well) -DV Debug and trace -DVV Debug and verbose trace (traces system account as well) - --log_size_limit Logfile size limit (default: auto) - --max_traced_msg_len Maximum printable length for traced messages (default: unlimited) + --log_size_limit Logfile size limit (default: auto) + --max_traced_msg_len Maximum printable length for traced messages (default: unlimited) JetStream Options: - -js, --jetstream Enable JetStream functionality. - -sd, --store_dir Set the storage directory. + -js, --jetstream Enable JetStream functionality + -sd, --store_dir Set the storage directory Authorization Options: --user User required for connections @@ -85,6 +86,9 @@ Cluster Options: --connect_retries For implicit routes, number of connect retries --cluster_listen Cluster url from which members can solicit routes +Profiling Options: + --profile Profiling HTTP port + Common Options: -h, --help Show this message -v, --version Show version @@ -204,8 +208,6 @@ func main() { s.Warnf("Failed to set GOMAXPROCS: %v", err) } else { defer undo() - // Reset these from the snapshots from init for monitor.go - server.SnapshotMonitorInfo() } s.Noticef("Established connection with the meta-data storage") diff --git a/server/accounts.go b/server/accounts.go index c0a792e79..4c132475a 100644 --- a/server/accounts.go +++ b/server/accounts.go @@ -1,4 +1,4 @@ -// Copyright 2018-2022 The NATS Authors +// Copyright 2018-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at @@ -21,6 +21,7 @@ import ( "hash/fnv" "hash/maphash" "io" + "io/fs" "math" "math/rand" "net/http" @@ -73,7 +74,9 @@ type Account struct { lqws map[string]int32 usersRevoked map[string]int64 mappings []*mapping + lmu sync.RWMutex lleafs []*client + leafClusters map[string]uint64 imports importMap exports exportMap js *jsAccount @@ -165,14 +168,17 @@ const ( Chunked ) -var commaSeparatorRegEx = regexp.MustCompile(`,\s*`) -var partitionMappingFunctionRegEx = regexp.MustCompile(`{{\s*[pP]artition\s*\((.*)\)\s*}}`) -var wildcardMappingFunctionRegEx = regexp.MustCompile(`{{\s*[wW]ildcard\s*\((.*)\)\s*}}`) -var splitFromLeftMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]plit[fF]rom[lL]eft\s*\((.*)\)\s*}}`) -var splitFromRightMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]plit[fF]rom[rR]ight\s*\((.*)\)\s*}}`) -var sliceFromLeftMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]lice[fF]rom[lL]eft\s*\((.*)\)\s*}}`) -var sliceFromRightMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]lice[fF]rom[rR]ight\s*\((.*)\)\s*}}`) -var splitMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]plit\s*\((.*)\)\s*}}`) +// Subject mapping and transform setups. +var ( + commaSeparatorRegEx = regexp.MustCompile(`,\s*`) + partitionMappingFunctionRegEx = regexp.MustCompile(`{{\s*[pP]artition\s*\((.*)\)\s*}}`) + wildcardMappingFunctionRegEx = regexp.MustCompile(`{{\s*[wW]ildcard\s*\((.*)\)\s*}}`) + splitFromLeftMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]plit[fF]rom[lL]eft\s*\((.*)\)\s*}}`) + splitFromRightMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]plit[fF]rom[rR]ight\s*\((.*)\)\s*}}`) + sliceFromLeftMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]lice[fF]rom[lL]eft\s*\((.*)\)\s*}}`) + sliceFromRightMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]lice[fF]rom[rR]ight\s*\((.*)\)\s*}}`) + splitMappingFunctionRegEx = regexp.MustCompile(`{{\s*[sS]plit\s*\((.*)\)\s*}}`) +) // Enum for the subject mapping transform function types const ( @@ -261,8 +267,7 @@ func (a *Account) String() string { // Used to create shallow copies of accounts for transfer // from opts to real accounts in server struct. -func (a *Account) shallowCopy() *Account { - na := NewAccount(a.Name) +func (a *Account) shallowCopy(na *Account) { na.Nkey = a.Nkey na.Issuer = a.Issuer @@ -302,12 +307,14 @@ } } } + na.mappings = a.mappings + if len(na.mappings) > 0 && na.prand == nil { + na.prand = rand.New(rand.NewSource(time.Now().UnixNano())) + } // JetStream na.jsLimits = a.jsLimits // Server config account limits. na.limits = a.limits - - return na } // nextEventID uses its own lock for better concurrency. @@ -372,12 +379,14 @@ func (a *Account) updateRemoteServer(m *AccountNumConns) []*client { mtlce := a.mleafs != jwt.NoLimit && (a.nleafs+a.nrleafs > a.mleafs) if mtlce { // Take ones from the end. + a.lmu.RLock() leafs := a.lleafs over := int(a.nleafs + a.nrleafs - a.mleafs) if over < len(leafs) { leafs = leafs[len(leafs)-over:] } clients = append(clients, leafs...) + a.lmu.RUnlock() } a.mu.Unlock() @@ -607,7 +616,7 @@ func (a *Account) AddMapping(src, dest string) error { return a.AddWeightedMappings(src, NewMapDest(dest, 100)) } -// AddWeightedMapping will add in a weighted mappings for the destinations. +// AddWeightedMappings will add in weighted mappings for the destinations.
// TODO(dlc) - Allow cluster filtering func (a *Account) AddWeightedMappings(src string, dests ...*MapDest) error { a.mu.Lock() @@ -717,13 +726,15 @@ func (a *Account) AddWeightedMappings(src string, dests ...*MapDest) error { a.mappings = append(a.mappings, m) // If we have connected leafnodes make sure to update. - if len(a.lleafs) > 0 { - leafs := append([]*client(nil), a.lleafs...) + if a.nleafs > 0 { // Need to release because lock ordering is client -> account a.mu.Unlock() - for _, lc := range leafs { + // Now grab the leaf list lock. We can hold client lock under this one. + a.lmu.RLock() + for _, lc := range a.lleafs { lc.forceAddToSmap(src) } + a.lmu.RUnlock() a.mu.Lock() } return nil @@ -909,11 +920,17 @@ func (a *Account) addClient(c *client) int { a.sysclients++ } else if c.kind == LEAF { a.nleafs++ - a.lleafs = append(a.lleafs, c) } } a.mu.Unlock() + // If we added a new leaf use the list lock and add it to the list. + if added && c.kind == LEAF { + a.lmu.Lock() + a.lleafs = append(a.lleafs, c) + a.lmu.Unlock() + } + if c != nil && c.srv != nil && added { c.srv.accConnsUpdate(a) } @@ -921,11 +938,38 @@ func (a *Account) addClient(c *client) int { return n } +// For registering clusters for remote leafnodes. +// We only register as the hub. +func (a *Account) registerLeafNodeCluster(cluster string) { + a.mu.Lock() + defer a.mu.Unlock() + if a.leafClusters == nil { + a.leafClusters = make(map[string]uint64) + } + a.leafClusters[cluster]++ +} + +// Check to see if this cluster is isolated, meaning the only one. +// Read Lock should be held. +func (a *Account) isLeafNodeClusterIsolated(cluster string) bool { + if cluster == _EMPTY_ { + return false + } + if len(a.leafClusters) > 1 { + return false + } + return a.leafClusters[cluster] == uint64(a.nleafs) +} + // Helper function to remove leaf nodes. If number of leafnodes gets large // this may need to be optimized out of linear search but believe number // of active leafnodes per account scope to be small and therefore cache friendly. -// Lock should be held on account. +// Lock should not be held on general account lock. func (a *Account) removeLeafNode(c *client) { + // Make sure we hold the list lock as well. + a.lmu.Lock() + defer a.lmu.Unlock() + ll := len(a.lleafs) for i, l := range a.lleafs { if l == c { @@ -951,11 +995,24 @@ func (a *Account) removeClient(c *client) int { a.sysclients-- } else if c.kind == LEAF { a.nleafs-- - a.removeLeafNode(c) + // Need to do cluster accounting here. + // Do cluster accounting if we are a hub. + if c.isHubLeafNode() { + cluster := c.remoteCluster() + if count := a.leafClusters[cluster]; count > 1 { + a.leafClusters[cluster]-- + } else if count == 1 { + delete(a.leafClusters, cluster) + } + } } } a.mu.Unlock() + if removed && c.kind == LEAF { + a.removeLeafNode(c) + } + if c != nil && c.srv != nil && removed { c.srv.mu.Lock() doRemove := a != c.srv.gacc @@ -1342,7 +1399,7 @@ func (a *Account) sendBackendErrorTrackingLatency(si *serviceImport, reason rsiR a.sendLatencyResult(si, sl) } -// sendTrackingMessage will send out the appropriate tracking information for the +// sendTrackingLatency will send out the appropriate tracking information for the // service request/response latency. This is called when the requestor's server has // received the response. // TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit. 
@@ -1372,7 +1429,7 @@ func (a *Account) sendTrackingLatency(si *serviceImport, responder *client) bool } sl.RequestStart = time.Unix(0, si.ts-int64(reqRTT)).UTC() sl.ServiceLatency = serviceRTT - respRTT - sl.TotalLatency = sl.Requestor.RTT + serviceRTT + sl.TotalLatency = reqRTT + serviceRTT if respRTT > 0 { sl.SystemLatency = time.Since(ts) sl.TotalLatency += sl.SystemLatency @@ -1602,7 +1659,7 @@ func (a *Account) NumPendingAllResponses() int { return a.NumPendingResponses(_EMPTY_) } -// NumResponsesPending returns the number of responses outstanding for service exports +// NumPendingResponses returns the number of responses outstanding for service exports // on this account. An empty filter string returns all responses regardless of which export. // If you specify the filter we will only return ones that are for that export. // NOTE this is only for what this server is tracking. @@ -1988,7 +2045,7 @@ func (a *Account) addServiceImportSub(si *serviceImport) error { // This is similar to what initLeafNodeSmapAndSendSubs does // TODO we need to consider performing this update as we get client subscriptions. // This behavior would result in subscription propagation only where actually used. - a.srv.updateLeafNodes(a, sub, 1) + a.updateLeafNodes(sub, 1) return nil } @@ -2017,7 +2074,14 @@ func (a *Account) removeAllServiceImportSubs() { // Add in subscriptions for all registered service imports. func (a *Account) addAllServiceImportSubs() { + var sis [32]*serviceImport + serviceImports := sis[:0] + a.mu.RLock() for _, si := range a.imports.services { + serviceImports = append(serviceImports, si) + } + a.mu.RUnlock() + for _, si := range serviceImports { a.addServiceImportSub(si) } } @@ -2237,7 +2301,7 @@ func (si *serviceImport) isRespServiceImport() bool { return si != nil && si.response } -// Sets the response theshold timer for a service export. +// Sets the response threshold timer for a service export. // Account lock should be held func (se *serviceExport) setResponseThresholdTimer() { if se.rtmr != nil { @@ -2794,7 +2858,12 @@ func (a *Account) checkStreamImportsEqual(b *Account) bool { return true } +// Returns true if `a` and `b` stream exports are the same. +// Acquires `a` read lock, but `b` is assumed to not be accessed +// by anyone but the caller (`b` is not registered anywhere). func (a *Account) checkStreamExportsEqual(b *Account) bool { + a.mu.RLock() + defer a.mu.RUnlock() if len(a.exports.streams) != len(b.exports.streams) { return false } @@ -2803,14 +2872,29 @@ func (a *Account) checkStreamExportsEqual(b *Account) bool { if !ok { return false } - if !reflect.DeepEqual(aea, bea) { + if !isStreamExportEqual(aea, bea) { return false } } return true } +func isStreamExportEqual(a, b *streamExport) bool { + if a == nil && b == nil { + return true + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + return isExportAuthEqual(&a.exportAuth, &b.exportAuth) +} + +// Returns true if `a` and `b` service exports are the same. +// Acquires `a` read lock, but `b` is assumed to not be accessed +// by anyone but the caller (`b` is not registered anywhere). 
func (a *Account) checkServiceExportsEqual(b *Account) bool { + a.mu.RLock() + defer a.mu.RUnlock() if len(a.exports.services) != len(b.exports.services) { return false } @@ -2819,7 +2903,66 @@ func (a *Account) checkServiceExportsEqual(b *Account) bool { if !ok { return false } - if !reflect.DeepEqual(aea, bea) { + if !isServiceExportEqual(aea, bea) { + return false + } + } + return true +} + +func isServiceExportEqual(a, b *serviceExport) bool { + if a == nil && b == nil { + return true + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + if !isExportAuthEqual(&a.exportAuth, &b.exportAuth) { + return false + } + if a.acc.Name != b.acc.Name { + return false + } + if a.respType != b.respType { + return false + } + if a.latency != nil || b.latency != nil { + if (a.latency != nil && b.latency == nil) || (a.latency == nil && b.latency != nil) { + return false + } + if a.latency.sampling != b.latency.sampling { + return false + } + if a.latency.subject != b.latency.subject { + return false + } + } + return true +} + +// Returns true if `a` and `b` exportAuth structures are equal. +// Both `a` and `b` are guaranteed to be non-nil. +// Locking is handled by the caller. +func isExportAuthEqual(a, b *exportAuth) bool { + if a.tokenReq != b.tokenReq { + return false + } + if a.accountPos != b.accountPos { + return false + } + if len(a.approved) != len(b.approved) { + return false + } + for ak, av := range a.approved { + if bv, ok := b.approved[ak]; !ok || av.Name != bv.Name { + return false + } + } + if len(a.actsRevoked) != len(b.actsRevoked) { + return false + } + for ak, av := range a.actsRevoked { + if bv, ok := b.actsRevoked[ak]; !ok || av != bv { return false } } @@ -2969,7 +3112,7 @@ func (a *Account) isClaimAccount() bool { return a.claimJWT != _EMPTY_ } -// updateAccountClaims will update an existing account with new claims. +// UpdateAccountClaims will update an existing account with new claims. // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. 
func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) { @@ -3360,7 +3503,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim a.jsLimits = nil } - a.updated = time.Now().UTC() + a.updated = time.Now() clients := a.getClientsLocked() a.mu.Unlock() @@ -3641,10 +3784,11 @@ func (ur *URLAccResolver) Fetch(name string) (string, error) { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", redactURLString(url), err) } else if resp == nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", redactURLString(url)) - } else if resp.StatusCode != http.StatusOK { - return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", redactURLString(url), resp.Status) } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", redactURLString(url), resp.Status) + } body, err := io.ReadAll(resp.Body) if err != nil { return _EMPTY_, err @@ -3819,7 +3963,7 @@ func removeCb(s *Server, pubKey string) { a.mpay = 0 a.mconns = 0 a.mleafs = 0 - a.updated = time.Now().UTC() + a.updated = time.Now() jsa := a.js a.mu.Unlock() // set the account to be expired and disconnect clients @@ -3851,17 +3995,19 @@ func (dr *DirAccResolver) Start(s *Server) error { dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); ok { if theJwt, err := dr.LoadAcc(pubKey); err != nil { - s.Errorf("update got error on load: %v", err) + s.Errorf("DirResolver - Update got error on load: %v", err) } else { acc := v.(*Account) if err = s.updateAccountWithClaimJWT(acc, theJwt); err != nil { - s.Errorf("update resulted in error %v", err) + s.Errorf("DirResolver - Update for account %q resulted in error %v", pubKey, err) } else { if _, jsa, err := acc.checkForJetStream(); err != nil { - s.Warnf("error checking for JetStream enabled error %v", err) + if !IsNatsErr(err, JSNotEnabledForAccountErr) { + s.Warnf("DirResolver - Error checking for JetStream support for account %q: %v", pubKey, err) + } } else if jsa == nil { if err = s.configJetStream(acc); err != nil { - s.Errorf("updated resulted in error when configuring JetStream %v", err) + s.Errorf("DirResolver - Error configuring JetStream for account %q: %v", pubKey, err) } } } @@ -3882,7 +4028,7 @@ func (dr *DirAccResolver) Start(s *Server) error { } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { - s.Debugf("jwt update skipped due to bad subject %q", subj) + s.Debugf("DirResolver - jwt update skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { @@ -3932,8 +4078,15 @@ func (dr *DirAccResolver) Start(s *Server) error { if len(tk) != accLookupReqTokens { return } - if theJWT, err := dr.DirJWTStore.LoadAcc(tk[accReqAccIndex]); err != nil { - s.Errorf("Merging resulted in error: %v", err) + accName := tk[accReqAccIndex] + if theJWT, err := dr.DirJWTStore.LoadAcc(accName); err != nil { + if errors.Is(err, fs.ErrNotExist) { + s.Debugf("DirResolver - Could not find account %q", accName) + // Reply with empty response to signal absence of JWT to others. 
+ s.sendInternalMsgLocked(reply, _EMPTY_, nil, nil) + } else { + s.Errorf("DirResolver - Error looking up account %q: %v", accName, err) + } } else { s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte(theJWT)) } @@ -3941,7 +4094,7 @@ func (dr *DirAccResolver) Start(s *Server) error { return fmt.Errorf("error setting up lookup request handling: %v", err) } // respond to pack requests with one or more pack messages - // an empty message signifies the end of the response responder + // an empty message signifies the end of the response responder. if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _ *Account, _, reply string, theirHash []byte) { if reply == _EMPTY_ { return @@ -3949,14 +4102,14 @@ func (dr *DirAccResolver) Start(s *Server) error { ourHash := dr.DirJWTStore.Hash() if bytes.Equal(theirHash, ourHash[:]) { s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte{}) - s.Debugf("pack request matches hash %x", ourHash[:]) + s.Debugf("DirResolver - Pack request matches hash %x", ourHash[:]) } else if err := dr.DirJWTStore.PackWalk(1, func(partialPackMsg string) { s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte(partialPackMsg)) }); err != nil { // let them timeout - s.Errorf("pack request error: %v", err) + s.Errorf("DirResolver - Pack request error: %v", err) } else { - s.Debugf("pack request hash %x - finished responding with hash %x", theirHash, ourHash) + s.Debugf("DirResolver - Pack request hash %x - finished responding with hash %x", theirHash, ourHash) s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte{}) } }); err != nil { @@ -3977,12 +4130,12 @@ func (dr *DirAccResolver) Start(s *Server) error { if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _ *Account, _, _ string, msg []byte) { hash := dr.DirJWTStore.Hash() if len(msg) == 0 { // end of response stream - s.Debugf("Merging Finished and resulting in: %x", dr.DirJWTStore.Hash()) + s.Debugf("DirResolver - Merging finished and resulting in: %x", dr.DirJWTStore.Hash()) return } else if err := dr.DirJWTStore.Merge(string(msg)); err != nil { - s.Errorf("Merging resulted in error: %v", err) + s.Errorf("DirResolver - Merging resulted in error: %v", err) } else { - s.Debugf("Merging succeeded and changed %x to %x", hash, dr.DirJWTStore.Hash()) + s.Debugf("DirResolver - Merging succeeded and changed %x to %x", hash, dr.DirJWTStore.Hash()) } }); err != nil { return fmt.Errorf("error setting up pack response handling: %v", err) @@ -4000,7 +4153,7 @@ func (dr *DirAccResolver) Start(s *Server) error { case <-ticker.C: } ourHash := dr.DirJWTStore.Hash() - s.Debugf("Checking store state: %x", ourHash) + s.Debugf("DirResolver - Checking store state: %x", ourHash) s.sendInternalMsgLocked(accPackReqSubj, packRespIb, nil, ourHash[:]) } }) @@ -4049,18 +4202,14 @@ func (dr *DirAccResolver) apply(opts ...DirResOption) error { return nil } -func NewDirAccResolver(path string, limit int64, syncInterval time.Duration, delete bool, opts ...DirResOption) (*DirAccResolver, error) { +func NewDirAccResolver(path string, limit int64, syncInterval time.Duration, delete deleteType, opts ...DirResOption) (*DirAccResolver, error) { if limit == 0 { limit = math.MaxInt64 } if syncInterval <= 0 { syncInterval = time.Minute } - deleteType := NoDelete - if delete { - deleteType = RenameDeleted - } - store, err := NewExpiringDirJWTStore(path, false, true, deleteType, 0, limit, false, 0, nil) + store, err := NewExpiringDirJWTStore(path, false, true, delete, 0, limit, false, 0, nil) if err != nil { return 
nil, err } @@ -4089,20 +4238,35 @@ func (s *Server) fetch(res AccountResolver, name string, timeout time.Duration) s.mu.Unlock() return _EMPTY_, fmt.Errorf("eventing shut down") } + // Resolver will wait for detected active servers to reply + // before serving an error in case there weren't any found. + expectedServers := len(s.sys.servers) replySubj := s.newRespInbox() replies := s.sys.replies + // Store our handler. replies[replySubj] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { - clone := make([]byte, len(msg)) - copy(clone, msg) + var clone []byte + isEmpty := len(msg) == 0 + if !isEmpty { + clone = make([]byte, len(msg)) + copy(clone, msg) + } s.mu.Lock() + defer s.mu.Unlock() + expectedServers-- + // Skip empty responses until getting all the available servers. + if isEmpty && expectedServers > 0 { + return + } + // Use the first valid response if there is still interest or + // one of the empty responses to signal that it was not found. if _, ok := replies[replySubj]; ok { select { - case respC <- clone: // only use first response and only if there is still interest + case respC <- clone: default: } } - s.mu.Unlock() } s.sendInternalMsg(accountLookupRequest, replySubj, nil, []byte{}) quit := s.quitCh @@ -4115,7 +4279,9 @@ func (s *Server) fetch(res AccountResolver, name string, timeout time.Duration) case <-time.After(timeout): err = errors.New("fetching jwt timed out") case m := <-respC: - if err = res.Store(name, string(m)); err == nil { + if len(m) == 0 { + err = errors.New("account jwt not found") + } else if err = res.Store(name, string(m)); err == nil { theJWT = string(m) } } @@ -4153,9 +4319,9 @@ func (dr *CacheDirAccResolver) Start(s *Server) error { dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if theJwt, err := dr.LoadAcc(pubKey); err != nil { - s.Errorf("update got error on load: %v", err) + s.Errorf("DirResolver - Update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), theJwt); err != nil { - s.Errorf("update resulted in error %v", err) + s.Errorf("DirResolver - Update resulted in error %v", err) } } dr.DirJWTStore.deleted = func(pubKey string) { @@ -4171,7 +4337,7 @@ func (dr *CacheDirAccResolver) Start(s *Server) error { } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { - s.Debugf("jwt update cache skipped due to bad subject %q", subj) + s.Debugf("DirResolver - jwt update cache skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { diff --git a/server/accounts_test.go b/server/accounts_test.go index 623ba9c07..e54f8903a 100644 --- a/server/accounts_test.go +++ b/server/accounts_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2022 The NATS Authors +// Copyright 2018-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -269,7 +269,7 @@ func TestAccountIsolationExportImport(t *testing.T) { // Setup NATS server. s := opTrustBasicSetup() defer s.Shutdown() - go s.Start() + s.Start() if err := s.readyForConnections(5 * time.Second); err != nil { t.Fatal(err) } @@ -1733,7 +1733,7 @@ func TestAccountRequestReplyTrackLatency(t *testing.T) { defer s.Shutdown() // Run server in Go routine. We need this one running for internal sending of msgs. 
- go s.Start() + s.Start() // Wait for accept loop(s) to be started if err := s.readyForConnections(10 * time.Second); err != nil { t.Fatal(err) @@ -2099,12 +2099,17 @@ func TestCrossAccountServiceResponseTypes(t *testing.T) { cfoo.parseAsync(string(mReply)) - var b [256]byte - n, err := crBar.Read(b[:]) - if err != nil { - t.Fatalf("Error reading response: %v", err) + var buf []byte + for i := 0; i < 20; i++ { + b, err := crBar.ReadBytes('\n') + if err != nil { + t.Fatalf("Error reading response: %v", err) + } + buf = append(buf[:], b...) + if mraw = msgPat.FindAllStringSubmatch(string(buf), -1); len(mraw) == 10 { + break + } } - mraw = msgPat.FindAllStringSubmatch(string(b[:n]), -1) if len(mraw) != 10 { t.Fatalf("Expected a response but got %d", len(mraw)) } @@ -3678,3 +3683,75 @@ func TestAccountImportDuplicateResponseDeliveryWithLeafnodes(t *testing.T) { t.Fatalf("Expected only 1 response, got %d", n) } } + +func TestAccountReloadServiceImportPanic(t *testing.T) { + conf := createConfFile(t, []byte(` + listen: 127.0.0.1:-1 + accounts { + A { + users = [ { user: "a", pass: "p" } ] + exports [ { service: "HELP" } ] + } + B { + users = [ { user: "b", pass: "p" } ] + imports [ { service: { account: A, subject: "HELP"} } ] + } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + } + `)) + + s, _ := RunServerWithConfig(conf) + defer s.Shutdown() + + // Now connect up the subscriber for HELP. No-op for this test. + nc, _ := jsClientConnect(t, s, nats.UserInfo("a", "p")) + _, err := nc.Subscribe("HELP", func(m *nats.Msg) { m.Respond([]byte("OK")) }) + require_NoError(t, err) + defer nc.Close() + + // Now create connection to account b where we will publish to HELP. + nc, _ = jsClientConnect(t, s, nats.UserInfo("b", "p")) + require_NoError(t, err) + defer nc.Close() + + // We want to continually be publishing messages that will trigger the service import while calling reload. + done := make(chan bool) + var wg sync.WaitGroup + wg.Add(1) + + var requests, responses atomic.Uint64 + reply := nats.NewInbox() + _, err = nc.Subscribe(reply, func(m *nats.Msg) { responses.Add(1) }) + require_NoError(t, err) + + go func() { + defer wg.Done() + for { + select { + case <-done: + return + default: + nc.PublishRequest("HELP", reply, []byte("HELP")) + requests.Add(1) + } + } + }() + + // Perform a bunch of reloads. + for i := 0; i < 1000; i++ { + err := s.Reload() + require_NoError(t, err) + } + + close(done) + wg.Wait() + + totalRequests := requests.Load() + checkFor(t, 10*time.Second, 250*time.Millisecond, func() error { + resp := responses.Load() + if resp == totalRequests { + return nil + } + return fmt.Errorf("Have not received all responses, want %d got %d", totalRequests, resp) + }) +} diff --git a/server/auth.go b/server/auth.go index 8f755863e..fb9eeb0c0 100644 --- a/server/auth.go +++ b/server/auth.go @@ -171,13 +171,14 @@ func (p *Permissions) clone() *Permissions { // Lock is assumed held. func (s *Server) checkAuthforWarnings() { warn := false - if s.opts.Password != _EMPTY_ && !isBcrypt(s.opts.Password) { + opts := s.getOpts() + if opts.Password != _EMPTY_ && !isBcrypt(opts.Password) { warn = true } for _, u := range s.users { // Skip warn if using TLS certs based auth // unless a password has been left in the config. - if u.Password == _EMPTY_ && s.opts.TLSMap { + if u.Password == _EMPTY_ && opts.TLSMap { continue } // Check if this is our internal sys client created on the fly. 
@@ -937,6 +938,8 @@ func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) boo
 	if c.kind == CLIENT {
 		if token != _EMPTY_ {
+
+			// ** added by Memphis
 			if !strings.Contains(c.opts.Name, connectItemSep) { // if the Name field does not contain '::', this is a native NATS SDK client
 				tokenSplit := strings.Split(c.opts.Token, connectItemSep)
@@ -945,6 +948,8 @@ func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) boo
 				}
 				return comparePasswords(token, tokenSplit[1])
 			}
+			// ** added by Memphis
+
 			return comparePasswords(token, c.opts.Token)
 		} else if username != _EMPTY_ {
 			if username != c.opts.Username {
diff --git a/server/auth_test.go b/server/auth_test.go
index c4045fa58..dcd876492 100644
--- a/server/auth_test.go
+++ b/server/auth_test.go
@@ -10,6 +10,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+
 package server
 
 import (
diff --git a/server/certidp/certidp.go b/server/certidp/certidp.go
new file mode 100644
index 000000000..f7b660dff
--- /dev/null
+++ b/server/certidp/certidp.go
@@ -0,0 +1,297 @@
+// Copyright 2023 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package certidp + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strings" + "time" + + "golang.org/x/crypto/ocsp" +) + +const ( + DefaultAllowedClockSkew = 30 * time.Second + DefaultOCSPResponderTimeout = 2 * time.Second + DefaultTTLUnsetNextUpdate = 1 * time.Hour +) + +type StatusAssertion int + +var ( + StatusAssertionStrToVal = map[string]StatusAssertion{ + "good": ocsp.Good, + "revoked": ocsp.Revoked, + "unknown": ocsp.Unknown, + } + StatusAssertionValToStr = map[StatusAssertion]string{ + ocsp.Good: "good", + ocsp.Revoked: "revoked", + ocsp.Unknown: "unknown", + } + StatusAssertionIntToVal = map[int]StatusAssertion{ + 0: ocsp.Good, + 1: ocsp.Revoked, + 2: ocsp.Unknown, + } +) + +func GetStatusAssertionStr(sa int) string { + return StatusAssertionValToStr[StatusAssertionIntToVal[sa]] +} + +func (sa StatusAssertion) MarshalJSON() ([]byte, error) { + str, ok := StatusAssertionValToStr[sa] + if !ok { + // set unknown as fallback + str = StatusAssertionValToStr[ocsp.Unknown] + } + return json.Marshal(str) +} + +func (sa *StatusAssertion) UnmarshalJSON(in []byte) error { + v, ok := StatusAssertionStrToVal[strings.ReplaceAll(string(in), "\"", "")] + if !ok { + // set unknown as fallback + v = StatusAssertionStrToVal["unknown"] + } + *sa = v + return nil +} + +type ChainLink struct { + Leaf *x509.Certificate + Issuer *x509.Certificate + OCSPWebEndpoints *[]*url.URL +} + +// OCSPPeerConfig holds the parsed OCSP peer configuration section of TLS configuration +type OCSPPeerConfig struct { + Verify bool + Timeout float64 + ClockSkew float64 + WarnOnly bool + UnknownIsGood bool + AllowWhenCAUnreachable bool + TTLUnsetNextUpdate float64 +} + +func NewOCSPPeerConfig() *OCSPPeerConfig { + return &OCSPPeerConfig{ + Verify: false, + Timeout: DefaultOCSPResponderTimeout.Seconds(), + ClockSkew: DefaultAllowedClockSkew.Seconds(), + WarnOnly: false, + UnknownIsGood: false, + AllowWhenCAUnreachable: false, + TTLUnsetNextUpdate: DefaultTTLUnsetNextUpdate.Seconds(), + } +} + +// Log is a neutral method of passing server loggers to plugins +type Log struct { + Debugf func(format string, v ...interface{}) + Noticef func(format string, v ...interface{}) + Warnf func(format string, v ...interface{}) + Errorf func(format string, v ...interface{}) + Tracef func(format string, v ...interface{}) +} + +type CertInfo struct { + Subject string `json:"subject,omitempty"` + Issuer string `json:"issuer,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + Raw []byte `json:"raw,omitempty"` +} + +var OCSPPeerUsage = ` +For client, leaf spoke (remotes), and leaf hub connections, you may enable OCSP peer validation: + + tls { + ... + # mTLS must be enabled (with exception of Leaf remotes) + verify: true + ... 
+ # short form enables peer verify and takes option defaults + ocsp_peer: true + + # long form includes settable options + ocsp_peer { + # Enable OCSP peer validation (default false) + verify: true + + # OCSP responder timeout in seconds (may be fractional, default 2 seconds) + ca_timeout: 2 + + # Allowed skew between server and OCSP responder time in seconds (may be fractional, default 30 seconds) + allowed_clockskew: 30 + + # Warn-only and never reject connections (default false) + warn_only: false + + # Treat response Unknown status as valid certificate (default false) + unknown_is_good: false + + # Warn-only if no CA response can be obtained and no cached revocation exists (default false) + allow_when_ca_unreachable: false + + # If response NextUpdate unset by CA, set a default cache TTL in seconds from ThisUpdate (default 1 hour) + cache_ttl_when_next_update_unset: 3600 + } + ... + } + +Note: OCSP validation for route and gateway connections is enabled using the 'ocsp' configuration option. +` + +// GenerateFingerprint returns a base64-encoded SHA256 hash of the raw certificate +func GenerateFingerprint(cert *x509.Certificate) string { + data := sha256.Sum256(cert.Raw) + return base64.StdEncoding.EncodeToString(data[:]) +} + +func getWebEndpoints(uris []string) []*url.URL { + var urls []*url.URL + for _, uri := range uris { + endpoint, err := url.ParseRequestURI(uri) + if err != nil { + // skip invalid URLs + continue + } + if endpoint.Scheme != "http" && endpoint.Scheme != "https" { + // skip non-web URLs + continue + } + urls = append(urls, endpoint) + } + return urls +} + +// GetSubjectDNForm returns RDN sequence concatenation of the certificate's subject to be +// used in logs, events, etc. Should never be used for reliable cache matching or other crypto purposes. +func GetSubjectDNForm(cert *x509.Certificate) string { + if cert == nil { + return "" + } + return strings.TrimSuffix(fmt.Sprintf("%s+", cert.Subject.ToRDNSequence()), "+") +} + +// GetIssuerDNForm returns RDN sequence concatenation of the certificate's issuer to be +// used in logs, events, etc. Should never be used for reliable cache matching or other crypto purposes. +func GetIssuerDNForm(cert *x509.Certificate) string { + if cert == nil { + return "" + } + return strings.TrimSuffix(fmt.Sprintf("%s+", cert.Issuer.ToRDNSequence()), "+") +} + +// CertOCSPEligible checks if the certificate's issuer has populated AIA with OCSP responder endpoint(s) +// and is thus eligible for OCSP validation +func CertOCSPEligible(link *ChainLink) bool { + if link == nil || link.Leaf.Raw == nil || len(link.Leaf.Raw) == 0 { + return false + } + if link.Leaf.OCSPServer == nil || len(link.Leaf.OCSPServer) == 0 { + return false + } + urls := getWebEndpoints(link.Leaf.OCSPServer) + if len(urls) == 0 { + return false + } + link.OCSPWebEndpoints = &urls + return true +} + +// GetLeafIssuerCert returns the issuer certificate of the leaf (positional) certificate in the chain +func GetLeafIssuerCert(chain []*x509.Certificate, leafPos int) *x509.Certificate { + if len(chain) == 0 || leafPos < 0 { + return nil + } + // self-signed certificate or too-big leafPos + if leafPos >= len(chain)-1 { + return nil + } + // returns pointer to issuer cert or nil + return (chain)[leafPos+1] +} + +// OCSPResponseCurrent checks if the OCSP response is current (i.e. 
not expired and not future-dated)
+func OCSPResponseCurrent(ocspr *ocsp.Response, opts *OCSPPeerConfig, log *Log) bool {
+	skew := time.Duration(opts.ClockSkew * float64(time.Second))
+	if skew < 0*time.Second {
+		skew = DefaultAllowedClockSkew
+	}
+	now := time.Now().UTC()
+	// Typical effectivity check based on CA response ThisUpdate and NextUpdate semantics
+	if !ocspr.NextUpdate.IsZero() && ocspr.NextUpdate.Before(now.Add(-1*skew)) {
+		t := ocspr.NextUpdate.Format(time.RFC3339Nano)
+		nt := now.Format(time.RFC3339Nano)
+		log.Debugf(DbgResponseExpired, t, nt, skew)
+		return false
+	}
+	// CA responder can assert NextUpdate unset, in which case use config option to set a default cache TTL
+	if ocspr.NextUpdate.IsZero() {
+		ttl := time.Duration(opts.TTLUnsetNextUpdate * float64(time.Second))
+		if ttl < 0*time.Second {
+			ttl = DefaultTTLUnsetNextUpdate
+		}
+		expiryTime := ocspr.ThisUpdate.Add(ttl)
+		if expiryTime.Before(now.Add(-1 * skew)) {
+			t := expiryTime.Format(time.RFC3339Nano)
+			nt := now.Format(time.RFC3339Nano)
+			log.Debugf(DbgResponseTTLExpired, t, nt, skew)
+			return false
+		}
+	}
+	if ocspr.ThisUpdate.After(now.Add(skew)) {
+		t := ocspr.ThisUpdate.Format(time.RFC3339Nano)
+		nt := now.Format(time.RFC3339Nano)
+		log.Debugf(DbgResponseFutureDated, t, nt, skew)
+		return false
+	}
+	return true
+}
+
+// ValidDelegationCheck checks if the CA OCSP Response was signed by a valid CA Issuer delegate as per RFC 6960, section 4.2.2.2.
+// Returns true for a valid delegate, or for a response signed directly by the CA Issuer.
+func ValidDelegationCheck(iss *x509.Certificate, ocspr *ocsp.Response) bool {
+	// This call assumes prior successful parse and signature validation of the OCSP response
+	// The Go OCSP library (as of x/crypto/ocsp v0.9) will detect and perform a 1-level delegate signature check but does not
+	// implement the additional criteria for delegation specified in RFC 6960, section 4.2.2.2.
+	if iss == nil || ocspr == nil {
+		return false
+	}
+	// not a delegation, no-op
+	if ocspr.Certificate == nil {
+		return true
+	}
+	// the delegate certificate is the CA Issuer itself, so not a true delegation even though the response is issued in that form
+	if ocspr.Certificate.Equal(iss) {
+		return true
+	}
+	// otherwise, verify that the CA Issuer stamped id-kp-OCSPSigning on the delegate
+	delegatedSigner := false
+	for _, keyUseExt := range ocspr.Certificate.ExtKeyUsage {
+		if keyUseExt == x509.ExtKeyUsageOCSPSigning {
+			delegatedSigner = true
+			break
+		}
+	}
+	return delegatedSigner
+}
diff --git a/server/certidp/messages.go b/server/certidp/messages.go
new file mode 100644
index 000000000..52a799ac8
--- /dev/null
+++ b/server/certidp/messages.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
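
A worked example of the two effectivity windows in OCSPResponseCurrent above. The response literals and no-op logger are test scaffolding, and the import path is a placeholder, not the module's real path:

    package main

    import (
    	"fmt"
    	"time"

    	"example.com/yourmod/server/certidp" // placeholder import path
    	"golang.org/x/crypto/ocsp"
    )

    func main() {
    	cfg := certidp.NewOCSPPeerConfig() // defaults: 2s timeout, 30s skew, 1h TTL
    	log := &certidp.Log{Debugf: func(format string, v ...interface{}) {}}

    	// NextUpdate present: current while NextUpdate >= now-skew and ThisUpdate <= now+skew.
    	r1 := &ocsp.Response{
    		ThisUpdate: time.Now().Add(-10 * time.Minute),
    		NextUpdate: time.Now().Add(10 * time.Minute),
    	}
    	fmt.Println(certidp.OCSPResponseCurrent(r1, cfg, log)) // true

    	// NextUpdate unset: the default 1h cache TTL is measured from ThisUpdate instead.
    	r2 := &ocsp.Response{ThisUpdate: time.Now().Add(-2 * time.Hour)}
    	fmt.Println(certidp.OCSPResponseCurrent(r2, cfg, log)) // false (2h old > 1h TTL + skew)
    }
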
+ +package certidp + +var ( + // Returned errors + ErrIllegalPeerOptsConfig = "expected map to define OCSP peer options, got [%T]" + ErrIllegalCacheOptsConfig = "expected map to define OCSP peer cache options, got [%T]" + ErrParsingPeerOptFieldGeneric = "error parsing tls peer config, unknown field [%q]" + ErrParsingPeerOptFieldTypeConversion = "error parsing tls peer config, conversion error: %s" + ErrParsingCacheOptFieldTypeConversion = "error parsing OCSP peer cache config, conversion error: %s" + ErrUnableToPlugTLSEmptyConfig = "unable to plug TLS verify connection, config is nil" + ErrMTLSRequired = "OCSP peer verification for client connections requires TLS verify (mTLS) to be enabled" + ErrUnableToPlugTLSClient = "unable to register client OCSP verification" + ErrUnableToPlugTLSServer = "unable to register server OCSP verification" + ErrCannotWriteCompressed = "error writing to compression writer: %w" + ErrCannotReadCompressed = "error reading compression reader: %w" + ErrTruncatedWrite = "short write on body (%d != %d)" + ErrCannotCloseWriter = "error closing compression writer: %w" + ErrParsingCacheOptFieldGeneric = "error parsing OCSP peer cache config, unknown field [%q]" + ErrUnknownCacheType = "error parsing OCSP peer cache config, unknown type [%s]" + ErrInvalidChainlink = "invalid chain link" + ErrBadResponderHTTPStatus = "bad OCSP responder http status: [%d]" + ErrNoAvailOCSPServers = "no available OCSP servers" + ErrFailedWithAllRequests = "exhausted OCSP responders: %w" + + // Direct logged errors + ErrLoadCacheFail = "Unable to load OCSP peer cache: %s" + ErrSaveCacheFail = "Unable to save OCSP peer cache: %s" + ErrBadCacheTypeConfig = "Unimplemented OCSP peer cache type [%v]" + ErrResponseCompressFail = "Unable to compress OCSP response for key [%s]: %s" + ErrResponseDecompressFail = "Unable to decompress OCSP response for key [%s]: %s" + ErrPeerEmptyNoEvent = "Peer certificate is nil, cannot send OCSP peer reject event" + ErrPeerEmptyAutoReject = "Peer certificate is nil, rejecting OCSP peer" + + // Debug information + DbgPlugTLSForKind = "Plugging TLS OCSP peer for [%s]" + DbgNumServerChains = "Peer OCSP enabled: %d TLS server chain(s) will be evaluated" + DbgNumClientChains = "Peer OCSP enabled: %d TLS client chain(s) will be evaluated" + DbgLinksInChain = "Chain [%d]: %d total link(s)" + DbgSelfSignedValid = "Chain [%d] is self-signed, thus peer is valid" + DbgValidNonOCSPChain = "Chain [%d] has no OCSP eligible links, thus peer is valid" + DbgChainIsOCSPEligible = "Chain [%d] has %d OCSP eligible link(s)" + DbgChainIsOCSPValid = "Chain [%d] is OCSP valid for all eligible links, thus peer is valid" + DbgNoOCSPValidChains = "No OCSP valid chains, thus peer is invalid" + DbgCheckingCacheForCert = "Checking OCSP peer cache for [%s], key [%s]" + DbgCurrentResponseCached = "Cached OCSP response is current, status [%s]" + DbgExpiredResponseCached = "Cached OCSP response is expired, status [%s]" + DbgOCSPValidPeerLink = "OCSP verify pass for [%s]" + DbgCachingResponse = "Caching OCSP response for [%s], key [%s]" + DbgAchievedCompression = "OCSP response compression ratio: [%f]" + DbgCacheHit = "OCSP peer cache hit for key [%s]" + DbgCacheMiss = "OCSP peer cache miss for key [%s]" + DbgPreservedRevocation = "Revoked OCSP response for key [%s] preserved by cache policy" + DbgDeletingCacheResponse = "Deleting OCSP peer cached response for key [%s]" + DbgStartingCache = "Starting OCSP peer cache" + DbgStoppingCache = "Stopping OCSP peer cache" + DbgLoadingCache = "Loading OCSP 
peer cache [%s]" + DbgNoCacheFound = "No OCSP peer cache found, starting with empty cache" + DbgSavingCache = "Saving OCSP peer cache [%s]" + DbgCacheSaved = "Saved OCSP peer cache successfully (%d bytes)" + DbgMakingCARequest = "Trying OCSP responder url [%s]" + DbgResponseExpired = "OCSP response NextUpdate [%s] is before now [%s] with clockskew [%s]" + DbgResponseTTLExpired = "OCSP response cache expiry [%s] is before now [%s] with clockskew [%s]" + DbgResponseFutureDated = "OCSP response ThisUpdate [%s] is before now [%s] with clockskew [%s]" + DbgCacheSaveTimerExpired = "OCSP peer cache save timer expired" + DbgCacheDirtySave = "OCSP peer cache is dirty, saving" + + // Returned to peer as TLS reject reason + MsgTLSClientRejectConnection = "client not OCSP valid" + MsgTLSServerRejectConnection = "server not OCSP valid" + + // Expected runtime errors (direct logged) + ErrCAResponderCalloutFail = "Attempt to obtain OCSP response from CA responder for [%s] failed: %s" + ErrNewCAResponseNotCurrent = "New OCSP CA response obtained for [%s] but not current" + ErrCAResponseParseFailed = "Could not parse OCSP CA response for [%s]: %s" + ErrOCSPInvalidPeerLink = "OCSP verify fail for [%s] with CA status [%s]" + + // Policy override warnings (direct logged) + MsgAllowWhenCAUnreachableOccurred = "Failed to obtain OCSP CA response for [%s] but AllowWhenCAUnreachable set; no cached revocation so allowing" + MsgAllowWhenCAUnreachableOccurredCachedRevoke = "Failed to obtain OCSP CA response for [%s] but AllowWhenCAUnreachable set; cached revocation exists so rejecting" + MsgAllowWarnOnlyOccurred = "OCSP verify fail for [%s] but WarnOnly is true so allowing" + + // Info (direct logged) + MsgCacheOnline = "OCSP peer cache online, type [%s]" + MsgCacheOffline = "OCSP peer cache offline, type [%s]" + + // OCSP cert invalid reasons (debug and event reasons) + MsgFailedOCSPResponseFetch = "Failed OCSP response fetch" + MsgOCSPResponseNotEffective = "OCSP response not in effectivity window" + MsgFailedOCSPResponseParse = "Failed OCSP response parse" + MsgOCSPResponseInvalidStatus = "Invalid OCSP response status: %s" + MsgOCSPResponseDelegationInvalid = "Invalid OCSP response delegation: %s" + MsgCachedOCSPResponseInvalid = "Invalid cached OCSP response for [%s] with fingerprint [%s]" +) diff --git a/server/certidp/ocsp_responder.go b/server/certidp/ocsp_responder.go new file mode 100644 index 000000000..6e210f2b5 --- /dev/null +++ b/server/certidp/ocsp_responder.go @@ -0,0 +1,83 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package certidp + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "strings" + "time" + + "golang.org/x/crypto/ocsp" +) + +func FetchOCSPResponse(link *ChainLink, opts *OCSPPeerConfig, log *Log) ([]byte, error) { + if link == nil || link.Leaf == nil || link.Issuer == nil || opts == nil || log == nil { + return nil, fmt.Errorf(ErrInvalidChainlink) + } + + timeout := time.Duration(opts.Timeout * float64(time.Second)) + if timeout <= 0*time.Second { + timeout = DefaultOCSPResponderTimeout + } + + getRequestBytes := func(u string, hc *http.Client) ([]byte, error) { + resp, err := hc.Get(u) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf(ErrBadResponderHTTPStatus, resp.StatusCode) + } + return io.ReadAll(resp.Body) + } + + // Request documentation: + // https://tools.ietf.org/html/rfc6960#appendix-A.1 + + reqDER, err := ocsp.CreateRequest(link.Leaf, link.Issuer, nil) + if err != nil { + return nil, err + } + + reqEnc := base64.StdEncoding.EncodeToString(reqDER) + + responders := *link.OCSPWebEndpoints + + if len(responders) == 0 { + return nil, fmt.Errorf(ErrNoAvailOCSPServers) + } + + var raw []byte + hc := &http.Client{ + Timeout: timeout, + } + for _, u := range responders { + url := u.String() + log.Debugf(DbgMakingCARequest, url) + url = strings.TrimSuffix(url, "/") + raw, err = getRequestBytes(fmt.Sprintf("%s/%s", url, reqEnc), hc) + if err == nil { + break + } + } + if err != nil { + return nil, fmt.Errorf(ErrFailedWithAllRequests, err) + } + + return raw, nil +} diff --git a/server/certstore/certstore.go b/server/certstore/certstore.go new file mode 100644 index 000000000..3d7dfde60 --- /dev/null +++ b/server/certstore/certstore.go @@ -0,0 +1,102 @@ +// Copyright 2022-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package certstore + +import ( + "crypto" + "crypto/x509" + "io" + "runtime" + "strings" +) + +type StoreType int + +const MATCHBYEMPTY = 0 +const STOREEMPTY = 0 + +const ( + windowsCurrentUser StoreType = iota + 1 + windowsLocalMachine +) + +var StoreMap = map[string]StoreType{ + "windowscurrentuser": windowsCurrentUser, + "windowslocalmachine": windowsLocalMachine, +} + +var StoreOSMap = map[StoreType]string{ + windowsCurrentUser: "windows", + windowsLocalMachine: "windows", +} + +type MatchByType int + +const ( + matchByIssuer MatchByType = iota + 1 + matchBySubject +) + +var MatchByMap = map[string]MatchByType{ + "issuer": matchByIssuer, + "subject": matchBySubject, +} + +var Usage = ` +In place of cert_file and key_file you may use the windows certificate store: + + tls { + cert_store: "WindowsCurrentUser" + cert_match_by: "Subject" + cert_match: "MyServer123" + } +` + +func ParseCertStore(certStore string) (StoreType, error) { + certStoreType, exists := StoreMap[strings.ToLower(certStore)] + if !exists { + return 0, ErrBadCertStore + } + validOS, exists := StoreOSMap[certStoreType] + if !exists || validOS != runtime.GOOS { + return 0, ErrOSNotCompatCertStore + } + return certStoreType, nil +} + +func ParseCertMatchBy(certMatchBy string) (MatchByType, error) { + certMatchByType, exists := MatchByMap[strings.ToLower(certMatchBy)] + if !exists { + return 0, ErrBadMatchByType + } + return certMatchByType, nil +} + +func GetLeafIssuer(leaf *x509.Certificate, vOpts x509.VerifyOptions) (issuer *x509.Certificate) { + chains, err := leaf.Verify(vOpts) + if err != nil || len(chains) == 0 { + issuer = nil + } else { + issuer = chains[0][1] + } + return +} + +// credential provides access to a public key and is a crypto.Signer. +type credential interface { + // Public returns the public key corresponding to the leaf certificate. + Public() crypto.PublicKey + // Sign signs digest with the private key. + Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) +} diff --git a/server/certstore/certstore_other.go b/server/certstore/certstore_other.go new file mode 100644 index 000000000..a72df834a --- /dev/null +++ b/server/certstore/certstore_other.go @@ -0,0 +1,46 @@ +// Copyright 2022-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
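
A quick sketch of validating the new TLS options with the parsers above. The import path is a placeholder; note that ParseCertStore also enforces the current GOOS, so on non-Windows hosts it returns ErrOSNotCompatCertStore even for a well-formed store name:

    package main

    import (
    	"fmt"

    	"example.com/yourmod/server/certstore" // placeholder import path
    )

    func main() {
    	st, err := certstore.ParseCertStore("WindowsCurrentUser")
    	fmt.Println(st, err) // on windows: non-zero store type, <nil>; elsewhere: ErrOSNotCompatCertStore

    	mb, err := certstore.ParseCertMatchBy("Subject") // lookups are case-insensitive
    	fmt.Println(mb, err)

    	_, err = certstore.ParseCertMatchBy("SerialNumber")
    	fmt.Println(err) // ErrBadMatchByType: only "issuer" and "subject" are implemented
    }
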
+ +//go:build !windows + +package certstore + +import ( + "crypto" + "crypto/tls" + "io" +) + +var _ = MATCHBYEMPTY + +// otherKey implements crypto.Signer and crypto.Decrypter to satisfy linter on platforms that don't implement certstore +type otherKey struct{} + +func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, config *tls.Config) error { + _, _, _, _ = certStore, certMatchBy, certMatch, config + return ErrOSNotCompatCertStore +} + +// Public always returns nil public key since this is a stub on non-supported platform +func (k otherKey) Public() crypto.PublicKey { + return nil +} + +// Sign always returns a nil signature since this is a stub on non-supported platform +func (k otherKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + _, _, _ = rand, digest, opts + return nil, nil +} + +// Verify interface conformance. +var _ credential = &otherKey{} diff --git a/server/certstore/certstore_windows.go b/server/certstore/certstore_windows.go new file mode 100644 index 000000000..57adc187a --- /dev/null +++ b/server/certstore/certstore_windows.go @@ -0,0 +1,827 @@ +// Copyright 2022-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Adapted, updated, and enhanced from CertToStore, https://github.com/google/certtostore/releases/tag/v1.0.2 +// Apache License, Version 2.0, Copyright 2017 Google Inc. 
+ +package certstore + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/binary" + "fmt" + "io" + "math/big" + "reflect" + "sync" + "syscall" + "unicode/utf16" + "unsafe" + + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + "golang.org/x/sys/windows" +) + +const ( + // wincrypt.h constants + winAcquireCached = 0x1 // CRYPT_ACQUIRE_CACHE_FLAG + winAcquireSilent = 0x40 // CRYPT_ACQUIRE_SILENT_FLAG + winAcquireOnlyNCryptKey = 0x40000 // CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG + winEncodingX509ASN = 1 // X509_ASN_ENCODING + winEncodingPKCS7 = 65536 // PKCS_7_ASN_ENCODING + winCertStoreProvSystem = 10 // CERT_STORE_PROV_SYSTEM + winCertStoreCurrentUser = uint32(winCertStoreCurrentUserID << winCompareShift) // CERT_SYSTEM_STORE_CURRENT_USER + winCertStoreLocalMachine = uint32(winCertStoreLocalMachineID << winCompareShift) // CERT_SYSTEM_STORE_LOCAL_MACHINE + winCertStoreCurrentUserID = 1 // CERT_SYSTEM_STORE_CURRENT_USER_ID + winCertStoreLocalMachineID = 2 // CERT_SYSTEM_STORE_LOCAL_MACHINE_ID + winInfoIssuerFlag = 4 // CERT_INFO_ISSUER_FLAG + winInfoSubjectFlag = 7 // CERT_INFO_SUBJECT_FLAG + winCompareNameStrW = 8 // CERT_COMPARE_NAME_STR_A + winCompareShift = 16 // CERT_COMPARE_SHIFT + + // Reference https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore + winFindIssuerStr = winCompareNameStrW< 1 { + chain = chain[:len(chain)-1] + } + + // For tls.Certificate.Certificate need a [][]byte from []*x509.Certificate + // Approximate capacity for efficiency + rawChain = make([][]byte, 0, len(chain)) + for _, link := range chain { + rawChain = append(rawChain, link.Raw) + } + + tlsCert := tls.Certificate{ + Certificate: rawChain, + PrivateKey: pk, + Leaf: leaf, + } + config.Certificates = []tls.Certificate{tlsCert} + + // note: pk is a windows pointer (not freed by Go) but needs to live the life of the server for Signing. + // The cert context (leafCtx) windows pointer must not be freed underneath the pk so also life of the server. + return nil +} + +// winWide returns a pointer to uint16 representing the equivalent +// to a Windows LPCWSTR. +func winWide(s string) *uint16 { + w := utf16.Encode([]rune(s)) + w = append(w, 0) + return &w[0] +} + +// winOpenProvider gets a provider handle for subsequent calls +func winOpenProvider(provider string) (uintptr, error) { + var hProv uintptr + pname := winWide(provider) + // Open the provider, the last parameter is not used + r, _, err := winNCryptOpenStorageProvider.Call(uintptr(unsafe.Pointer(&hProv)), uintptr(unsafe.Pointer(pname)), 0) + if r == 0 { + return hProv, nil + } + return hProv, fmt.Errorf("NCryptOpenStorageProvider returned %X: %v", r, err) +} + +// winFindCert wraps the CertFindCertificateInStore library call. Note that any cert context passed +// into prev will be freed. If no certificate was found, nil will be returned. +func winFindCert(store windows.Handle, enc, findFlags, findType uint32, para *uint16, prev *windows.CertContext) (*windows.CertContext, error) { + h, _, err := winCertFindCertificateInStore.Call( + uintptr(store), + uintptr(enc), + uintptr(findFlags), + uintptr(findType), + uintptr(unsafe.Pointer(para)), + uintptr(unsafe.Pointer(prev)), + ) + if h == 0 { + // Actual error, or simply not found? 
+ if errno, ok := err.(syscall.Errno); ok && errno == winCryptENotFound { + return nil, ErrFailedCertSearch + } + return nil, ErrFailedCertSearch + } + // nolint:govet + return (*windows.CertContext)(unsafe.Pointer(h)), nil +} + +// winCertStore is a store implementation for the Windows Certificate Store +type winCertStore struct { + Prov uintptr + ProvName string + stores map[string]*winStoreHandle + mu sync.Mutex +} + +// winOpenCertStore creates a winCertStore +func winOpenCertStore(provider string) (*winCertStore, error) { + cngProv, err := winOpenProvider(provider) + if err != nil { + // pass through error from winOpenProvider + return nil, err + } + + wcs := &winCertStore{ + Prov: cngProv, + ProvName: provider, + stores: make(map[string]*winStoreHandle), + } + + return wcs, nil +} + +// winCertContextToX509 creates an x509.Certificate from a Windows cert context. +func winCertContextToX509(ctx *windows.CertContext) (*x509.Certificate, error) { + var der []byte + slice := (*reflect.SliceHeader)(unsafe.Pointer(&der)) + slice.Data = uintptr(unsafe.Pointer(ctx.EncodedCert)) + slice.Len = int(ctx.Length) + slice.Cap = int(ctx.Length) + return x509.ParseCertificate(der) +} + +// certByIssuer matches and returns the first certificate found by passed issuer. +// CertContext pointer returned allows subsequent key operations like Sign. Caller specifies +// current user's personal certs or local machine's personal certs using storeType. +// See CERT_FIND_ISSUER_STR description at https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore +func (w *winCertStore) certByIssuer(issuer string, storeType uint32) (*x509.Certificate, *windows.CertContext, error) { + return w.certSearch(winFindIssuerStr, issuer, winMyStore, storeType) +} + +// certBySubject matches and returns the first certificate found by passed subject field. +// CertContext pointer returned allows subsequent key operations like Sign. Caller specifies +// current user's personal certs or local machine's personal certs using storeType. +// See CERT_FIND_SUBJECT_STR description at https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore +func (w *winCertStore) certBySubject(subject string, storeType uint32) (*x509.Certificate, *windows.CertContext, error) { + return w.certSearch(winFindSubjectStr, subject, winMyStore, storeType) +} + +// certSearch is a helper function to lookup certificates based on search type and match value. +// store is used to specify which store to perform the lookup in (system or user). 
+func (w *winCertStore) certSearch(searchType uint32, matchValue string, searchRoot *uint16, store uint32) (*x509.Certificate, *windows.CertContext, error) { + // store handle to "MY" store + h, err := w.storeHandle(store, searchRoot) + if err != nil { + return nil, nil, err + } + + var prev *windows.CertContext + var cert *x509.Certificate + + i, err := windows.UTF16PtrFromString(matchValue) + if err != nil { + return nil, nil, ErrFailedCertSearch + } + + // pass 0 as the third parameter because it is not used + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa376064(v=vs.85).aspx + nc, err := winFindCert(h, winEncodingX509ASN|winEncodingPKCS7, 0, searchType, i, prev) + if err != nil { + return nil, nil, err + } + if nc != nil { + // certificate found + prev = nc + + // Extract the DER-encoded certificate from the cert context + xc, err := winCertContextToX509(nc) + if err == nil { + cert = xc + } else { + return nil, nil, ErrFailedX509Extract + } + } else { + return nil, nil, ErrFailedCertSearch + } + + if cert == nil { + return nil, nil, ErrFailedX509Extract + } + + return cert, prev, nil +} + +type winStoreHandle struct { + handle *windows.Handle +} + +func winNewStoreHandle(provider uint32, store *uint16) (*winStoreHandle, error) { + var s winStoreHandle + if s.handle != nil { + return &s, nil + } + st, err := windows.CertOpenStore( + winCertStoreProvSystem, + 0, + 0, + provider, + uintptr(unsafe.Pointer(store))) + if err != nil { + return nil, ErrBadCryptoStoreProvider + } + s.handle = &st + return &s, nil +} + +// winKey implements crypto.Signer and crypto.Decrypter for key based operations. +type winKey struct { + handle uintptr + pub crypto.PublicKey + Container string + AlgorithmGroup string +} + +// Public exports a public key to implement crypto.Signer +func (k winKey) Public() crypto.PublicKey { + return k.pub +} + +// Sign returns the signature of a hash to implement crypto.Signer +func (k winKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + switch k.AlgorithmGroup { + case "ECDSA", "ECDH": + return winSignECDSA(k.handle, digest) + case "RSA": + hf := opts.HashFunc() + algID, ok := winAlgIDs[hf] + if !ok { + return nil, ErrBadRSAHashAlgorithm + } + switch opts.(type) { + case *rsa.PSSOptions: + return winSignRSAPSSPadding(k.handle, digest, algID) + default: + return winSignRSAPKCS1Padding(k.handle, digest, algID) + } + default: + return nil, ErrBadSigningAlgorithm + } +} + +func winSignECDSA(kh uintptr, digest []byte) ([]byte, error) { + var size uint32 + // Obtain the size of the signature + r, _, _ := winNCryptSignHash.Call( + kh, + 0, + uintptr(unsafe.Pointer(&digest[0])), + uintptr(len(digest)), + 0, + 0, + uintptr(unsafe.Pointer(&size)), + 0) + if r != 0 { + return nil, ErrStoreECDSASigningError + } + + // Obtain the signature data + buf := make([]byte, size) + r, _, _ = winNCryptSignHash.Call( + kh, + 0, + uintptr(unsafe.Pointer(&digest[0])), + uintptr(len(digest)), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(size), + uintptr(unsafe.Pointer(&size)), + 0) + if r != 0 { + return nil, ErrStoreECDSASigningError + } + if len(buf) != int(size) { + return nil, ErrStoreECDSASigningError + } + + return winPackECDSASigValue(bytes.NewReader(buf[:size]), len(digest)) +} + +func winPackECDSASigValue(r io.Reader, digestLength int) ([]byte, error) { + sigR := make([]byte, digestLength) + if _, err := io.ReadFull(r, sigR); err != nil { + return nil, ErrStoreECDSASigningError + } + + sigS := make([]byte, digestLength) + if _, err := 
io.ReadFull(r, sigS); err != nil { + return nil, ErrStoreECDSASigningError + } + + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1BigInt(new(big.Int).SetBytes(sigR)) + b.AddASN1BigInt(new(big.Int).SetBytes(sigS)) + }) + return b.Bytes() +} + +func winSignRSAPKCS1Padding(kh uintptr, digest []byte, algID *uint16) ([]byte, error) { + // PKCS#1 v1.5 padding for some TLS 1.2 + padInfo := winPKCS1PaddingInfo{pszAlgID: algID} + var size uint32 + // Obtain the size of the signature + r, _, _ := winNCryptSignHash.Call( + kh, + uintptr(unsafe.Pointer(&padInfo)), + uintptr(unsafe.Pointer(&digest[0])), + uintptr(len(digest)), + 0, + 0, + uintptr(unsafe.Pointer(&size)), + winBCryptPadPKCS1) + if r != 0 { + return nil, ErrStoreRSASigningError + } + + // Obtain the signature data + sig := make([]byte, size) + r, _, _ = winNCryptSignHash.Call( + kh, + uintptr(unsafe.Pointer(&padInfo)), + uintptr(unsafe.Pointer(&digest[0])), + uintptr(len(digest)), + uintptr(unsafe.Pointer(&sig[0])), + uintptr(size), + uintptr(unsafe.Pointer(&size)), + winBCryptPadPKCS1) + if r != 0 { + return nil, ErrStoreRSASigningError + } + + return sig[:size], nil +} + +func winSignRSAPSSPadding(kh uintptr, digest []byte, algID *uint16) ([]byte, error) { + // PSS padding for TLS 1.3 and some TLS 1.2 + padInfo := winPSSPaddingInfo{pszAlgID: algID, cbSalt: winBCryptPadPSSSalt} + + var size uint32 + // Obtain the size of the signature + r, _, _ := winNCryptSignHash.Call( + kh, + uintptr(unsafe.Pointer(&padInfo)), + uintptr(unsafe.Pointer(&digest[0])), + uintptr(len(digest)), + 0, + 0, + uintptr(unsafe.Pointer(&size)), + winBCryptPadPSS) + if r != 0 { + return nil, ErrStoreRSASigningError + } + + // Obtain the signature data + sig := make([]byte, size) + r, _, _ = winNCryptSignHash.Call( + kh, + uintptr(unsafe.Pointer(&padInfo)), + uintptr(unsafe.Pointer(&digest[0])), + uintptr(len(digest)), + uintptr(unsafe.Pointer(&sig[0])), + uintptr(size), + uintptr(unsafe.Pointer(&size)), + winBCryptPadPSS) + if r != 0 { + return nil, ErrStoreRSASigningError + } + + return sig[:size], nil +} + +// certKey wraps CryptAcquireCertificatePrivateKey. It obtains the CNG private +// key of a known certificate and returns a pointer to a winKey which implements +// both crypto.Signer. When a nil cert context is passed +// a nil key is intentionally returned, to model the expected behavior of a +// non-existent cert having no private key. +// https://docs.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-cryptacquirecertificateprivatekey +func (w *winCertStore) certKey(cert *windows.CertContext) (*winKey, error) { + // Return early if a nil cert was passed. + if cert == nil { + return nil, nil + } + var ( + kh uintptr + spec uint32 + mustFree int + ) + r, _, _ := winCryptAcquireCertificatePrivateKey.Call( + uintptr(unsafe.Pointer(cert)), + winAcquireCached|winAcquireSilent|winAcquireOnlyNCryptKey, + 0, // Reserved, must be null. + uintptr(unsafe.Pointer(&kh)), + uintptr(unsafe.Pointer(&spec)), + uintptr(unsafe.Pointer(&mustFree)), + ) + // If the function succeeds, the return value is nonzero (TRUE). 
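+	// A zero return therefore means the acquire failed. Below we also
+	// reject handles the caller would be responsible for freeing
+	// (mustFree != 0), and any key that is not a CNG/NCrypt handle
+	// (spec != winNcryptKeySpec), since winKey.Sign only calls NCryptSignHash.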
+ if r == 0 { + return nil, ErrNoPrivateKeyStoreRef + } + if mustFree != 0 { + return nil, ErrNoPrivateKeyStoreRef + } + if spec != winNcryptKeySpec { + return nil, ErrNoPrivateKeyStoreRef + } + + return winKeyMetadata(kh) +} + +func winKeyMetadata(kh uintptr) (*winKey, error) { + // uc is used to populate the unique container name attribute of the private key + uc, err := winGetPropertyStr(kh, winNCryptUniqueNameProperty) + if err != nil { + // unable to determine key unique name + return nil, ErrExtractingPrivateKeyMetadata + } + + alg, err := winGetPropertyStr(kh, winNCryptAlgorithmGroupProperty) + if err != nil { + // unable to determine key algorithm + return nil, ErrExtractingPrivateKeyMetadata + } + + var pub crypto.PublicKey + + switch alg { + case "ECDSA", "ECDH": + buf, err := winExport(kh, winBCryptECCPublicBlob) + if err != nil { + // failed to export ECC public key + return nil, ErrExtractingECCPublicKey + } + pub, err = unmarshalECC(buf, kh) + if err != nil { + return nil, ErrExtractingECCPublicKey + } + case "RSA": + buf, err := winExport(kh, winBCryptRSAPublicBlob) + if err != nil { + return nil, ErrExtractingRSAPublicKey + } + pub, err = winUnmarshalRSA(buf) + if err != nil { + return nil, ErrExtractingRSAPublicKey + } + default: + return nil, ErrBadPublicKeyAlgorithm + } + + return &winKey{handle: kh, pub: pub, Container: uc, AlgorithmGroup: alg}, nil +} + +func winGetProperty(kh uintptr, property *uint16) ([]byte, error) { + var strSize uint32 + r, _, _ := winNCryptGetProperty.Call( + kh, + uintptr(unsafe.Pointer(property)), + 0, + 0, + uintptr(unsafe.Pointer(&strSize)), + 0, + 0) + if r != 0 { + return nil, ErrExtractPropertyFromKey + } + + buf := make([]byte, strSize) + r, _, _ = winNCryptGetProperty.Call( + kh, + uintptr(unsafe.Pointer(property)), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(strSize), + uintptr(unsafe.Pointer(&strSize)), + 0, + 0) + if r != 0 { + return nil, ErrExtractPropertyFromKey + } + + return buf, nil +} + +func winGetPropertyStr(kh uintptr, property *uint16) (string, error) { + buf, err := winFnGetProperty(kh, property) + if err != nil { + return "", ErrExtractPropertyFromKey + } + uc := bytes.ReplaceAll(buf, []byte{0x00}, []byte("")) + return string(uc), nil +} + +func winExport(kh uintptr, blobType *uint16) ([]byte, error) { + var size uint32 + // When obtaining the size of a public key, most parameters are not required + r, _, _ := winNCryptExportKey.Call( + kh, + 0, + uintptr(unsafe.Pointer(blobType)), + 0, + 0, + 0, + uintptr(unsafe.Pointer(&size)), + 0) + if r != 0 { + return nil, ErrExtractingPublicKey + } + + // Place the exported key in buf now that we know the size required + buf := make([]byte, size) + r, _, _ = winNCryptExportKey.Call( + kh, + 0, + uintptr(unsafe.Pointer(blobType)), + 0, + uintptr(unsafe.Pointer(&buf[0])), + uintptr(size), + uintptr(unsafe.Pointer(&size)), + 0) + if r != 0 { + return nil, ErrExtractingPublicKey + } + return buf, nil +} + +func unmarshalECC(buf []byte, kh uintptr) (*ecdsa.PublicKey, error) { + // BCRYPT_ECCKEY_BLOB from bcrypt.h + header := struct { + Magic uint32 + Key uint32 + }{} + + r := bytes.NewReader(buf) + if err := binary.Read(r, binary.LittleEndian, &header); err != nil { + return nil, ErrExtractingECCPublicKey + } + + curve, ok := winCurveIDs[header.Magic] + if !ok { + // Fix for b/185945636, where despite specifying the curve, nCrypt returns + // an incorrect response with BCRYPT_ECDSA_PUBLIC_GENERIC_MAGIC. 
+ var err error + curve, err = winCurveName(kh) + if err != nil { + // unsupported header magic or cannot match the curve by name + return nil, err + } + } + + keyX := make([]byte, header.Key) + if n, err := r.Read(keyX); n != int(header.Key) || err != nil { + // failed to read key X + return nil, ErrExtractingECCPublicKey + } + + keyY := make([]byte, header.Key) + if n, err := r.Read(keyY); n != int(header.Key) || err != nil { + // failed to read key Y + return nil, ErrExtractingECCPublicKey + } + + pub := &ecdsa.PublicKey{ + Curve: curve, + X: new(big.Int).SetBytes(keyX), + Y: new(big.Int).SetBytes(keyY), + } + return pub, nil +} + +// winCurveName reads the curve name property and returns the corresponding curve. +func winCurveName(kh uintptr) (elliptic.Curve, error) { + cn, err := winGetPropertyStr(kh, winNCryptECCCurveNameProperty) + if err != nil { + // unable to determine the curve property name + return nil, ErrExtractPropertyFromKey + } + curve, ok := winCurveNames[cn] + if !ok { + // unknown curve name + return nil, ErrBadECCCurveName + } + return curve, nil +} + +func winUnmarshalRSA(buf []byte) (*rsa.PublicKey, error) { + // BCRYPT_RSA_BLOB from bcrypt.h + header := struct { + Magic uint32 + BitLength uint32 + PublicExpSize uint32 + ModulusSize uint32 + UnusedPrime1 uint32 + UnusedPrime2 uint32 + }{} + + r := bytes.NewReader(buf) + if err := binary.Read(r, binary.LittleEndian, &header); err != nil { + return nil, ErrExtractingRSAPublicKey + } + + if header.Magic != winRSA1Magic { + // invalid header magic + return nil, ErrExtractingRSAPublicKey + } + + if header.PublicExpSize > 8 { + // unsupported public exponent size + return nil, ErrExtractingRSAPublicKey + } + + exp := make([]byte, 8) + if n, err := r.Read(exp[8-header.PublicExpSize:]); n != int(header.PublicExpSize) || err != nil { + // failed to read public exponent + return nil, ErrExtractingRSAPublicKey + } + + mod := make([]byte, header.ModulusSize) + if n, err := r.Read(mod); n != int(header.ModulusSize) || err != nil { + // failed to read modulus + return nil, ErrExtractingRSAPublicKey + } + + pub := &rsa.PublicKey{ + N: new(big.Int).SetBytes(mod), + E: int(binary.BigEndian.Uint64(exp)), + } + return pub, nil +} + +// storeHandle returns a handle to a given cert store, opening the handle as needed. +func (w *winCertStore) storeHandle(provider uint32, store *uint16) (windows.Handle, error) { + w.mu.Lock() + defer w.mu.Unlock() + + key := fmt.Sprintf("%d%s", provider, windows.UTF16PtrToString(store)) + var err error + if w.stores[key] == nil { + w.stores[key], err = winNewStoreHandle(provider, store) + if err != nil { + return 0, ErrBadCryptoStoreProvider + } + } + return *w.stores[key].handle, nil +} + +// Verify interface conformance. 
+var _ credential = &winKey{}
diff --git a/server/certstore/errors.go b/server/certstore/errors.go
new file mode 100644
index 000000000..bbb1c9d83
--- /dev/null
+++ b/server/certstore/errors.go
@@ -0,0 +1,73 @@
+package certstore
+
+import (
+	"errors"
+)
+
+var (
+	// ErrBadCryptoStoreProvider represents inability to establish a link with a certificate store
+	ErrBadCryptoStoreProvider = errors.New("unable to open certificate store or store not available")
+
+	// ErrBadRSAHashAlgorithm represents a bad or unsupported RSA hash algorithm
+	ErrBadRSAHashAlgorithm = errors.New("unsupported RSA hash algorithm")
+
+	// ErrBadSigningAlgorithm represents a bad or unsupported signing algorithm
+	ErrBadSigningAlgorithm = errors.New("unsupported signing algorithm")
+
+	// ErrStoreRSASigningError represents an error returned from the store during RSA signing
+	ErrStoreRSASigningError = errors.New("unable to obtain RSA signature from store")
+
+	// ErrStoreECDSASigningError represents an error returned from the store during ECDSA signing
+	ErrStoreECDSASigningError = errors.New("unable to obtain ECDSA signature from store")
+
+	// ErrNoPrivateKeyStoreRef represents an error getting a handle to a private key in the store
+	ErrNoPrivateKeyStoreRef = errors.New("unable to obtain private key handle from store")
+
+	// ErrExtractingPrivateKeyMetadata represents a family of errors extracting metadata about the private key in the store
+	ErrExtractingPrivateKeyMetadata = errors.New("unable to extract private key metadata")
+
+	// ErrExtractingECCPublicKey represents an error exporting an ECC-type public key from the store
+	ErrExtractingECCPublicKey = errors.New("unable to extract ECC public key from store")
+
+	// ErrExtractingRSAPublicKey represents an error exporting an RSA-type public key from the store
+	ErrExtractingRSAPublicKey = errors.New("unable to extract RSA public key from store")
+
+	// ErrExtractingPublicKey represents a general error exporting a public key from the store
+	ErrExtractingPublicKey = errors.New("unable to extract public key from store")
+
+	// ErrBadPublicKeyAlgorithm represents a bad or unsupported public key algorithm
+	ErrBadPublicKeyAlgorithm = errors.New("unsupported public key algorithm")
+
+	// ErrExtractPropertyFromKey represents a general failure to extract a metadata property field
+	ErrExtractPropertyFromKey = errors.New("unable to extract property from key")
+
+	// ErrBadECCCurveName represents an ECC signature curve name that is bad or unsupported
+	ErrBadECCCurveName = errors.New("unsupported ECC curve name")
+
+	// ErrFailedCertSearch represents a failure to find a certificate in the store
+	ErrFailedCertSearch = errors.New("unable to find certificate in store")
+
+	// ErrFailedX509Extract represents a failure to extract an x509 certificate from a certificate found in the store
+	ErrFailedX509Extract = errors.New("unable to extract x509 from certificate")
+
+	// ErrBadMatchByType represents an unknown CERT_MATCH_BY being passed
+	ErrBadMatchByType = errors.New("cert match by type not implemented")
+
+	// ErrBadCertStore represents an unknown CERT_STORE being passed
+	ErrBadCertStore = errors.New("cert store type not implemented")
+
+	// ErrConflictCertFileAndStore represents ambiguous configuration of both file and store
+	ErrConflictCertFileAndStore = errors.New("'cert_file' and 'cert_store' may not both be configured")
+
+	// ErrBadCertStoreField represents a malformed cert_store option
+	ErrBadCertStoreField = errors.New("expected 'cert_store' to be a valid non-empty string")
+
+	// ErrBadCertMatchByField represents a malformed
cert_match_by option + ErrBadCertMatchByField = errors.New("expected 'cert_match_by' to be a valid non-empty string") + + // ErrBadCertMatchField represents malformed cert_match option + ErrBadCertMatchField = errors.New("expected 'cert_match' to be a valid non-empty string") + + // ErrOSNotCompatCertStore represents cert_store passed that exists but is not valid on current OS + ErrOSNotCompatCertStore = errors.New("cert_store not compatible with current operating system") +) diff --git a/server/certstore_windows_test.go b/server/certstore_windows_test.go new file mode 100644 index 000000000..e0f33c2e7 --- /dev/null +++ b/server/certstore_windows_test.go @@ -0,0 +1,230 @@ +// Copyright 2022-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package server + +import ( + "fmt" + "net/url" + "os" + "os/exec" + "strings" + "testing" + "time" + + "github.com/nats-io/nats.go" +) + +func runPowershellScript(scriptFile string, args []string) error { + _ = args + psExec, _ := exec.LookPath("powershell.exe") + execArgs := []string{psExec, "-command", fmt.Sprintf("& '%s'", scriptFile)} + + cmdImport := &exec.Cmd{ + Path: psExec, + Args: execArgs, + Stdout: os.Stdout, + Stderr: os.Stderr, + } + return cmdImport.Run() +} + +func runConfiguredLeaf(t *testing.T, hubPort int, certStore string, matchBy string, match string, expectedLeafCount int) { + + // Fire up the leaf + u, err := url.Parse(fmt.Sprintf("nats://localhost:%d", hubPort)) + if err != nil { + t.Fatalf("Error parsing url: %v", err) + } + + configStr := fmt.Sprintf(` + port: -1 + leaf { + remotes [ + { + url: "%s" + tls { + cert_store: "%s" + cert_match_by: "%s" + cert_match: "%s" + + # Above should be equivalent to: + # cert_file: "../test/configs/certs/tlsauth/client.pem" + # key_file: "../test/configs/certs/tlsauth/client-key.pem" + + ca_file: "../test/configs/certs/tlsauth/ca.pem" + timeout: 5 + } + } + ] + } + `, u.String(), certStore, matchBy, match) + + leafConfig := createConfFile(t, []byte(configStr)) + defer removeFile(t, leafConfig) + leafServer, _ := RunServerWithConfig(leafConfig) + defer leafServer.Shutdown() + + // After client verify, hub will match by SAN email, SAN dns, and Subject (in that order) + // Our test client specifies Subject only so we should match on that... + + // A little settle time + time.Sleep(1 * time.Second) + checkLeafNodeConnectedCount(t, leafServer, expectedLeafCount) +} + +// TestLeafTLSWindowsCertStore tests the topology of two NATS Servers connected as leaf and hub with authentication of +// leaf to hub via mTLS with leaf's certificate and signing key provisioned in the Windows certificate store. 
+func TestLeafTLSWindowsCertStore(t *testing.T) {
+
+	// Client Identity (client.pem)
+	// Issuer: O = Synadia Communications Inc., OU = NATS.io, CN = localhost
+	// Subject: OU = NATS.io, CN = example.com
+
+	// Make sure windows cert store is reset to avoid conflict with other tests
+	err := runPowershellScript("../test/configs/certs/tlsauth/certstore/delete-cert-from-store.ps1", nil)
+	if err != nil {
+		t.Fatalf("expected powershell cert delete to succeed: %s", err.Error())
+	}
+
+	// Provision Windows cert store with client cert and secret
+	err = runPowershellScript("../test/configs/certs/tlsauth/certstore/import-p12-client.ps1", nil)
+	if err != nil {
+		t.Fatalf("expected powershell provision to succeed: %s", err.Error())
+	}
+
+	// Fire up the hub
+	hubConfig := createConfFile(t, []byte(`
+	port: -1
+	leaf {
+		listen: "127.0.0.1:-1"
+		tls {
+			ca_file: "../test/configs/certs/tlsauth/ca.pem"
+			cert_file: "../test/configs/certs/tlsauth/server.pem"
+			key_file: "../test/configs/certs/tlsauth/server-key.pem"
+			timeout: 5
+			verify_and_map: true
+		}
+	}
+
+	accounts: {
+		AcctA: {
+			users: [ {user: "OU = NATS.io, CN = example.com"} ]
+		},
+		AcctB: {
+			users: [ {user: UserB1} ]
+		},
+		SYS: {
+			users: [ {user: System} ]
+		}
+	}
+	system_account: "SYS"
+	`))
+	defer removeFile(t, hubConfig)
+	hubServer, hubOptions := RunServerWithConfig(hubConfig)
+	defer hubServer.Shutdown()
+
+	testCases := []struct {
+		certStore         string
+		certMatchBy       string
+		certMatch         string
+		expectedLeafCount int
+	}{
+		{"WindowsCurrentUser", "Subject", "example.com", 1},
+		{"WindowsCurrentUser", "Issuer", "Synadia Communications Inc.", 1},
+		{"WindowsCurrentUser", "Issuer", "Frodo Baggins, Inc.", 0},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("%s by %s match %s", tc.certStore, tc.certMatchBy, tc.certMatch), func(t *testing.T) {
+			defer func() {
+				if r := recover(); r != nil {
+					if tc.expectedLeafCount != 0 {
+						t.Fatalf("did not expect panic")
+					} else {
+						if !strings.Contains(fmt.Sprintf("%v", r), "Error processing configuration file") {
+							t.Fatalf("did not expect unknown panic cause")
+						}
+					}
+				}
+			}()
+			runConfiguredLeaf(t, hubOptions.LeafNode.Port, tc.certStore, tc.certMatchBy, tc.certMatch, tc.expectedLeafCount)
+		})
+	}
+}
+
+// TestServerTLSWindowsCertStore tests the topology of a NATS server requiring TLS and getting its own server
+// cert identity (as used when accepting NATS client connections and negotiating TLS) from the Windows certificate store.
+func TestServerTLSWindowsCertStore(t *testing.T) {
+
+	// Server Identity (server.pem)
+	// Issuer: O = Synadia Communications Inc., OU = NATS.io, CN = localhost
+	// Subject: OU = NATS.io Operators, CN = localhost
+
+	// Make sure windows cert store is reset to avoid conflict with other tests
+	err := runPowershellScript("../test/configs/certs/tlsauth/certstore/delete-cert-from-store.ps1", nil)
+	if err != nil {
+		t.Fatalf("expected powershell cert delete to succeed: %s", err.Error())
+	}
+
+	// Provision Windows cert store with server cert and secret
+	err = runPowershellScript("../test/configs/certs/tlsauth/certstore/import-p12-server.ps1", nil)
+	if err != nil {
+		t.Fatalf("expected powershell provision to succeed: %s", err.Error())
+	}
+
+	// Fire up the server
+	srvConfig := createConfFile(t, []byte(`
+	listen: "localhost:-1"
+	tls {
+		cert_store: "WindowsCurrentUser"
+		cert_match_by: "Subject"
+		cert_match: "NATS.io Operators"
+		timeout: 5
+	}
+	`))
+	defer removeFile(t, srvConfig)
+	srvServer, _ := RunServerWithConfig(srvConfig)
+	if srvServer == nil {
+		t.Fatalf("expected to be able to start server with cert store configuration")
+	}
+	defer srvServer.Shutdown()
+
+	testCases := []struct {
+		clientCA string
+		expect   bool
+	}{
+		{"../test/configs/certs/tlsauth/ca.pem", true},
+		{"../test/configs/certs/tlsauth/client.pem", false},
+	}
+	for _, tc := range testCases {
+		t.Run(fmt.Sprintf("Client CA: %s", tc.clientCA), func(t *testing.T) {
+			nc, _ := nats.Connect(srvServer.clientConnectURLs[0], nats.RootCAs(tc.clientCA))
+			err := nc.Publish("foo", []byte("hello TLS server-authenticated server"))
+			if (err != nil) == tc.expect {
+				t.Fatalf("expected publish result %v to TLS authenticated server", tc.expect)
+			}
+			nc.Close()
+
+			for i := 0; i < 5; i++ {
+				nc, _ = nats.Connect(srvServer.clientConnectURLs[0], nats.RootCAs(tc.clientCA))
+				err = nc.Publish("foo", []byte("hello TLS server-authenticated server"))
+				if (err != nil) == tc.expect {
+					t.Fatalf("expected repeated connection result %v to TLS authenticated server", tc.expect)
+				}
+				nc.Close()
+			}
+		})
+	}
+}
diff --git a/server/ciphersuites.go b/server/ciphersuites.go
index b12682c10..12014327b 100644
--- a/server/ciphersuites.go
+++ b/server/ciphersuites.go
@@ -108,6 +108,7 @@ func defaultCurvePreferences() []tls.CurveID {
 	}
 }
 
+// ** added by Memphis
 func EncryptAES(plaintext []byte) (string, error) {
 	key := getAESKey()
 	c, err := aes.NewCipher(key)
@@ -163,3 +164,5 @@ func getAESKey() []byte {
 	}
 	return key
 }
+
+// ** added by Memphis
diff --git a/server/client.go b/server/client.go
index 80bc6974f..d088f1cd3 100644
--- a/server/client.go
+++ b/server/client.go
@@ -18,6 +18,7 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"math/rand"
@@ -110,9 +111,6 @@ const (
 	// For stalling fast producers
 	stallClientMinDuration = 100 * time.Millisecond
 	stallClientMaxDuration = time.Second
-
-	// Threshold for not knowingly doing a potential blocking operation when internal and on a route or gateway or leafnode.
-	noBlockThresh = 500 * time.Millisecond
 )
 
 var readLoopReportThreshold = readLoopReport
@@ -139,7 +137,6 @@ const (
 	skipFlushOnClose       // Marks that flushOutbound() should not be called on connection close.
 	expectConnect          // Marks if this connection is expected to send a CONNECT
 	connectProcessFinished // Marks if this connection has finished the connect process.
- connectionIdSent // connectionId sent to client ) // set the flag (would be equivalent to set the boolean to true) @@ -254,7 +251,9 @@ type client struct { ping pinfo msgb [msgScratchSize]byte last time.Time - headers bool + lastIn time.Time + + headers bool rtt time.Duration rttStart time.Time @@ -278,15 +277,18 @@ type client struct { tlsTo *time.Timer - memphisInfo memphisClientInfo + memphisInfo memphisClientInfo // ** added by Memphis } +// ** added by Memphis type memphisClientInfo struct { username string connectionId string `json:"connection_id,omitempty"` isNative bool } +// ** added by Memphis + type rrTracking struct { rmap map[string]*remoteLatency ptmr *time.Timer @@ -295,20 +297,15 @@ type rrTracking struct { // Struct for PING initiation from the server. type pinfo struct { - tmr *time.Timer - last time.Time - out int + tmr *time.Timer + out int } // outbound holds pending data for a socket. type outbound struct { - p []byte // Primary write buffer - s []byte // Secondary for use post flush - nb net.Buffers // net.Buffers for writev IO - sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max. - sws int32 // Number of short writes, used for dynamic resizing. + nb net.Buffers // Pending buffers for send, each has fixed capacity as per nbPool below. + wnb net.Buffers // Working copy of "nb", reused on each flushOutbound call, partial writes may leave entries here for next iteration. pb int64 // Total pending/queued bytes. - pm int32 // Total pending/queued messages. fsp int32 // Flush signals that are pending per producer from readLoop's pcd. sg *sync.Cond // To signal writeLoop that there is data to flush. wdl time.Duration // Snapshot of write deadline. @@ -317,6 +314,59 @@ type outbound struct { stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in. } +const nbPoolSizeSmall = 512 // Underlying array size of small buffer +const nbPoolSizeMedium = 4096 // Underlying array size of medium buffer +const nbPoolSizeLarge = 65536 // Underlying array size of large buffer + +var nbPoolSmall = &sync.Pool{ + New: func() any { + b := [nbPoolSizeSmall]byte{} + return &b + }, +} + +var nbPoolMedium = &sync.Pool{ + New: func() any { + b := [nbPoolSizeMedium]byte{} + return &b + }, +} + +var nbPoolLarge = &sync.Pool{ + New: func() any { + b := [nbPoolSizeLarge]byte{} + return &b + }, +} + +func nbPoolGet(sz int) []byte { + switch { + case sz <= nbPoolSizeSmall: + return nbPoolSmall.Get().(*[nbPoolSizeSmall]byte)[:0] + case sz <= nbPoolSizeMedium: + return nbPoolMedium.Get().(*[nbPoolSizeMedium]byte)[:0] + default: + return nbPoolLarge.Get().(*[nbPoolSizeLarge]byte)[:0] + } +} + +func nbPoolPut(b []byte) { + switch cap(b) { + case nbPoolSizeSmall: + b := (*[nbPoolSizeSmall]byte)(b[0:nbPoolSizeSmall]) + nbPoolSmall.Put(b) + case nbPoolSizeMedium: + b := (*[nbPoolSizeMedium]byte)(b[0:nbPoolSizeMedium]) + nbPoolMedium.Put(b) + case nbPoolSizeLarge: + b := (*[nbPoolSizeLarge]byte)(b[0:nbPoolSizeLarge]) + nbPoolLarge.Put(b) + default: + // Ignore frames that are the wrong size, this might happen + // with WebSocket/MQTT messages as they are framed + } +} + type perm struct { allow *Sublist deny *Sublist @@ -593,7 +643,6 @@ func (c *client) initClient() { c.cid = atomic.AddUint64(&s.gcid, 1) // Outbound data structure setup - c.out.sz = startBufSize c.out.sg = sync.NewCond(&(c.mu)) opts := s.getOpts() // Snapshots to avoid mutex access in fast paths. 
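
The tiered nbPool added above replaces the old grow/shrink primary buffer with three sync.Pool size classes. A rough in-package sketch of the intended round trip (the payload value is made up):

    // Rent a buffer sized to the smallest tier that fits the need.
    payload := []byte("PUB foo 5\r\nhello\r\n")
    buf := nbPoolGet(len(payload)) // len <= 512, so cap(buf) == 512 and len(buf) == 0
    buf = append(buf, payload...)  // fits within cap, so the pooled array is reused

    // ... buf is appended to c.out.nb and eventually written by flushOutbound ...

    // Return it: cap(buf) matches a tier exactly, so it goes back to nbPoolSmall.
    nbPoolPut(buf)
    // Had an append outgrown the tier, the reallocated slice would no longer
    // match any tier capacity and nbPoolPut would simply drop it for the GC.
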
@@ -752,15 +801,16 @@ func (c *client) subsAtLimit() bool { } func minLimit(value *int32, limit int32) bool { - if *value != jwt.NoLimit { + v := atomic.LoadInt32(value) + if v != jwt.NoLimit { if limit != jwt.NoLimit { - if limit < *value { - *value = limit + if limit < v { + atomic.StoreInt32(value, limit) return true } } } else if limit != jwt.NoLimit { - *value = limit + atomic.StoreInt32(value, limit) return true } return false @@ -773,7 +823,7 @@ func (c *client) applyAccountLimits() { if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) { return } - c.mpay = jwt.NoLimit + atomic.StoreInt32(&c.mpay, jwt.NoLimit) c.msubs = jwt.NoLimit if c.opts.JWT != _EMPTY_ { // user jwt implies account if uc, _ := jwt.DecodeUserClaims(c.opts.JWT); uc != nil { @@ -1107,7 +1157,7 @@ func (c *client) writeLoop() { // sent to during processing. We pass in a budget as a time.Duration // for how much time to spend in place flushing for this client. func (c *client) flushClients(budget time.Duration) time.Time { - last := time.Now().UTC() + last := time.Now() // Check pending clients for flush. for cp := range c.pcd { @@ -1290,8 +1340,10 @@ func (c *client) readLoop(pre []byte) { c.mu.Lock() // Activity based on interest changes or data/msgs. + // Also update last receive activity for ping sender if c.in.msgs > 0 || c.in.subs > 0 { c.last = last + c.lastIn = last } if n >= cap(b) { @@ -1353,11 +1405,6 @@ func (c *client) collapsePtoNB() (net.Buffers, int64) { if c.isWebsocket() { return c.wsCollapsePtoNB() } - if c.out.p != nil { - p := c.out.p - c.out.p = nil - return append(c.out.nb, p), c.out.pb - } return c.out.nb, c.out.pb } @@ -1368,9 +1415,6 @@ func (c *client) handlePartialWrite(pnb net.Buffers) { c.ws.frames = append(pnb, c.ws.frames...) return } - nb, _ := c.collapsePtoNB() - // The partial needs to be first, so append nb to pnb - c.out.nb = append(pnb, nb...) } // flushOutbound will flush outbound buffer to a client. @@ -1394,26 +1438,38 @@ func (c *client) flushOutbound() bool { return true // true because no need to queue a signal. } - // Place primary on nb, assign primary to secondary, nil out nb and secondary. - nb, attempted := c.collapsePtoNB() - c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil - if nb == nil { - return true - } - - // For selecting primary replacement. - cnb := nb - var lfs int - if len(cnb) > 0 { - lfs = len(cnb[0]) - } + // In the case of a normal socket connection, "collapsed" is just a ref + // to "nb". In the case of WebSockets, additional framing is added to + // anything that is waiting in "nb". Also keep a note of how many bytes + // were queued before we release the mutex. + collapsed, attempted := c.collapsePtoNB() + + // Frustratingly, (net.Buffers).WriteTo() modifies the receiver so we + // can't work on "nb" directly — while the mutex is unlocked during IO, + // something else might call queueOutbound and modify it. So instead we + // need a working copy — we'll operate on "wnb" instead. Note that in + // the case of a partial write, "wnb" may have remaining data from the + // previous write, and in the case of WebSockets, that data may already + // be framed, so we are careful not to re-frame "wnb" here. Instead we + // will just frame up "nb" and append it onto whatever is left on "wnb". + // "nb" will be reset back to its starting position so it can be modified + // safely by queueOutbound calls. + c.out.wnb = append(c.out.wnb, collapsed...) + var _orig [1024][]byte + orig := append(_orig[:0], c.out.wnb...) 
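+	// To make the snapshot's purpose concrete: WriteTo advances the
+	// receiver as it writes, so if "wnb" held [b1, b2] and the socket
+	// accepted exactly len(b1) bytes, "wnb" is left as [b2] afterwards.
+	// Comparing lengths against "orig" below identifies the
+	// len(orig)-len(wnb) leading buffers (here just b1) as fully
+	// consumed and safe to return to the pool.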
+ c.out.nb = c.out.nb[:0] + + // Since WriteTo is lopping things off the beginning, we need to remember + // the start position of the underlying array so that we can get back to it. + // Otherwise we'll always "slide forward" and that will result in reallocs. + startOfWnb := c.out.wnb[0:] // In case it goes away after releasing the lock. nc := c.nc - apm := c.out.pm // Capture this (we change the value in some tests) wdl := c.out.wdl + // Do NOT hold lock during actual IO. c.mu.Unlock() @@ -1425,7 +1481,7 @@ func (c *client) flushOutbound() bool { nc.SetWriteDeadline(start.Add(wdl)) // Actual write to the socket. - n, err := nb.WriteTo(nc) + n, err := c.out.wnb.WriteTo(nc) nc.SetWriteDeadline(time.Time{}) lft := time.Since(start) @@ -1433,11 +1489,35 @@ func (c *client) flushOutbound() bool { // Re-acquire client lock. c.mu.Lock() + // At this point, "wnb" has been mutated by WriteTo and any consumed + // buffers have been lopped off the beginning, so in order to return + // them to the pool, we need to look at the difference between "orig" + // and "wnb". + for i := 0; i < len(orig)-len(c.out.wnb); i++ { + nbPoolPut(orig[i]) + } + + // At this point it's possible that "nb" has been modified by another + // call to queueOutbound while the lock was released, so we'll leave + // those for the next iteration. Meanwhile it's possible that we only + // managed a partial write of "wnb", so we'll shift anything that + // remains up to the beginning of the array to prevent reallocating. + // Anything left in "wnb" has already been framed for WebSocket conns + // so leave them alone for the next call to flushOutbound. + c.out.wnb = append(startOfWnb[:0], c.out.wnb...) + + // If we've written everything but the underlying array of our working + // buffer has grown excessively then free it — the GC will tidy it up + // and we can allocate a new one next time. + if len(c.out.wnb) == 0 && cap(c.out.wnb) > nbPoolSizeLarge*8 { + c.out.wnb = nil + } + // Ignore ErrShortWrite errors, they will be handled as partials. if err != nil && err != io.ErrShortWrite { // Handle timeout error (slow consumer) differently if ne, ok := err.(net.Error); ok && ne.Timeout() { - if closed := c.handleWriteTimeout(n, attempted, len(cnb)); closed { + if closed := c.handleWriteTimeout(n, attempted, len(orig)); closed { return true } } else { @@ -1461,43 +1541,11 @@ func (c *client) flushOutbound() bool { if c.isWebsocket() { c.ws.fs -= n } - c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials. // Check for partial writes // TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin. if n != attempted && n > 0 { - c.handlePartialWrite(nb) - } else if int32(n) >= c.out.sz { - c.out.sws = 0 - } - - // Adjust based on what we wrote plus any pending. - pt := n + c.out.pb - - // Adjust sz as needed downward, keeping power of 2. - // We do this at a slower rate. - if pt < int64(c.out.sz) && c.out.sz > minBufSize { - c.out.sws++ - if c.out.sws > shortsToShrink { - c.out.sz >>= 1 - } - } - // Adjust sz as needed upward, keeping power of 2. - if pt > int64(c.out.sz) && c.out.sz < maxBufSize { - c.out.sz <<= 1 - } - - // Check to see if we can reuse buffers. - if lfs != 0 && n >= int64(lfs) { - oldp := cnb[0][:0] - if cap(oldp) >= int(c.out.sz) { - // Replace primary or secondary if they are nil, reusing same buffer. 
- if c.out.p == nil { - c.out.p = oldp - } else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) { - c.out.s = oldp - } - } + c.handlePartialWrite(c.out.nb) } // Check that if there is still data to send and writeLoop is in wait, @@ -1608,11 +1656,13 @@ func (c *client) markConnAsClosed(reason ClosedState) { // TODO: May want to send events to single go routine instead // of creating a new go routine for each save. go c.srv.saveClosedClient(c, nc, reason) + // ** added by Memphis if c.kind == CLIENT { if err := c.memphisInfo.updateDisconnection(c.acc.GetName()); err != nil { c.srv.Errorf("Disconnection update error: " + err.Error()) } } + // ** added by Memphis } } // If writeLoop exists, let it do the final flush, close and teardown. @@ -1934,13 +1984,6 @@ func (c *client) sendErrAndDebug(err string) { c.Debugf(err) } -// *** added by Memphis -func (c *client) sendErrAndWarn(funcName, err string) { - c.sendErr(err) - // c.Warnf("[tenant: %s]%s: %s", c.acc.GetName(), funcName, err) -} -// added by Memphis *** - func (c *client) authTimeout() { c.sendErrAndDebug("Authentication Timeout") c.closeConnection(AuthenticationTimeout) @@ -1961,6 +2004,7 @@ func (c *client) accountIdErr() { c.sendErrAndDebug("Authorization Violation: Wrong / missing account ID") c.closeConnection(MissingAccount) } + // added by Memphis *** func (c *client) authViolation() { @@ -1982,11 +2026,13 @@ func (c *client) authViolation() { ErrAuthentication.Error(), c.opts.Nkey) } else if hasUsers { - c.Warnf("%s - User %q", + c.Warnf("%s - User %q", // ** changed by Memphis to Warnf ErrAuthentication.Error(), c.opts.Username) } else { - c.Warnf(ErrAuthentication.Error()) + if c.srv != nil { + c.Warnf(ErrAuthentication.Error()) // ** changed by Memphis to Warnf + } } if c.isMqtt() { c.mqttEnqueueConnAck(mqttConnAckRCNotAuthorized, false) @@ -2035,6 +2081,35 @@ func (c *client) queueOutbound(data []byte) { // Add to pending bytes total. c.out.pb += int64(len(data)) + // Take a copy of the slice ref so that we can chop bits off the beginning + // without affecting the original "data" slice. + toBuffer := data + + // All of the queued []byte have a fixed capacity, so if there's a []byte + // at the tail of the buffer list that isn't full yet, we should top that + // up first. This helps to ensure we aren't pulling more []bytes from the + // pool than we need to. + if len(c.out.nb) > 0 { + last := &c.out.nb[len(c.out.nb)-1] + if free := cap(*last) - len(*last); free > 0 { + if l := len(toBuffer); l < free { + free = l + } + *last = append(*last, toBuffer[:free]...) + toBuffer = toBuffer[free:] + } + } + + // Now we can push the rest of the data into new []bytes from the pool + // in fixed size chunks. This ensures we don't go over the capacity of any + // of the buffers and end up reallocating. + for len(toBuffer) > 0 { + new := nbPoolGet(len(toBuffer)) + n := copy(new[:cap(new)], toBuffer) + c.out.nb = append(c.out.nb, new[:n]) + toBuffer = toBuffer[n:] + } + // Check for slow consumer via pending bytes limit. // ok to return here, client is going away. if c.kind == CLIENT && c.out.pb > c.out.mp { @@ -2050,58 +2125,6 @@ func (c *client) queueOutbound(data []byte) { return } - if c.out.p == nil && len(data) < maxBufSize { - if c.out.sz == 0 { - c.out.sz = startBufSize - } - if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { - c.out.p = c.out.s - c.out.s = nil - } else { - // FIXME(dlc) - make power of 2 if less than maxBufSize? 
- c.out.p = make([]byte, 0, c.out.sz) - } - } - // Determine if we copy or reference - available := cap(c.out.p) - len(c.out.p) - if len(data) > available { - // We can't fit everything into existing primary, but message will - // fit in next one we allocate or utilize from the secondary. - // So copy what we can. - if available > 0 && len(data) < int(c.out.sz) { - c.out.p = append(c.out.p, data[:available]...) - data = data[available:] - } - // Put the primary on the nb if it has a payload - if len(c.out.p) > 0 { - c.out.nb = append(c.out.nb, c.out.p) - c.out.p = nil - } - // TODO: It was found with LeafNode and Websocket that referencing - // the data buffer when > maxBufSize would cause corruption - // (reproduced with small maxBufSize=10 and TestLeafNodeWSNoBufferCorruption). - // So always make a copy for now. - - // We will copy to primary. - if c.out.p == nil { - // Grow here - if (c.out.sz << 1) <= maxBufSize { - c.out.sz <<= 1 - } - if len(data) > int(c.out.sz) { - c.out.p = make([]byte, 0, len(data)) - } else { - if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch? - c.out.p = c.out.s - c.out.s = nil - } else { - c.out.p = make([]byte, 0, c.out.sz) - } - } - } - } - c.out.p = append(c.out.p, data...) - // Check here if we should create a stall channel if we are falling behind. // We do this here since if we wait for consumer's writeLoop it could be // too late with large number of fan in producers. @@ -2188,9 +2211,28 @@ func (c *client) generateClientInfoJSON(info Info) []byte { info.CID = c.cid info.ClientIP = c.host info.MaxPayload = c.mpay - info.ConnectionId = c.memphisInfo.connectionId + info.ConnectionId = c.memphisInfo.connectionId // ** added by Memphis if c.isWebsocket() { info.ClientConnectURLs = info.WSConnectURLs + if c.srv != nil { // Otherwise lame duck info can panic + c.srv.websocket.mu.RLock() + info.TLSAvailable = c.srv.websocket.tls + if c.srv.websocket.tls && c.srv.websocket.server != nil { + if tc := c.srv.websocket.server.TLSConfig; tc != nil { + info.TLSRequired = !tc.InsecureSkipVerify + } + } + if c.srv.websocket.listener != nil { + laddr := c.srv.websocket.listener.Addr().String() + if h, p, err := net.SplitHostPort(laddr); err == nil { + if p, err := strconv.Atoi(p); err == nil { + info.Host = h + info.Port = p + } + } + } + c.srv.websocket.mu.RUnlock() + } } info.WSConnectURLs = nil // Generate the info json @@ -2231,7 +2273,7 @@ func (c *client) processPing() { // Record this to suppress us sending one if this // is within a given time interval for activity. - c.ping.last = time.Now() + c.lastIn = time.Now() // If not a CLIENT, we are done. Also the CONNECT should // have been received, but make sure it is so before proceeding @@ -2270,9 +2312,7 @@ func (c *client) processPing() { c.flags.set(firstPongSent) // If there was a cluster update since this client was created, // send an updated INFO protocol now. - // send connectionId to client if wasn't yet sent - if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) || !c.flags.isSet(connectionIdSent) { - c.flags.set(connectionIdSent) + if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) { c.enqueueProto(c.generateClientInfoJSON(srv.copyInfo())) } c.mu.Unlock() @@ -2588,7 +2628,7 @@ func (c *client) processSubEx(subject, queue, bsid []byte, cb msgHandler, noForw } } // Now check on leafnode updates. 
- srv.updateLeafNodes(acc, sub, 1) + acc.updateLeafNodes(sub, 1) return sub, nil } @@ -2887,7 +2927,7 @@ func (c *client) unsubscribe(acc *Account, sub *subscription, force, remove bool } } // Now check on leafnode updates. - c.srv.updateLeafNodes(nsub.im.acc, nsub, -1) + nsub.im.acc.updateLeafNodes(nsub, -1) } // Now check to see if this was part of a respMap entry for service imports. @@ -2951,7 +2991,7 @@ func (c *client) processUnsub(arg []byte) error { } } // Now check on leafnode updates. - srv.updateLeafNodes(acc, sub, -1) + acc.updateLeafNodes(sub, -1) } return nil @@ -3134,18 +3174,14 @@ var needFlush = struct{}{} // deliverMsg will deliver a message to a matching subscription and its underlying client. // We process all connection/client types. mh is the part that will be protocol/client specific. func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, subject, reply, mh, msg []byte, gwrply bool) bool { - if sub.client == nil { + // Check sub client and check echo. Only do this if not a service import. + if sub.client == nil || (c == sub.client && !sub.client.echo && !sub.si) { return false } + client := sub.client client.mu.Lock() - // Check echo - if c == client && !client.echo { - client.mu.Unlock() - return false - } - // Check if we have a subscribe deny clause. This will trigger us to check the subject // for a match against the denied subjects. if client.mperms != nil && client.checkDenySub(string(subject)) { @@ -3298,8 +3334,6 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su client.queueOutbound([]byte(CR_LF)) } - client.out.pm++ - // If we are tracking dynamic publish permissions that track reply subjects, // do that accounting here. We only look at client.replies which will be non-nil. if client.replies != nil && len(reply) > 0 { @@ -3314,7 +3348,7 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su // to intervene before this producer goes back to top of readloop. We are in the producer's // readloop go routine at this point. // FIXME(dlc) - We may call this alot, maybe suppress after first call? - if client.out.pm > 1 && client.out.pb > maxBufSize*2 { + if len(client.out.nb) != 0 { client.flushSignal() } @@ -3598,15 +3632,21 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { } // Mostly under testing scenarios. + c.mu.Lock() if c.srv == nil || c.acc == nil { + c.mu.Unlock() return false, false } + acc := c.acc + genidAddr := &acc.sl.genid // Check pub permissions - if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) { + if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowedFullCheck(string(c.pa.subject), true, true) { + c.mu.Unlock() c.pubPermissionViolation(c.pa.subject) return false, true } + c.mu.Unlock() // Now check for reserved replies. These are used for service imports. 
if c.kind == CLIENT && len(c.pa.reply) > 0 && isReservedReply(c.pa.reply) { @@ -3627,10 +3667,10 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { // performance impact reported in our bench) var isGWRouted bool if c.kind != CLIENT { - if atomic.LoadInt32(&c.acc.gwReplyMapping.check) > 0 { - c.acc.mu.RLock() - c.pa.subject, isGWRouted = c.acc.gwReplyMapping.get(c.pa.subject) - c.acc.mu.RUnlock() + if atomic.LoadInt32(&acc.gwReplyMapping.check) > 0 { + acc.mu.RLock() + c.pa.subject, isGWRouted = acc.gwReplyMapping.get(c.pa.subject) + acc.mu.RUnlock() } } else if atomic.LoadInt32(&c.gwReplyMapping.check) > 0 { c.mu.Lock() @@ -3673,7 +3713,7 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { var r *SublistResult var ok bool - genid := atomic.LoadUint64(&c.acc.sl.genid) + genid := atomic.LoadUint64(genidAddr) if genid == c.in.genid && c.in.results != nil { r, ok = c.in.results[string(c.pa.subject)] } else { @@ -3684,15 +3724,17 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { // Go back to the sublist data structure. if !ok { - r = c.acc.sl.Match(string(c.pa.subject)) - c.in.results[string(c.pa.subject)] = r - // Prune the results cache. Keeps us from unbounded growth. Random delete. - if len(c.in.results) > maxResultCacheSize { - n := 0 - for subject := range c.in.results { - delete(c.in.results, subject) - if n++; n > pruneSize { - break + r = acc.sl.Match(string(c.pa.subject)) + if len(r.psubs)+len(r.qsubs) > 0 { + c.in.results[string(c.pa.subject)] = r + // Prune the results cache. Keeps us from unbounded growth. Random delete. + if len(c.in.results) > maxResultCacheSize { + n := 0 + for subject := range c.in.results { + delete(c.in.results, subject) + if n++; n > pruneSize { + break + } } } } @@ -3715,7 +3757,7 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 { flag |= pmrCollectQueueNames } - didDeliver, qnames = c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, flag) + didDeliver, qnames = c.processMsgResults(acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, flag) } // Now deal with gateways @@ -3725,7 +3767,7 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { reply = append(reply, '@') reply = append(reply, c.pa.deliver...) } - didDeliver = c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, qnames) || didDeliver + didDeliver = c.sendMsgToGateways(acc, msg, c.pa.subject, reply, qnames) || didDeliver } // Check to see if we did not deliver to anyone and the client has a reply subject set @@ -3931,6 +3973,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt checkJS = true } } + siAcc := si.acc acc.mu.RUnlock() // We have a special case where JetStream pulls in all service imports through one export. @@ -3961,7 +4004,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt } } else if !isResponse && si.latency != nil && tracking { // Check to see if this was a bad request with no reply and we were supposed to be tracking. - si.acc.sendBadRequestTrackingLatency(si, c, headers) + siAcc.sendBadRequestTrackingLatency(si, c, headers) } // Send tracking info here if we are tracking this response. @@ -3989,7 +4032,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt // Now check to see if this account has mappings that could affect the service import. 
// Can't use non-locked trick like in processInboundClientMsg, so just call into selectMappedSubject // so we only lock once. - nsubj, changed := si.acc.selectMappedSubject(to) + nsubj, changed := siAcc.selectMappedSubject(to) if changed { c.pa.mapped = []byte(to) to = nsubj @@ -4006,7 +4049,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt // Place our client info for the request in the original message. // This will survive going across routes, etc. if !isResponse { - isSysImport := si.acc == c.srv.SystemAccount() + isSysImport := siAcc == c.srv.SystemAccount() var ci *ClientInfo if hadPrevSi && c.pa.hdr >= 0 { var cis ClientInfo @@ -4047,11 +4090,11 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt c.pa.reply = nrr if changed && c.isMqtt() && c.pa.hdr > 0 { - c.srv.mqttStoreQoS1MsgForAccountOnNewSubject(c.pa.hdr, msg, si.acc.GetName(), to) + c.srv.mqttStoreQoS1MsgForAccountOnNewSubject(c.pa.hdr, msg, siAcc.GetName(), to) } // FIXME(dlc) - Do L1 cache trick like normal client? - rr := si.acc.sl.Match(to) + rr := siAcc.sl.Match(to) // If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we // need to handle that since the processMsgResults will want a queue filter. @@ -4076,10 +4119,10 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt if c.srv.gateway.enabled { flags |= pmrCollectQueueNames var queues [][]byte - didDeliver, queues = c.processMsgResults(si.acc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) - didDeliver = c.sendMsgToGateways(si.acc, msg, []byte(to), nrr, queues) || didDeliver + didDeliver, queues = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) + didDeliver = c.sendMsgToGateways(siAcc, msg, []byte(to), nrr, queues) || didDeliver } else { - didDeliver, _ = c.processMsgResults(si.acc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) + didDeliver, _ = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) } // Restore to original values. @@ -4112,7 +4155,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt } else { // This is a main import and since we could not even deliver to the exporting account // go ahead and remove the respServiceImport we created above. - si.acc.removeRespServiceImport(rsi, reason) + siAcc.removeRespServiceImport(rsi, reason) } } } @@ -4199,7 +4242,7 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, // delivery subject for clients var dsubj []byte // Used as scratch if mapping - var _dsubj [64]byte + var _dsubj [128]byte // For stats, we will keep track of the number of messages that have been // delivered and then multiply by the size of that message and update @@ -4612,17 +4655,14 @@ func (c *client) processPingTimer() { now := time.Now() needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL - // Do not delay PINGs for GATEWAY or spoke LEAF connections. - if c.kind == GATEWAY || c.isSpokeLeafNode() { + // Do not delay PINGs for ROUTER, GATEWAY or spoke LEAF connections. + if c.kind == ROUTER || c.kind == GATEWAY || c.isSpokeLeafNode() { sendPing = true } else { - // If we have had activity within the PingInterval then - // there is no need to send a ping. This can be client data - // or if we received a ping from the other side. 
- if delta := now.Sub(c.last); delta < pingInterval && !needRTT { - c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second)) - } else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT { - c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second)) + // If we received client data or a ping from the other side within the PingInterval, + // then there is no need to send a ping. + if delta := now.Sub(c.lastIn); delta < pingInterval && !needRTT { + c.Debugf("Delaying PING due to remote client data or ping %v ago", delta.Round(time.Second)) } else { sendPing = true } @@ -4730,7 +4770,10 @@ func (c *client) flushAndClose(minimalFlush bool) { } c.flushOutbound() } - c.out.p, c.out.s = nil, nil + for i := range c.out.nb { + nbPoolPut(c.out.nb[i]) + } + c.out.nb = nil // Close the low level connection. if c.nc != nil { @@ -4769,12 +4812,19 @@ func (c *client) kindString() string { // an older one. func (c *client) swapAccountAfterReload() { c.mu.Lock() - defer c.mu.Unlock() - if c.srv == nil { + srv := c.srv + an := c.acc.GetName() + c.mu.Unlock() + if srv == nil { return } - acc, _ := c.srv.LookupAccount(c.acc.Name) - c.acc = acc + if acc, _ := srv.LookupAccount(an); acc != nil { + c.mu.Lock() + if c.acc != acc { + c.acc = acc + } + c.mu.Unlock() + } } // processSubsOnConfigReload removes any subscriptions the client has that are no @@ -4941,7 +4991,7 @@ func (c *client) closeConnection(reason ClosedState) { srv.gatewayUpdateSubInterest(acc.Name, sub, -1) } } - srv.updateLeafNodes(acc, sub, -1) + acc.updateLeafNodes(sub, -1) } else { // We handle queue subscribers special in case we // have a bunch we can just send one update to the @@ -4966,7 +5016,7 @@ func (c *client) closeConnection(reason ClosedState) { srv.gatewayUpdateSubInterest(acc.Name, esub.sub, -(esub.n)) } } - srv.updateLeafNodes(acc, esub.sub, -(esub.n)) + acc.updateLeafNodes(esub.sub, -(esub.n)) } if prev := acc.removeClient(c); prev == 1 { srv.decActiveAccounts() @@ -5309,7 +5359,10 @@ func (c *client) doTLSHandshake(typ string, solicit bool, url *url.URL, tlsConfi if solicit { // Based on type of error, possibly clear the saved tlsName // See: https://github.com/nats-io/nats-server/issues/1256 - if _, ok := err.(x509.HostnameError); ok { + // NOTE: As of Go 1.20, the HostnameError is wrapped so cannot + // type assert to check directly. + var hostnameErr x509.HostnameError + if errors.As(err, &hostnameErr) { if host == tlsName { resetTLSName = true } @@ -5353,20 +5406,28 @@ func (c *client) doTLSHandshake(typ string, solicit bool, url *url.URL, tlsConfi return false, err } -// getRAwAuthUser returns the raw auth user for the client. +// getRawAuthUserLock returns the raw auth user for the client. +// Will acquire the client lock. +func (c *client) getRawAuthUserLock() string { + c.mu.Lock() + defer c.mu.Unlock() + return c.getRawAuthUser() +} + +// getRawAuthUser returns the raw auth user for the client. // Lock should be held. func (c *client) getRawAuthUser() string { switch { - case c.opts.Nkey != "": + case c.opts.Nkey != _EMPTY_: return c.opts.Nkey - case c.opts.Username != "": + case c.opts.Username != _EMPTY_: return c.opts.Username - case c.opts.JWT != "": + case c.opts.JWT != _EMPTY_: return c.pubKey - case c.opts.Token != "": + case c.opts.Token != _EMPTY_: return c.opts.Token default: - return "" + return _EMPTY_ } } @@ -5374,11 +5435,11 @@ func (c *client) getRawAuthUser() string { // Lock should be held. 
func (c *client) getAuthUser() string { switch { - case c.opts.Nkey != "": + case c.opts.Nkey != _EMPTY_: return fmt.Sprintf("Nkey %q", c.opts.Nkey) - case c.opts.Username != "": + case c.opts.Username != _EMPTY_: return fmt.Sprintf("User %q", c.opts.Username) - case c.opts.JWT != "": + case c.opts.JWT != _EMPTY_: return fmt.Sprintf("JWT User %q", c.pubKey) default: return `User "N/A"` @@ -5480,10 +5541,12 @@ func (c *client) Tracef(format string, v ...interface{}) { c.srv.Tracef(format, v...) } +// ** added by Memphis func (c *client) Systemf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Systemf(format, v...) } +// ** added by Memphis func (c *client) Warnf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) diff --git a/server/client_test.go b/server/client_test.go index 2ff9d2c41..518df8867 100644 --- a/server/client_test.go +++ b/server/client_test.go @@ -31,7 +31,6 @@ import ( "testing" "time" - "crypto/rand" "crypto/tls" "github.com/nats-io/jwt/v2" @@ -1484,7 +1483,11 @@ func TestWildcardCharsInLiteralSubjectWorks(t *testing.T) { } } -func TestDynamicBuffers(t *testing.T) { +// This test ensures that coalescing into the fixed-size output +// queues works as expected. When bytes are queued up, they should +// not overflow a buffer until the capacity is exceeded, at which +// point a new buffer should be added. +func TestClientOutboundQueueCoalesce(t *testing.T) { opts := DefaultOptions() s := RunServer(opts) defer s.Shutdown() @@ -1495,139 +1498,49 @@ func TestDynamicBuffers(t *testing.T) { } defer nc.Close() - // Grab the client from server. - s.mu.Lock() - lc := len(s.clients) - c := s.clients[s.gcid] - s.mu.Unlock() - - if lc != 1 { - t.Fatalf("Expected only 1 client but got %d\n", lc) - } - if c == nil { - t.Fatal("Expected to retrieve client\n") + clients := s.GlobalAccount().getClients() + if len(clients) != 1 { + t.Fatal("Expecting a client to exist") } + client := clients[0] + client.mu.Lock() + defer client.mu.Unlock() - // Create some helper functions and data structures. - done := make(chan bool) // Used to stop recording. - type maxv struct{ rsz, wsz int32 } // Used to hold max values. - results := make(chan maxv) + // First up, queue something small into the queue. + client.queueOutbound([]byte{1, 2, 3, 4, 5}) - // stopRecording stops the recording ticker and releases go routine. - stopRecording := func() maxv { - done <- true - return <-results + if len(client.out.nb) != 1 { + t.Fatal("Expecting a single queued buffer") } - // max just grabs max values. - max := func(a, b int32) int32 { - if a > b { - return a - } - return b + if l := len(client.out.nb[0]); l != 5 { + t.Fatalf("Expecting only 5 bytes in the first queued buffer, found %d instead", l) } - // Returns current value of the buffer sizes. - getBufferSizes := func() (int32, int32) { - c.mu.Lock() - defer c.mu.Unlock() - return c.in.rsz, c.out.sz - } - // Record the max values seen. - recordMaxBufferSizes := func() { - ticker := time.NewTicker(10 * time.Microsecond) - defer ticker.Stop() - - var m maxv - recordMax := func() { - rsz, wsz := getBufferSizes() - m.rsz = max(m.rsz, rsz) - m.wsz = max(m.wsz, wsz) - } + // Then queue up a few more bytes, but not enough + // to overflow into the next buffer. 
+ client.queueOutbound([]byte{6, 7, 8, 9, 10}) - for { - select { - case <-done: - recordMax() - results <- m - return - case <-ticker.C: - recordMax() - } - } + if len(client.out.nb) != 1 { + t.Fatal("Expecting a single queued buffer") } - // Check that the current value is what we expected. - checkBuffers := func(ers, ews int32) { - t.Helper() - rsz, wsz := getBufferSizes() - if rsz != ers { - t.Fatalf("Expected read buffer of %d, but got %d\n", ers, rsz) - } - if wsz != ews { - t.Fatalf("Expected write buffer of %d, but got %d\n", ews, wsz) - } + if l := len(client.out.nb[0]); l != 10 { + t.Fatalf("Expecting 10 bytes in the first queued buffer, found %d instead", l) } - // Check that the max was as expected. - checkResults := func(m maxv, rsz, wsz int32) { - t.Helper() - if rsz != m.rsz { - t.Fatalf("Expected read buffer of %d, but got %d\n", rsz, m.rsz) - } - if wsz != m.wsz { - t.Fatalf("Expected write buffer of %d, but got %d\n", wsz, m.wsz) - } - } - - // Here is where testing begins.. - - // Should be at or below the startBufSize for both. - rsz, wsz := getBufferSizes() - if rsz > startBufSize { - t.Fatalf("Expected read buffer of <= %d, but got %d\n", startBufSize, rsz) - } - if wsz > startBufSize { - t.Fatalf("Expected write buffer of <= %d, but got %d\n", startBufSize, wsz) - } - - // Send some data. - data := make([]byte, 2048) - rand.Read(data) - - go recordMaxBufferSizes() - for i := 0; i < 200; i++ { - nc.Publish("foo", data) - } - nc.Flush() - m := stopRecording() - - if m.rsz != maxBufSize && m.rsz != maxBufSize/2 { - t.Fatalf("Expected read buffer of %d or %d, but got %d\n", maxBufSize, maxBufSize/2, m.rsz) + // Finally, queue up something that is guaranteed + // to overflow. + b := nbPoolSmall.Get().(*[nbPoolSizeSmall]byte)[:] + b = b[:cap(b)] + client.queueOutbound(b) + if len(client.out.nb) != 2 { + t.Fatal("Expecting buffer to have overflowed") } - if m.wsz > startBufSize { - t.Fatalf("Expected write buffer of <= %d, but got %d\n", startBufSize, m.wsz) + if l := len(client.out.nb[0]); l != cap(b) { + t.Fatalf("Expecting %d bytes in the first queued buffer, found %d instead", cap(b), l) } - - // Create Subscription to test outbound buffer from server. - nc.Subscribe("foo", func(m *nats.Msg) { - // Just eat it.. - }) - go recordMaxBufferSizes() - - for i := 0; i < 200; i++ { - nc.Publish("foo", data) - } - nc.Flush() - - m = stopRecording() - checkResults(m, maxBufSize, maxBufSize) - - // Now test that we shrink correctly. - - // Should go to minimum for both.. 
- for i := 0; i < 20; i++ { - nc.Flush() + if l := len(client.out.nb[1]); l != 10 { + t.Fatalf("Expecting 10 bytes in the second queued buffer, found %d instead", l) } - checkBuffers(minBufSize, minBufSize) } func TestClientTraceRace(t *testing.T) { @@ -2246,7 +2159,6 @@ func TestFlushOutboundNoSliceReuseIfPartial(t *testing.T) { expected.Write(buf) c.mu.Lock() c.queueOutbound(buf) - c.out.sz = 10 c.flushOutbound() fakeConn.partial = false c.mu.Unlock() diff --git a/server/config_check_test.go b/server/config_check_test.go index 74f9e75d8..9b1e6d250 100644 --- a/server/config_check_test.go +++ b/server/config_check_test.go @@ -1579,13 +1579,30 @@ func TestConfigCheck(t *testing.T) { errorLine: 5, errorPos: 6, }, + { + name: "show warnings on empty configs without values", + config: ``, + warningErr: errors.New(`config has no values or is empty`), + errorLine: 0, + errorPos: 0, + reason: "", + }, + { + name: "show warnings on empty configs without values and only comments", + config: `# Valid file but has no usable values. + `, + warningErr: errors.New(`config has no values or is empty`), + errorLine: 0, + errorPos: 0, + reason: "", + }, } checkConfig := func(config string) error { opts := &Options{ CheckConfig: true, } - return opts.ProcessConfigFile(config, false) + return opts.ProcessConfigFile(config, false) // ** false added by Memphis } checkErr := func(t *testing.T, err, expectedErr error) { @@ -1620,6 +1637,8 @@ func TestConfigCheck(t *testing.T) { if test.reason != "" { msg += ": " + test.reason } + } else if test.warningErr != nil { + msg = expectedErr.Error() } else { msg = test.reason } @@ -1639,7 +1658,7 @@ func TestConfigCheckIncludes(t *testing.T) { opts := &Options{ CheckConfig: true, } - err := opts.ProcessConfigFile("./configs/include_conf_check_a.conf", false) + err := opts.ProcessConfigFile("./configs/include_conf_check_a.conf", false) // ** false added by Memphis if err != nil { t.Errorf("Unexpected error processing include files with configuration check enabled: %v", err) } @@ -1647,7 +1666,7 @@ func TestConfigCheckIncludes(t *testing.T) { opts = &Options{ CheckConfig: true, } - err = opts.ProcessConfigFile("./configs/include_bad_conf_check_a.conf", false) + err = opts.ProcessConfigFile("./configs/include_bad_conf_check_a.conf", false) // ** false added by Memphis if err == nil { t.Errorf("Expected error processing include files with configuration check enabled: %v", err) } @@ -1661,7 +1680,7 @@ func TestConfigCheckMultipleErrors(t *testing.T) { opts := &Options{ CheckConfig: true, } - err := opts.ProcessConfigFile("./configs/multiple_errors.conf", false) + err := opts.ProcessConfigFile("./configs/multiple_errors.conf", false) // ** false added by Memphis if err == nil { t.Errorf("Expected error processing config files with multiple errors check enabled: %v", err) } diff --git a/server/configs/certs/tls/benchmark-ca-cert.pem b/server/configs/certs/tls/benchmark-ca-cert.pem new file mode 100644 index 000000000..f91985a67 --- /dev/null +++ b/server/configs/certs/tls/benchmark-ca-cert.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID2zCCAsOgAwIBAgIUZj0PngA93uUSShcRndTQju/J88YwDQYJKoZIhvcNAQEL +BQAwfDETMBEGA1UEAwwKbmF0cy5pby5DQTEiMCAGA1UECwwZUGVyZm9ybWFuY2VB +bmRSZWxpYWJpbGl0eTEQMA4GA1UECgwHU3luYWRpYTEQMA4GA1UEBwwHVG9yb250 +bzEQMA4GA1UECAwHT250YXJpbzELMAkGA1UEBhMCQ0EwIBcNMjMwODE0MTUyNzU3 +WhgPMjEyMzA3MjExNTI3NTdaMHwxEzARBgNVBAMMCm5hdHMuaW8uQ0ExIjAgBgNV +BAsMGVBlcmZvcm1hbmNlQW5kUmVsaWFiaWxpdHkxEDAOBgNVBAoMB1N5bmFkaWEx 
+EDAOBgNVBAcMB1Rvcm9udG8xEDAOBgNVBAgMB09udGFyaW8xCzAJBgNVBAYTAkNB +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2cCyJL+DExUyZto2eFLm +MBRSkQLxM9pOWB9O8TecHlPcc/SPGq/x9lpguJ/IiaUj+VffVWy236KW2JL5Xj83 +PZwhXi1yZzxlIBsKAgAUeNfWuTAc0K0Qm9pR5Wjv5eNcT0mw6JX0SPgUQAl9BSwU +WvtMOTxOt0hBjHmZaEamp7nLmwogpvgPsrubD6U4O/vUQm3JTsbp2rFQxXPpkG19 +69PGsT37r0/w9Zv0xNAcB/zCWdNBXCTA2ACV2IpJedWm8Jrjcn3Kp4Fv3TKTsCZl +eWtfxCdljndk88+NFK7cEw7b9Bs5R5Zhu20C+Ne8vmMWhYbVBFYws5/jGzPBkVTD +7wIDAQABo1MwUTAdBgNVHQ4EFgQUEqfeAemfeIp4MM4C7H1bJS+mra4wHwYDVR0j +BBgwFoAUEqfeAemfeIp4MM4C7H1bJS+mra4wDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAQEAiamiPxOlZ0pwOJvv0ylDreHVk2kast67YlhAOcZoSMvi +e2jbKL98U3+ZznGj21AKqEaOkO7UmKoJ/3QlrjgElXzcMUrUrJ1WNowlbXPlAhyL +KhNthLKUr72Tv6wv5GZdAR6DaAwq3iYTbpnLq4oCnFHiXgDWgWyJDLsTGulWve/K +GGM2JMcnacNgNC18uki440Wcfp0vGj9HhO6I/u63oGewZnIK87GQMQCt3JLFyiUc +hrn9nWoixFWcJfCjBcMlwZXMIAlDdelU1/hWtSknKCs57GvZuACcicAYiYIkWCkd +p1pF4G0Ic6irAnLTqhdGwL4+5pjNd1Ih0Gezn9hJLg== +-----END CERTIFICATE----- diff --git a/server/configs/certs/tls/benchmark-ca-key.pem b/server/configs/certs/tls/benchmark-ca-key.pem new file mode 100644 index 000000000..69bbac4fc --- /dev/null +++ b/server/configs/certs/tls/benchmark-ca-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDZwLIkv4MTFTJm +2jZ4UuYwFFKRAvEz2k5YH07xN5weU9xz9I8ar/H2WmC4n8iJpSP5V99VbLbfopbY +kvlePzc9nCFeLXJnPGUgGwoCABR419a5MBzQrRCb2lHlaO/l41xPSbDolfRI+BRA +CX0FLBRa+0w5PE63SEGMeZloRqanucubCiCm+A+yu5sPpTg7+9RCbclOxunasVDF +c+mQbX3r08axPfuvT/D1m/TE0BwH/MJZ00FcJMDYAJXYikl51abwmuNyfcqngW/d +MpOwJmV5a1/EJ2WOd2Tzz40UrtwTDtv0GzlHlmG7bQL417y+YxaFhtUEVjCzn+Mb +M8GRVMPvAgMBAAECggEASvFWfm5JMtqjP6HPiGbjoV2FMzJjiFiUiSCxXzSn7wom +v+PGEsXGTWa6jiAz+SeUc38KNtDVOa+wIfani4fPP82J8GtMyfoPxdZ4gcq8QQDr +/k1wRWOi6Tjg4cdVdXXkMcencs0VR73V3lpFpG+Qy+VcTQCUCOF96dZ59VkHh4a4 +CHX6PegOWwr0TSaCUacwhua+rPmCar/btAYv7Wp7+c+Zf7Rn2WYTV7ol4sYXR4ZQ +Sy/ROijeFTMkYNpaW/KOO2/pn3OJA8ycYH8UWZpsenPfIajC0Eka7BoEQpw6M8HR +wRWrKwssBEs0psFiq9s8J+6resPgXfU/9pf+mTkTqQKBgQD8kktUqN+vYc6t22DE +tSkg8y8OsGh9VTYfp7x5hu9qEC9t4mAKjqA/rRLiTXze/wInntreRTjjMb/NlqMy +PvI0Z+dM1UuqcF1axgKrIYsgnLJWuunOhaj5K3LhiNcHznlCtN9601dbccwLlQhL +5jdjnOuJ0i+Nh9v5oiu37SfldwKBgQDctWdbF4hJrBPnS6CcojuQj6ha10wYYe46 +ZVcxKe5hFBs1q975YCHnEntyCDvXGfOTRgbKeZbwNhMvAc7Pp6eGMR/9SpiRwTt4 +567hUz56WXVmp4gSvxoNuYRlWiMI8rZkyKJ8KFipvHgRa8nuamh0QBB4ShEJiVk8 +fhaUiZeTSQKBgF0hAD/OKPR9Jv06J9tARVMN+Cr9ZvnXwqY3biqNU5gTMbndv7YE +0xfHlG/3THTZKI09aMyOT6SOQn/m7HPpe9tQ+Jt/BnBpEDMZUgCR1MAIp0WNlAp/ +hEej+q8oiskpG9M56DFc3hgsxKT8pdt+nqvPP5ZI9xnDn5vTbTVbb9uPAoGAFRvU +csXhZwpqLOjyx4hMohrbQzTsNjjHjBY9LJqSDf7aS1vQy5ECLRN7cwCOmJgGz8MW +yy6t3POPCiPmH74tK4xvPs5voSEWCw49j5dillkP/W1wejqEx2NC4l6okyaDg0gd +IjrJoBJCeYgRnBfZPaUS7i5HSt40BrEYf8RZFuECgYAjSnvY8nYFRudsylYOu3TL +AcGbAdpDfL2H4G9z7qEQC6t1LqqGNcfMan/qS4J/n5GCOedVWHcfCraROOMil52v +3ZDXyyjGEO08XgKnoa2ZL/z2a6s077+hAAnbcywyi2Qz8Yfi6GnwYfU1u8SN0APd +T71HPNsWkU4zkxmP4S0Olg== +-----END PRIVATE KEY----- diff --git a/server/configs/certs/tls/benchmark-server-cert-ed25519.pem b/server/configs/certs/tls/benchmark-server-cert-ed25519.pem new file mode 100644 index 000000000..bc9cb2d0a --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-cert-ed25519.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDHTCCAgWgAwIBAgIUc31LCktokAIQGqLsSC2BlsLCTsYwDQYJKoZIhvcNAQEL +BQAwfDETMBEGA1UEAwwKbmF0cy5pby5DQTEiMCAGA1UECwwZUGVyZm9ybWFuY2VB +bmRSZWxpYWJpbGl0eTEQMA4GA1UECgwHU3luYWRpYTEQMA4GA1UEBwwHVG9yb250 +bzEQMA4GA1UECAwHT250YXJpbzELMAkGA1UEBhMCQ0EwIBcNMjMwODE0MTUyNzU3 
+WhgPMjEyMzA3MjExNTI3NTdaMHkxEDAOBgNVBAMMB25hdHMuaW8xIjAgBgNVBAsM +GVBlcmZvcm1hbmNlQW5kUmVsaWFiaWxpdHkxEDAOBgNVBAoMB1N5bmFkaWExEDAO +BgNVBAcMB1Rvcm9udG8xEDAOBgNVBAgMB09udGFyaW8xCzAJBgNVBAYTAkNBMCow +BQYDK2VwAyEAyyc9y9iZgWWSsPRahbeGxF6XN3VOFPZBvD/HQps6jr6jgZEwgY4w +CwYDVR0PBAQDAgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMCoGA1UdEQQjMCGCDnJl +dWJlbi5uYXRzLmlvgg9yZXViZW4ubmF0cy5jb20wHQYDVR0OBBYEFBwkwMU8xuQO +FN1Ck5o2qQ4Dz87ZMB8GA1UdIwQYMBaAFBKn3gHpn3iKeDDOAux9WyUvpq2uMA0G +CSqGSIb3DQEBCwUAA4IBAQALjCynuxEobk1MYQAFhkrfAD29H6yRpOcKigHCZjTJ +Dnpupip1xvaFPPvhi4nxtuWcXgKpWEfd1jOPaiNV6lrefahitZpzcflD7wNOxqvx +Hau2U3lFnjnGaC0ppp66x26cQznp6YcTdxrJ1QF4vkOejxqNvaTzmiwzSPIIYm7+ +iKVWT+Z86WKof3vAdsX/f148YH1YSPk0ykiBzlbLScbyWebbaydrAIpU01IkSvMo +qDYu+Fba0tpONLe1BUklc608riwQjw9HiJJ2zJIAOBAUev5+48RP91/K111Ix1bl +fGPT8/1TJbyGG2jeJwyLoSIu72aDnnIBfqGkVunRTmeg +-----END CERTIFICATE----- diff --git a/server/configs/certs/tls/benchmark-server-cert-rsa-1024.pem b/server/configs/certs/tls/benchmark-server-cert-rsa-1024.pem new file mode 100644 index 000000000..be9bbafa9 --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-cert-rsa-1024.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIUc31LCktokAIQGqLsSC2BlsLCTsMwDQYJKoZIhvcNAQEL +BQAwfDETMBEGA1UEAwwKbmF0cy5pby5DQTEiMCAGA1UECwwZUGVyZm9ybWFuY2VB +bmRSZWxpYWJpbGl0eTEQMA4GA1UECgwHU3luYWRpYTEQMA4GA1UEBwwHVG9yb250 +bzEQMA4GA1UECAwHT250YXJpbzELMAkGA1UEBhMCQ0EwIBcNMjMwODE0MTUyNzU3 +WhgPMjEyMzA3MjExNTI3NTdaMHkxEDAOBgNVBAMMB25hdHMuaW8xIjAgBgNVBAsM +GVBlcmZvcm1hbmNlQW5kUmVsaWFiaWxpdHkxEDAOBgNVBAoMB1N5bmFkaWExEDAO +BgNVBAcMB1Rvcm9udG8xEDAOBgNVBAgMB09udGFyaW8xCzAJBgNVBAYTAkNBMIGf +MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCyHHaVHinB3jBsicR4hp7uopz0u3+O +kUicIUSQXDcWiPzdvE+7YZ/s4+Ud4aw4g9q0wHzkZSaMg8nil4tCKmTrUKolVTVj +CCCBmtqq3LwzNLapyoDJRyXsWqHt5TWYSxaf/UQT6sWOgqHOLrbd4J8F0sjxEniB +GDHR1ZXpJCBaIQIDAQABo4GRMIGOMAsGA1UdDwQEAwIEMDATBgNVHSUEDDAKBggr +BgEFBQcDATAqBgNVHREEIzAhgg5yZXViZW4ubmF0cy5pb4IPcmV1YmVuLm5hdHMu +Y29tMB0GA1UdDgQWBBQk5kWOUcUNn7FppddLANe3droUlzAfBgNVHSMEGDAWgBQS +p94B6Z94ingwzgLsfVslL6atrjANBgkqhkiG9w0BAQsFAAOCAQEA2Njy2f1PUZRf +G1/oZ0El7J8L6Ql1HmEC7tOTzbORg7U9uMHKqIFL/IXXAdAlE/EjFEA2riPO8cu/ +bvL2A4CapYzt2kDD9PPYfVtniRr7mv0EVntPwEvfiySMAEeZuW/M2liPfgPpQkhL +fzwPeCOfqM8AjpyDab8NEGX5Bbf421oQorlENpm4PKQCXoUN5cWpBwuwWxj7yndj +256MevLDKKe/ALSLQEo/2Jgpnmp7Qol0GtomCzsLgZ+ASuVtCsGTFmaRrsqVPspJ +oOl6qby5gYwN9TR8zfRYL1m1sbYROz+5+ofEoiTnaOoOSjiBIoYoMeSC/jvJQTPT +VdD8QeQ6Og== +-----END CERTIFICATE----- diff --git a/server/configs/certs/tls/benchmark-server-cert-rsa-2048.pem b/server/configs/certs/tls/benchmark-server-cert-rsa-2048.pem new file mode 100644 index 000000000..9fb03ac4f --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-cert-rsa-2048.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFzCCAv+gAwIBAgIUc31LCktokAIQGqLsSC2BlsLCTsQwDQYJKoZIhvcNAQEL +BQAwfDETMBEGA1UEAwwKbmF0cy5pby5DQTEiMCAGA1UECwwZUGVyZm9ybWFuY2VB +bmRSZWxpYWJpbGl0eTEQMA4GA1UECgwHU3luYWRpYTEQMA4GA1UEBwwHVG9yb250 +bzEQMA4GA1UECAwHT250YXJpbzELMAkGA1UEBhMCQ0EwIBcNMjMwODE0MTUyNzU3 +WhgPMjEyMzA3MjExNTI3NTdaMHkxEDAOBgNVBAMMB25hdHMuaW8xIjAgBgNVBAsM +GVBlcmZvcm1hbmNlQW5kUmVsaWFiaWxpdHkxEDAOBgNVBAoMB1N5bmFkaWExEDAO +BgNVBAcMB1Rvcm9udG8xEDAOBgNVBAgMB09udGFyaW8xCzAJBgNVBAYTAkNBMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyx3O+Z6u8Y1SiuHu3szWbLvL +WrZpSpEiZkll+wk5205S1FRcQLccfr4ubdtjOBdi+RzILCtkflUI01Dbqu6cV7/2 +yfLthxBeNDiXMhjyOFkYLwwE4w7CdTwWWsmW31oUH1rYXIDPoeb7WPF7w3NwaUJu +ZXnqM98LRgWDTmh+nsqDDW/bz1fYIdxcO9az6iBOnJ2AGWI2ur5GzWc4+gNMOZiZ 
+Xj657g0MbyVM4Gzyc4Au22hShZ/YorLP8NAiwNJamlrCFzlnZN/ePjuQPcI6glnb +oO9IAGfPdAOJptfayuPAZgUngzewB38yY0Q/rKG1GJKSkQ8X6/lXiWaRPZJjYwID +AQABo4GRMIGOMAsGA1UdDwQEAwIEMDATBgNVHSUEDDAKBggrBgEFBQcDATAqBgNV +HREEIzAhgg5yZXViZW4ubmF0cy5pb4IPcmV1YmVuLm5hdHMuY29tMB0GA1UdDgQW +BBRtanJZScdSlsPsi58lBcpdj+bV/zAfBgNVHSMEGDAWgBQSp94B6Z94ingwzgLs +fVslL6atrjANBgkqhkiG9w0BAQsFAAOCAQEAV4TZ3b8cYO7ZeRyoCQtCBAab9gNe +kbQpWqICvkVQOk5Anq3opwAWk2FuIRs5KoT7ssckHpXwTwWLs+KuIVo+Fet19IH6 +BQfck1jwhzM04MA6zLO/F2j548XlrJy3IzViPM/VxwMMTt5YSoogrz/3TzzJPIe0 +eQomf5HbpVgrf08pMVkdaI7PCd7N/CxeWiD5zEWqBu9FqofO188Kb/umx0VwgBju +dX46MKO5TyUc91UrG3M35/r4Z7fd52SWWWFQiI7UBOl2L27samjHlJsKjyFoBF3Z +alvnoUVzo7zwAYmhEdPYDNVceF4KtAFpGipoQPRMg83G87LgYBA4Sa6uKw== +-----END CERTIFICATE----- diff --git a/server/configs/certs/tls/benchmark-server-cert-rsa-4096.pem b/server/configs/certs/tls/benchmark-server-cert-rsa-4096.pem new file mode 100644 index 000000000..ed231a1a1 --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-cert-rsa-4096.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIUc31LCktokAIQGqLsSC2BlsLCTsUwDQYJKoZIhvcNAQEL +BQAwfDETMBEGA1UEAwwKbmF0cy5pby5DQTEiMCAGA1UECwwZUGVyZm9ybWFuY2VB +bmRSZWxpYWJpbGl0eTEQMA4GA1UECgwHU3luYWRpYTEQMA4GA1UEBwwHVG9yb250 +bzEQMA4GA1UECAwHT250YXJpbzELMAkGA1UEBhMCQ0EwIBcNMjMwODE0MTUyNzU3 +WhgPMjEyMzA3MjExNTI3NTdaMHkxEDAOBgNVBAMMB25hdHMuaW8xIjAgBgNVBAsM +GVBlcmZvcm1hbmNlQW5kUmVsaWFiaWxpdHkxEDAOBgNVBAoMB1N5bmFkaWExEDAO +BgNVBAcMB1Rvcm9udG8xEDAOBgNVBAgMB09udGFyaW8xCzAJBgNVBAYTAkNBMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAj/rw0WpCnizUSs06NxSsjuYb +6sWtYQjLd6O6SCoQwrPSY2Zv0u28RywBJkSiIbv12kWxrhqv+kDqhmt7tfwEdHAE +gOl6E2P/CFDnyAJ4it5qgFRHcEItGp1Ap4ZrZ3OTl41DDe28giflHb7+VAzxUV6r +zkHmrqkaJd41YrJoKyZ3u+/FHs6CudO+jGXC2ubH1i71ARXHn6tkmOTwFf1N49wB +Hr144xBmPyvH/A/elgXBjR28/7x64MO/qBZsr/lbTaS4YwN1+rod/HRc2GIiJxpS +tB1bh/dXZCfa0QyhTzCNLG/j7IzetDrPBGzpw2WjufhkSuaxoMDWlDkkfqmQxtHJ +5L+PqIiPT69tzEbfuS9Ogz7DW10CcpSdXW13sWCdSNCGEvPqLFka36q9B82V0GHz +tmx8VqdfWSu4qMyVmTsxxzLTTxpQU4X1Q2RnT/1igbsOM620LuvrvnXlh5Rdg7tp +T++QJ/b4xCCg62tv4VwORe27xHYXMeDn4aoRdyoI45/+ZK6yqwNAOrKKpse/M8uz +mJK2i8pfEFmitIKoNYn3MR2dFrCqifZkFf9rX9A/1Ym+WKAPLmWdGp13fvTdzxQG +Y44f9tBL2RWsoGX++01XEwIiWz7kqObC0L8fz3EdIPaULX7MZiQrxzzhRCcJhyn/ +aOrJfLYj0GAmIaHElHkCAwEAAaOBkTCBjjALBgNVHQ8EBAMCBDAwEwYDVR0lBAww +CgYIKwYBBQUHAwEwKgYDVR0RBCMwIYIOcmV1YmVuLm5hdHMuaW+CD3JldWJlbi5u +YXRzLmNvbTAdBgNVHQ4EFgQUbJSM8LNWmc9IgLe0X53yE3c3h1cwHwYDVR0jBBgw +FoAUEqfeAemfeIp4MM4C7H1bJS+mra4wDQYJKoZIhvcNAQELBQADggEBAEGmLvEE ++MTE1bHMbl/5QG+/xusmervIuxkfAfId0H+8TWB75y+yhVZpEdM7knfl+lexmtGQ +Gr4HNGTZhAZ3NYFaBw7nfeqO48He7gHUKfJA/zv7FREF3Fy+Qe/hydDJQzBzZfaU +64XqhY6jOurpZhTAoOXfjpYzZaLi7+rdpTAbfxHCCAC8SxZD3++Q97ZeoT6en47O +8SQQ7FIzWxs15k88oYalw51vZujxX7dz4l+LxsLXtlYW7ZM1163cgU7lF/jQqDcN +z8X8jk1AjQY7AuFPuOzQ1hLXcZySm8rUG5pPgHrZ1QKmkFFWRaeCiO2hU794wI5C +vIGR8lIhkNwEJnQ= +-----END CERTIFICATE----- diff --git a/server/configs/certs/tls/benchmark-server-key-ed25519.pem b/server/configs/certs/tls/benchmark-server-key-ed25519.pem new file mode 100644 index 000000000..edb1732a9 --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-key-ed25519.pem @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIJRCtUNxUuutNs9j8OtcwFw1xkbs+zxjHhpAqVuqDNo5 +-----END PRIVATE KEY----- diff --git a/server/configs/certs/tls/benchmark-server-key-rsa-1024.pem b/server/configs/certs/tls/benchmark-server-key-rsa-1024.pem new file mode 100644 index 000000000..4847c5a85 --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-key-rsa-1024.pem @@ -0,0 +1,16 
@@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALIcdpUeKcHeMGyJ +xHiGnu6inPS7f46RSJwhRJBcNxaI/N28T7thn+zj5R3hrDiD2rTAfORlJoyDyeKX +i0IqZOtQqiVVNWMIIIGa2qrcvDM0tqnKgMlHJexaoe3lNZhLFp/9RBPqxY6Coc4u +tt3gnwXSyPESeIEYMdHVlekkIFohAgMBAAECgYAwt8RfyV5WnvXT2mMZLIlwcJ5J ++rdLQcYAnsDoU7DlwxaXeBi/AlcCLtvOrpmy464A3t3KgzhmGu4vwo/ey0XK+nTQ +tzORP/PXTaVC8DzJ8PnJmUaB7l+H7a88OSPLbjgnbpw4SyvDpKUHiiw0EDYC7L6Y +1vvCOlnprptXbE5eeQJBAOpjwdBVWkVtmStjsxbxZsUTI7XKxS2VZinRLH0l5/hI +hIHRxwy9oRbeNrf5815lGolTUD0mq+N0dJRlMop1yKsCQQDCiGDkH/pQqhB8ibmD +0XNw0EzxJmPFACO/x49VCfCPE5p1FQhpyIl6JkyAFNN7Xs4HX8jMHTuvNgJVti61 +O0BjAkEAj0wr2vXDubyWrztF61nszcG0zFjKkeLL0fcLLvv0xQt4z3F0MyrgCH4U +kAflLSm8voZMAQbagbXZ7DuuWY5G/wJAWyKnOdidXZL+3ElthwrmKVD86vEQRqe1 +F9C3HqDkeTM25mkvItfXSEmPB2Y6WY7luOCv4qhDYOdNmrgaE7+pfwJAcbV5ZVJW +OZvH1ofsJVvUA8J58tzv1+KPb96pI3YRAu8xbMC0mzezPsYjg2wjaRgJ2C+7On27 +BaArNo75B20AkA== +-----END PRIVATE KEY----- diff --git a/server/configs/certs/tls/benchmark-server-key-rsa-2048.pem b/server/configs/certs/tls/benchmark-server-key-rsa-2048.pem new file mode 100644 index 000000000..60bd94579 --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-key-rsa-2048.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLHc75nq7xjVKK +4e7ezNZsu8tatmlKkSJmSWX7CTnbTlLUVFxAtxx+vi5t22M4F2L5HMgsK2R+VQjT +UNuq7pxXv/bJ8u2HEF40OJcyGPI4WRgvDATjDsJ1PBZayZbfWhQfWthcgM+h5vtY +8XvDc3BpQm5leeoz3wtGBYNOaH6eyoMNb9vPV9gh3Fw71rPqIE6cnYAZYja6vkbN +Zzj6A0w5mJlePrnuDQxvJUzgbPJzgC7baFKFn9iiss/w0CLA0lqaWsIXOWdk394+ +O5A9wjqCWdug70gAZ890A4mm19rK48BmBSeDN7AHfzJjRD+sobUYkpKRDxfr+VeJ +ZpE9kmNjAgMBAAECggEADicdEWKfsQAWZMv6X3bpZ/kr4b29F2+GdJcfrn7Fk8Tg +25+nL/EyYJhy1r/HKZTjlhUN05oQbgcRztue+smWhjy/fvHY4CThU4Uv79MyKX/3 +wet1+bZBEXcm3ZuXUifOKCMl2Ug2b4MPN3LYG1XTWtpAo/x7N7MOb4oZzKBWVk0J +GEfxvJVZyVe3libIN2WWZFQJ1620AxaWP2jZ83PhRR/TJIBiq+pI/lBIyt+nu8Z3 +3vzB020R2uvENOhnaDjzNVfnJxSvlAAQkN47zo2Tf4UnB0792Y7DEEjFKX4XCFVd +zxmjmw7VcRcnyruDhCoRC6mNraaAHuMqPwuBoC6GtQKBgQD/7CV8XKzcPPfFJY9e +aHPzgXJwK+5u3jq7tNYUksVfv0s2lLQnRdbqAhHzxNYLQjdVd7J6t55h2z8scYaP +oB7TTwszhKZS2sQ/lcpfOFoFN6KjN0iOnXVFucGoQ36gexNqPw894NFKWX/RHrnZ +UfL/OnOUPpra3w+WMxjUYQ5ivQKBgQDLLZDIpM8fSRKeqZzjv/eoKnoDhqmzPipj +bvNXAkIr/nWfUHL9YRnpxX7PW8DqFWIYoM8b0uOKO4vLJnQUMyc2jPq1rPXEBrjk +w+xKWCipKdPrqwttiLKme+ZArRT5CJ9qcqxSX0yNp69xwXtyHe+zY18F/a4+70wT +5wJwYmhQnwKBgHpICTk8xtOMxg6K/c/sNMr65QU32Htc789Ufp3h6zDupC92KgZB +1oiFaLKDMIq8ntfVk5ATQDgdnDfOHq9toIzyzbVWAmrAYNjI56NLt6eah7lY5vBN +yAUC1sdhSJXBeOthKhU04IuX6/yto7t07piJA0SoDTHbNwVbcNe5cDg5AoGBALjR +jxVlDd+4mc5oHYXy1rZLRUg10+JvlyFyCLrKHCVmx9oO1Tr1fBvxggPfw+FraBtd +FGiL8l2JAwXdydOiIHZ30Ys3dSxGrSOzsRqDjSEsIlEK+088/L2CkRWeHCjYliK/ +g08+zyVANtC0nrVU0/mLWCHb/AfVp4+nIMnYSmmjAoGAEMyBqq2AyUmx1xAmhw36 +LqgKy+vgHEAFRFPD8IttHFLOlUdXlvxoDq4xW2a7bJsJrs9ZrluRFKVh7QnSckmP +Jt/Plg+XYB3B2exD5Xyh9xNYNVW/Aqvg+NuiWeCGK/o7mUfGWd9qWrD2aw51m+X3 +Svtkgck1kulqPoUFG1b3R4k= +-----END PRIVATE KEY----- diff --git a/server/configs/certs/tls/benchmark-server-key-rsa-4096.pem b/server/configs/certs/tls/benchmark-server-key-rsa-4096.pem new file mode 100644 index 000000000..daf897f85 --- /dev/null +++ b/server/configs/certs/tls/benchmark-server-key-rsa-4096.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCP+vDRakKeLNRK +zTo3FKyO5hvqxa1hCMt3o7pIKhDCs9JjZm/S7bxHLAEmRKIhu/XaRbGuGq/6QOqG +a3u1/AR0cASA6XoTY/8IUOfIAniK3mqAVEdwQi0anUCnhmtnc5OXjUMN7byCJ+Ud +vv5UDPFRXqvOQeauqRol3jVismgrJne778UezoK5076MZcLa5sfWLvUBFcefq2SY 
+5PAV/U3j3AEevXjjEGY/K8f8D96WBcGNHbz/vHrgw7+oFmyv+VtNpLhjA3X6uh38 +dFzYYiInGlK0HVuH91dkJ9rRDKFPMI0sb+PsjN60Os8EbOnDZaO5+GRK5rGgwNaU +OSR+qZDG0cnkv4+oiI9Pr23MRt+5L06DPsNbXQJylJ1dbXexYJ1I0IYS8+osWRrf +qr0HzZXQYfO2bHxWp19ZK7iozJWZOzHHMtNPGlBThfVDZGdP/WKBuw4zrbQu6+u+ +deWHlF2Du2lP75An9vjEIKDra2/hXA5F7bvEdhcx4OfhqhF3Kgjjn/5krrKrA0A6 +soqmx78zy7OYkraLyl8QWaK0gqg1ifcxHZ0WsKqJ9mQV/2tf0D/Vib5YoA8uZZ0a +nXd+9N3PFAZjjh/20EvZFaygZf77TVcTAiJbPuSo5sLQvx/PcR0g9pQtfsxmJCvH +POFEJwmHKf9o6sl8tiPQYCYhocSUeQIDAQABAoICAAEYUQ4KqeyJiJN0DvIIdc0q +v1dwVEKlaOS6nvSkYJcWe9kL2E8JRfy1lIxTCneethPdoqhhZWljZn/fX+QRAVir +BGxq5NCHxWgDCNble+oJydNlhgXldEcxnyJXBvM/trA49Q5X+pkeeWMvRsBiuSVx +BqCi3CtZDQzw18TtzessF6OF2IuE2bYqP9anVyLH4rOvN5JKn9zH69PaLorq76rL +YMp4JEFNKIs+R5RUFjwxBC6Q+r+9fM1qTQdtJOZMC293pfusylAoll315ZczXIah +xYi/ThOmjtMrwWyEan1PcCIVzMI0X7o3q0eMQNU0ApmBY9k0+sYEnsJ8Um8l1icb +WE/KBDzQk2mSoIzEdo99gYiuLAhi5WTEcQzeovZKHKkhRxLn3GUK/ZznpTuI0Voh +1Qbfw05wf62yh8/nc747n5ULPgf8/nUH5G8jUeSis9gMGtFloqIxUEBphzIHwj7W +FUIKoKpGkHL4Jkg9wXIe7lg5Qp+cdCrkWKKvQc57EbWxsM1Pjktw6X5ctNQclWG0 +WQJUw+BJhq6InKAV4aDW4/QlCOjvGtDDkYvwghE+ZgIzm9BARM4+DbeoPTmAkwwz +6NTM8DYWvdcYVpgchEwphS6OFkOD1Dc6Wg2OUUtLjvl4NVf/NDYN8wIWNwmMm4aj ++Is12NwoUqZ44E4pSfohAoIBAQDLloatDuObuLzInBqER2tByPNnDV9AVTR2rm6w +TgBmslFHBCf/uSzq0ivh1oZmFfDhcqdrtheTL0BWXbFxR/GelDm9N4d/LqOGRyWV +Qmy5dGp2wE6gsZGZuvsbVst6eFO+zgDLvDMLrVqP+577g/QbrSR8gPz39WCgLpvf +RcNpM2rHeKkm09IdUe0G+YEnW3fD7rxtMS+j8J57yuuxLO+SemGJuJQqGHYOukxF +KMZxX2wMs1YMNwKEXY0/9M+LAtW9J3azkRp5xd+E2kzXGoWcl2nS/3+J6nh1+HWq +SZE7SDWSimzut/aPTOM7YxLVqtANWHzVBBNpTbOMSrSZTZQJAoIBAQC1C/afOROb +GQMb1LOt95AxvGyYo62mOkcPxhxQGojVIi+Yz8Q1ZWTuT5qyyWSofR3bE9mQwQGW +KQ/znJKiwA48BS0osnmQi3Bi6eVD73NfwxMkL6zrGFxf336DNQi9mbidJvOaHNsT +wXGPqRQXDcLa3s3WqerDzKJ2gxG22rtkoC0uCw+J9NlkJTTI9bJ9rjDZIh6mE/M0 +3ye19IgkBdbMv7FSjGVpovdqWZ2HELDYAXJftzuMPUO1GNYbyMHAHnWH0a1TQRM8 +ELHzmPRaFBeegmpBVaenlxKoi72iwqrVFrEr5FLAKxLq/9RwizJ8FzySIpH/5+hU +Ky4mG93lmHjxAoIBAQDLOXs+jTpPW923M3yUxuYeSQYPvJ10jplMT1tWysZDvYS8 +qz1yW9qmnR4I1ihbB1Po+JZ/QsnNtsE2dViHiBV9AuGQLDopjtjVVXgCwsfdaIRN +/jF+30JEfw3igIWlvy95rBHHThp2cZmRWM+eql2msvNVBT2AF4VY4K3f9rfV7+mL +LLtNcuyvL/S3naB7NSccgte89/hiYfMSB8G2nvCW+2saGxJr4vcWRImWD9nnmiU1 +mF8w2ki88NXrHel/Dlll9FrdbN9M52T0LSW/I050vgB5C2q4tUGCIX7zeXRsBOzV +VzDeKu0Ipuu9gGxwtY3xhH839FWcLGAqjvgwf+xhAoIBAQCuvWs9ZoNr0QpVFEiQ +Aj9kIa7W7DOwGtN3gAjXr6Sdwa8a2H1R5Bk0ghSXtxW2IXxtdI0qz35OhjdlM5u8 +BY43k+9wNkJqporEjWfA2B4NMWUKKhHFnu+ZgUbEMK3NAc9TrsKz3mH8gVqwA8rm +LVwCj8UwCTQT4zBzHjI8wITZrFeu9vH6fx5LMDXwOGQcNcHj8LCQLvUv9KqJTgkQ +a6pUWDg3qlY/TRFrzi7iq9NjyJGxnFKXGpJ8+gm9K1kFquBZRKD7l/WOpbZ7nQdK +4dWiIdGYWanFcWSK1MUlkKn9nTdHW8oau/g4ZM+QCGmjp3HIwiEUU6rDgiG6mm7j +KPShAoIBAG1sFn8X841038Z/sp4JzYypQjj7g9UOTkghHcJHIZzRXLiTqW3lKxbt +Gz93DjWRxXD404hem8dOUf22VgfjB4z1mSrV5SWtLjLf5wD8gl0eUNKP2lZFLggz +O6nHCLLzlKs2RH5pDo3c8qyjLRIfCXy0YGnr+9RErVJG+TVB/MOSgHagmkdVOYCH +phw4EiwJ+rPFy/xm5D6+BuOyt7hw7boQsw3EHpZTyQHWObcIggjlopTX2VSG8Dx+ +/iQRTuVRVyNAhYwCuNtSh27zawWr+A40acFJsJpvkFkbZBEH1IqoCteUaiEVU1qm +51lKgt3ZVAXuecJ1U/0u4HtC0QdEBGE= +-----END PRIVATE KEY----- diff --git a/server/configs/reload/reload.conf b/server/configs/reload/reload.conf index 613033a73..068500b32 100644 --- a/server/configs/reload/reload.conf +++ b/server/configs/reload/reload.conf @@ -6,6 +6,7 @@ port: 2233 debug: true # enable on reload trace: true # enable on reload logtime: true # enable on reload +logtime_utc: true # enable on reload log_file: "nats-server.log" # change on reload pid_file: "nats-server.pid" # change on reload diff --git a/server/configs/tls/tls-ed25519.conf 
b/server/configs/tls/tls-ed25519.conf new file mode 100644 index 000000000..dcb8fd945 --- /dev/null +++ b/server/configs/tls/tls-ed25519.conf @@ -0,0 +1,10 @@ +# Simple TLS (ed25519) config file + +listen: 127.0.0.1:-1 + +tls { + cert_file: "./configs/certs/tls/benchmark-server-cert-ed25519.pem" + key_file: "./configs/certs/tls/benchmark-server-key-ed25519.pem" + ca_file: "./configs/certs/tls/benchmark-ca-cert.pem" + timeout: "5s" +} diff --git a/server/configs/tls/tls-none.conf b/server/configs/tls/tls-none.conf new file mode 100644 index 000000000..042bf4e0b --- /dev/null +++ b/server/configs/tls/tls-none.conf @@ -0,0 +1,4 @@ +# Simple config file + +listen: 127.0.0.1:-1 + diff --git a/server/configs/tls/tls-rsa-1024.conf b/server/configs/tls/tls-rsa-1024.conf new file mode 100644 index 000000000..fb3aaa418 --- /dev/null +++ b/server/configs/tls/tls-rsa-1024.conf @@ -0,0 +1,10 @@ +# Simple TLS (rsa-1024) config file + +listen: 127.0.0.1:-1 + +tls { + cert_file: "./configs/certs/tls/benchmark-server-cert-rsa-1024.pem" + key_file: "./configs/certs/tls/benchmark-server-key-rsa-1024.pem" + ca_file: "./configs/certs/tls/benchmark-ca-cert.pem" + timeout: "5s" +} diff --git a/server/configs/tls/tls-rsa-2048.conf b/server/configs/tls/tls-rsa-2048.conf new file mode 100644 index 000000000..08f54a250 --- /dev/null +++ b/server/configs/tls/tls-rsa-2048.conf @@ -0,0 +1,10 @@ +# Simple TLS (rsa-2048) config file + +listen: 127.0.0.1:-1 + +tls { + cert_file: "./configs/certs/tls/benchmark-server-cert-rsa-2048.pem" + key_file: "./configs/certs/tls/benchmark-server-key-rsa-2048.pem" + ca_file: "./configs/certs/tls/benchmark-ca-cert.pem" + timeout: "5s" +} diff --git a/server/configs/tls/tls-rsa-4096.conf b/server/configs/tls/tls-rsa-4096.conf new file mode 100644 index 000000000..68ad841b7 --- /dev/null +++ b/server/configs/tls/tls-rsa-4096.conf @@ -0,0 +1,10 @@ +# Simple TLS (rsa-4096) config file + +listen: 127.0.0.1:-1 + +tls { + cert_file: "./configs/certs/tls/benchmark-server-cert-rsa-4096.pem" + key_file: "./configs/certs/tls/benchmark-server-key-rsa-4096.pem" + ca_file: "./configs/certs/tls/benchmark-ca-cert.pem" + timeout: "5s" +} diff --git a/server/const.go b/server/const.go index 0074a7171..cd50444d8 100644 --- a/server/const.go +++ b/server/const.go @@ -40,9 +40,10 @@ var ( ) const ( - // VERSION is the current version for the memphis. - VERSION = "1.4.1" + // VERSION is the current version for the memphis server. + VERSION = "1.4.1" // ** changed by Memphis + // ** added by Memphis DEFAULT_SERVER_NAME = "memphis-0" DEFAULT_WS_PORT = 7770 DEFAULT_UI_PORT = 9000 @@ -52,9 +53,10 @@ const ( SHOWABLE_ERROR_STATUS_CODE = 666 DEFAULT_TIERED_STORAGE_UPLOAD_INTERVAL_SEC = 8 DEFAULT_DLS_RETENTION_HOURS = 3 - + DEFAULT_ENCRYPTION_SECRET_KEY = "thisis32bitlongpassphraseimusing" // COMP_WITH_NATS_VERSION is the NATS version Memphis is compatible with - COMP_WITH_NATS_VERSION = "2.9.15" + COMP_WITH_NATS_VERSION = "2.9.22" + // ** added by Memphis // PROTO is the currently supported protocol. // 0 was the original @@ -63,7 +65,7 @@ const ( PROTO = 1 // DEFAULT_PORT is the default port for client connections. - DEFAULT_PORT = 6666 + DEFAULT_PORT = 6666 // ** changed by Memphis // RANDOM_PORT is the value for port that, when supplied, will cause the // server to listen on a randomly-chosen available port. The resolved port @@ -79,7 +81,7 @@ const ( // MAX_PAYLOAD_SIZE is the maximum allowed payload size. Should be using // something different if > 1MB payloads are needed. 
- MAX_PAYLOAD_SIZE = (1 * 1024 * 1024) + MAX_PAYLOAD_SIZE = (1024 * 1024) // MAX_PAYLOAD_MAX_SIZE is the size at which the server will warn about // max_payload being too high. In the future, the server may enforce/reject @@ -221,6 +223,4 @@ const ( // DEFAULT_FETCH_TIMEOUT is the default time that the system will wait for an account fetch to return. DEFAULT_ACCOUNT_FETCH_TIMEOUT = 1900 * time.Millisecond - - DEFAULT_ENCRYPTION_SECRET_KEY = "thisis32bitlongpassphraseimusing" ) diff --git a/server/consumer.go b/server/consumer.go index 1bad9c692..b198d0d68 100644 --- a/server/consumer.go +++ b/server/consumer.go @@ -1,4 +1,4 @@ -// Copyright 2019-2022 The NATS Authors +// Copyright 2019-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -213,14 +213,14 @@ var ( // Calculate accurate replicas for the consumer config with the parent stream config. func (consCfg ConsumerConfig) replicas(strCfg *StreamConfig) int { - if consCfg.Replicas == 0 { - if !isDurableConsumer(&consCfg) && strCfg.Retention == LimitsPolicy { + if consCfg.Replicas == 0 || consCfg.Replicas > strCfg.Replicas { + if !isDurableConsumer(&consCfg) && strCfg.Retention == LimitsPolicy && consCfg.Replicas == 0 { + // Matches old-school ephemerals only, where the replica count is 0. return 1 } return strCfg.Replicas - } else { - return consCfg.Replicas } + return consCfg.Replicas } // Consumer is a jetstream consumer. @@ -254,6 +254,7 @@ type consumer struct { ackReplyT string ackSubj string nextMsgSubj string + nextMsgReqs *ipQueue[*nextMsgReq] maxp int pblimit int maxpb int @@ -299,6 +300,8 @@ type consumer struct { prOk bool uch chan struct{} retention RetentionPolicy + + monitorWg sync.WaitGroup inMonitor bool // R>1 proposals @@ -618,7 +621,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri mset.mu.Lock() if mset.client == nil || mset.store == nil || mset.consumers == nil { mset.mu.Unlock() - return nil, errors.New("invalid stream") + return nil, NewJSStreamInvalidError() } // If this one is durable and already exists, we let that be ok as long as only updating what should be allowed. @@ -748,6 +751,8 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // Create our request waiting queue. if o.isPullMode() { o.waiting = newWaitQueue(config.MaxWaiting) + // Create our internal queue for next msg requests. + o.nextMsgReqs = newIPQueue[*nextMsgReq](s, fmt.Sprintf("[ACC:%s] consumer '%s' on stream '%s' pull requests", accName, o.name, mset.cfg.Name)) } // Check if we have filtered subject that is a wildcard. @@ -818,8 +823,10 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // Set up the ack subscription for this consumer. Will use wildcard for all acks. // We will remember the template to generate replies with sequence numbers and use // that to scanf them back in. - mn := mset.cfg.Name - pre := fmt.Sprintf(jsAckT, mn, o.name) + // Escape '%' in consumer and stream names, as `pre` is used as a template later + // in consumer.ackReply(), resulting in erroneous formatting of the ack subject. 
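To see the failure mode the comment above describes, consider a standalone sketch (not part of the patch; ackT and the names below are simplified stand-ins for the server's jsAckT template and real stream/consumer names):

package main

import (
    "fmt"
    "strings"
)

func main() {
    const ackT = "$JS.ACK.%s.%s" // simplified stand-in for the server's jsAckT template
    name := "100%users"          // hypothetical stream name containing a literal '%'

    // Unescaped: the '%' leaks into the derived template, so the later
    // Sprintf pass misreads it as a formatting verb.
    pre := fmt.Sprintf(ackT, name, "dur")
    fmt.Println(fmt.Sprintf(pre+".%d", 1)) // roughly: $JS.ACK.100%!u(int=1)sers.dur.%!d(MISSING)

    // Escaped first, as the fix below does: '%%' renders as a literal '%'.
    pre = fmt.Sprintf(ackT, strings.ReplaceAll(name, "%", "%%"), "dur")
    fmt.Println(fmt.Sprintf(pre+".%d", 1)) // $JS.ACK.100%users.dur.1
}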
+ mn := strings.ReplaceAll(mset.cfg.Name, "%", "%%") + pre := fmt.Sprintf(jsAckT, mn, strings.ReplaceAll(o.name, "%", "%%")) o.ackReplyT = fmt.Sprintf("%s.%%d.%%d.%%d.%%d.%%d", pre) o.ackSubj = fmt.Sprintf("%s.*.*.*.*.*", pre) o.nextMsgSubj = fmt.Sprintf(JSApiRequestNextT, mn, o.name) @@ -981,7 +988,24 @@ func (o *consumer) setLeader(isLeader bool) { // If we are here we have a change in leader status. if isLeader { - if mset == nil || isRunning { + if mset == nil { + return + } + if isRunning { + // If we detect we are scaling up, make sure to create clustered routines and channels. + o.mu.Lock() + if o.node != nil && o.pch == nil { + // We are moving from R1 to clustered. + o.pch = make(chan struct{}, 1) + go o.loopAndForwardProposals(o.qch) + if o.phead != nil { + select { + case o.pch <- struct{}{}: + default: + } + } + } + o.mu.Unlock() return } @@ -1019,10 +1043,12 @@ func (o *consumer) setLeader(isLeader bool) { } var err error - if o.ackSub, err = o.subscribeInternal(o.ackSubj, o.pushAck); err != nil { - o.mu.Unlock() - o.deleteWithoutAdvisory() - return + if o.cfg.AckPolicy != AckNone { + if o.ackSub, err = o.subscribeInternal(o.ackSubj, o.pushAck); err != nil { + o.mu.Unlock() + o.deleteWithoutAdvisory() + return + } } // Setup the internal sub for next message requests regardless. @@ -1065,7 +1091,7 @@ func (o *consumer) setLeader(isLeader bool) { if o.dthresh > 0 && (o.isPullMode() || !o.active) { // Pull consumer. We run the dtmr all the time for this one. stopAndClearTimer(&o.dtmr) - o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() }) + o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive) } // If we are not in ReplayInstant mode mark us as in replay state until resolved. @@ -1080,6 +1106,7 @@ func (o *consumer) setLeader(isLeader bool) { if node != nil && o.pch == nil { o.pch = make(chan struct{}, 1) } + pullMode := o.isPullMode() o.mu.Unlock() // Snapshot initial info. @@ -1091,6 +1118,11 @@ func (o *consumer) setLeader(isLeader bool) { // Now start up Go routine to process acks. go o.processInboundAcks(qch) + if pullMode { + // Now start up Go routine to process inbound next message requests. + go o.processInboundNextMsgReqs(qch) + } + // If we are R>1 spin up our proposal loop. if node != nil { // Determine if we can send pending requests info to the group. @@ -1107,7 +1139,10 @@ func (o *consumer) setLeader(isLeader bool) { close(o.qch) o.qch = nil } - // Make sure to clear out any re delivery queues + // Stop any inactivity timers. Should only be running on leaders. + stopAndClearTimer(&o.dtmr) + + // Make sure to clear out any re-deliver queues stopAndClearTimer(&o.ptmr) o.rdq, o.rdqi = nil, nil o.pending = nil @@ -1123,9 +1158,7 @@ func (o *consumer) setLeader(isLeader bool) { // Reset waiting if we are in pull mode. if o.isPullMode() { o.waiting = newWaitQueue(o.cfg.MaxWaiting) - if !o.isDurable() { - stopAndClearTimer(&o.dtmr) - } + o.nextMsgReqs.drain() } else if o.srv.gateway.enabled { stopAndClearTimer(&o.gwdtmr) } @@ -1315,7 +1348,7 @@ func (o *consumer) updateDeliveryInterest(localInterest bool) bool { // If we do not have interest anymore and have a delete threshold set, then set // a timer to delete us. We wait for a bit in case of server reconnect. 
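A note on the recurring change visible below, where time.AfterFunc(o.dthresh, o.deleteNotActive) replaces time.AfterFunc(o.dthresh, func() { o.deleteNotActive() }): this is behavior-preserving, since a Go method value captures its receiver, making the explicit closure redundant. A self-contained sketch:

package main

import (
    "fmt"
    "time"
)

type consumer struct{ name string }

func (o *consumer) deleteNotActive() { fmt.Println("deleting", o.name) }

func main() {
    o := &consumer{name: "dur"}

    // Old style: explicit closure wrapping the call.
    t1 := time.AfterFunc(10*time.Millisecond, func() { o.deleteNotActive() })
    defer t1.Stop()

    // New style: the method value o.deleteNotActive already binds o.
    t2 := time.AfterFunc(20*time.Millisecond, o.deleteNotActive)
    defer t2.Stop()

    time.Sleep(50 * time.Millisecond) // let both timers fire
}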
if !interest && o.dthresh > 0 { - o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() }) + o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive) return true } return false @@ -1342,7 +1375,7 @@ func (o *consumer) deleteNotActive() { if o.dtmr != nil { o.dtmr.Reset(o.dthresh - elapsed) } else { - o.dtmr = time.AfterFunc(o.dthresh-elapsed, func() { o.deleteNotActive() }) + o.dtmr = time.AfterFunc(o.dthresh-elapsed, o.deleteNotActive) } o.mu.Unlock() return @@ -1352,7 +1385,7 @@ func (o *consumer) deleteNotActive() { if o.dtmr != nil { o.dtmr.Reset(o.dthresh) } else { - o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() }) + o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive) } o.mu.Unlock() return @@ -1383,9 +1416,10 @@ func (o *consumer) deleteNotActive() { defer ticker.Stop() for range ticker.C { js.mu.RLock() - ca := js.consumerAssignment(acc, stream, name) + nca := js.consumerAssignment(acc, stream, name) js.mu.RUnlock() - if ca != nil { + // Make sure this is not a new consumer with the same name. + if nca != nil && nca == ca { s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name) meta.ForwardProposal(removeEntry) } else { @@ -1605,7 +1639,7 @@ func (o *consumer) updateConfig(cfg *ConsumerConfig) error { stopAndClearTimer(&o.dtmr) // Restart timer only if we are the leader. if o.isLeader() && o.dthresh > 0 { - o.dtmr = time.AfterFunc(o.dthresh, func() { o.deleteNotActive() }) + o.dtmr = time.AfterFunc(o.dthresh, o.deleteNotActive) } } @@ -1691,7 +1725,7 @@ func newJSAckMsg(subj, reply string, hdr int, msg []byte) *jsAckMsg { } else { m = &jsAckMsg{} } - // When getting something from a pool it is criticical that all fields are + // When getting something from a pool it is critical that all fields are // initialized. Doing this way guarantees that if someone adds a field to // the structure, the compiler will fail the build if this line is not updated. (*m) = jsAckMsg{subj, reply, hdr, msg} @@ -1791,7 +1825,9 @@ func (o *consumer) loopAndForwardProposals(qch chan struct{}) { const maxBatch = 256 * 1024 var entries []*Entry for sz := 0; proposal != nil; proposal = proposal.next { - entries = append(entries, &Entry{EntryNormal, proposal.data}) + entry := entryPool.Get().(*Entry) + entry.Type, entry.Data = EntryNormal, proposal.data + entries = append(entries, entry) sz += len(proposal.data) if sz > maxBatch { node.ProposeDirect(entries) @@ -2280,6 +2316,16 @@ func (o *consumer) infoWithSnapAndReply(snap bool, reply string) *ConsumerInfo { NumPending: o.checkNumPending(), PushBound: o.isPushMode() && o.active, } + + // If we are replicated and we are not the leader we need to pull certain data from our store. + if rg != nil && rg.node != nil && !o.isLeader() && o.store != nil { + state, _ := o.store.BorrowState() + info.Delivered.Consumer, info.Delivered.Stream = state.Delivered.Consumer, state.Delivered.Stream + info.AckFloor.Consumer, info.AckFloor.Stream = state.AckFloor.Consumer, state.AckFloor.Stream + info.NumAckPending = len(state.Pending) + info.NumRedelivered = len(state.Redelivered) + } + // Adjust active based on non-zero etc. Also make UTC here. if !o.ldt.IsZero() { ldt := o.ldt.UTC() // This copies as well. 
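Both the loopAndForwardProposals change above (entryPool.Get) and the nextMsgReq pool added further below follow the same sync.Pool discipline: take an object from the pool, overwrite every field with a full struct assignment so no stale state leaks, and zero it again before returning it. A self-contained sketch of that idiom (the entry-like type here is illustrative, not the server's):

package main

import (
    "fmt"
    "sync"
)

type entry struct {
    typ  int
    data []byte
}

var entryPool = sync.Pool{New: func() any { return &entry{} }}

func getEntry(typ int, data []byte) *entry {
    e := entryPool.Get().(*entry)
    // Full positional struct assignment: if a field is ever added to
    // entry, this line fails to compile until the reset is updated too.
    *e = entry{typ, data}
    return e
}

func putEntry(e *entry) {
    e.typ, e.data = 0, nil // drop references before pooling
    entryPool.Put(e)
}

func main() {
    e := getEntry(1, []byte("proposal"))
    fmt.Println(e.typ, string(e.data))
    putEntry(e)
}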
@@ -2371,6 +2417,11 @@ func (o *consumer) sampleAck(sseq, dseq, dc uint64) { func (o *consumer) processAckMsg(sseq, dseq, dc uint64, doSample bool) { o.mu.Lock() + if o.closed { + o.mu.Unlock() + return + } + var sagap uint64 var needSignal bool @@ -2478,20 +2529,25 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { var needAck bool var asflr, osseq uint64 var pending map[uint64]*Pending + o.mu.RLock() + defer o.mu.RUnlock() + + isFiltered := o.isFiltered() + if isFiltered && o.mset == nil { + return false + } // Check if we are filtered, and if so check if this is even applicable to us. - if o.isFiltered() && o.mset != nil { + if isFiltered { if subj == _EMPTY_ { var svp StoreMsg if _, err := o.mset.store.LoadMsg(sseq, &svp); err != nil { - o.mu.RUnlock() return false } subj = svp.subj } if !o.isFilteredMatch(subj) { - o.mu.RUnlock() return false } } @@ -2501,15 +2557,12 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { pending = o.pending } else { if o.store == nil { - o.mu.RUnlock() return false } state, err := o.store.BorrowState() if err != nil || state == nil { // Fall back to what we track internally for now. - needAck := sseq > o.asflr && !o.isFiltered() - o.mu.RUnlock() - return needAck + return sseq > o.asflr && !o.isFiltered() } // If loading state as here, the osseq is +1. asflr, osseq, pending = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending @@ -2528,7 +2581,6 @@ func (o *consumer) needAck(sseq uint64, subj string) bool { } } - o.mu.RUnlock() return needAck } @@ -2784,6 +2836,17 @@ func (o *consumer) nextWaiting(sz int) *waitingRequest { } else if o.srv.gateway.enabled && o.srv.hasGatewayInterest(wr.acc.Name, wr.interest) { return o.waiting.pop() } + } else { + // We do check for expiration in `processWaiting`, but it is possible to hit the expiry here, and not there. + hdr := []byte(fmt.Sprintf("NATS/1.0 408 Request Timeout\r\n%s: %d\r\n%s: %d\r\n\r\n", JSPullRequestPendingMsgs, wr.n, JSPullRequestPendingBytes, wr.b)) + o.outq.send(newJSPubMsg(wr.reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0)) + o.waiting.removeCurrent() + if o.node != nil { + o.removeClusterPendingRequest(wr.reply) + } + wr.recycle() + continue + } if wr.interest != wr.reply { const intExpT = "NATS/1.0 408 Interest Expired\r\n%s: %d\r\n%s: %d\r\n\r\n" @@ -2800,6 +2863,37 @@ func (o *consumer) nextWaiting(sz int) *waitingRequest { return nil } +// Next message request. +type nextMsgReq struct { + reply string + msg []byte +} + +var nextMsgReqPool sync.Pool + +func newNextMsgReq(reply string, msg []byte) *nextMsgReq { + var nmr *nextMsgReq + m := nextMsgReqPool.Get() + if m != nil { + nmr = m.(*nextMsgReq) + } else { + nmr = &nextMsgReq{} + } + // When getting something from a pool it is critical that all fields are + // initialized. Doing this way guarantees that if someone adds a field to + // the structure, the compiler will fail the build if this line is not updated. + (*nmr) = nextMsgReq{reply, msg} + return nmr +} + +func (nmr *nextMsgReq) returnToPool() { + if nmr == nil { + return + } + nmr.reply, nmr.msg = _EMPTY_, nil + nextMsgReqPool.Put(nmr) +} + // processNextMsgReq will process a request for the next message available. A nil message payload means deliver // a single message. If the payload is a formal request or a number parseable with Atoi(), then we will send a // batch of messages without requiring another request to this endpoint, or an ACK. 
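The rewritten processNextMsgReq in the next hunk no longer chooses between inline and goroutine handling per client kind; every pull request is copied onto the consumer's dedicated ipQueue and drained by a single goroutine (processInboundNextMsgReqs, added further below). A generic sketch of that push/drain pattern, assuming a channel-signaled queue (the server's ipQueue is more elaborate and lock-protected):

package main

import "fmt"

type nextMsgReq struct {
    reply string
    msg   []byte
}

type queue struct {
    ch   chan struct{} // signal: work available
    reqs []*nextMsgReq // pending items (a real impl guards this with a lock)
}

func (q *queue) push(r *nextMsgReq) {
    q.reqs = append(q.reqs, r)
    select {
    case q.ch <- struct{}{}:
    default: // already signaled
    }
}

func (q *queue) pop() []*nextMsgReq {
    out := q.reqs
    q.reqs = nil
    return out
}

func main() {
    q := &queue{ch: make(chan struct{}, 1)}
    done := make(chan struct{})

    // Single consumer goroutine, mirroring processInboundNextMsgReqs.
    go func() {
        <-q.ch
        for _, r := range q.pop() {
            fmt.Println("serving pull request for", r.reply)
        }
        close(done)
    }()

    q.push(&nextMsgReq{reply: "_INBOX.abc", msg: []byte("1")})
    <-done
}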
@@ -2807,21 +2901,16 @@ func (o *consumer) processNextMsgReq(_ *subscription, c *client, _ *Account, _, if reply == _EMPTY_ { return } - _, msg = c.msgParts(msg) - inlineOk := c.kind != ROUTER && c.kind != GATEWAY && c.kind != LEAF - if !inlineOk { - // Check how long we have been away from the readloop for the route or gateway or leafnode. - // If too long move to a separate go routine. - if elapsed := time.Since(c.in.start); elapsed < noBlockThresh { - inlineOk = true - } - } - if inlineOk { - o.processNextMsgRequest(reply, msg) - } else { - go o.processNextMsgRequest(reply, copyBytes(msg)) + // Short circuit error here. + if o.nextMsgReqs == nil { + hdr := []byte("NATS/1.0 409 Consumer is push based\r\n\r\n") + o.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0)) + return } + + _, msg = c.msgParts(msg) + o.nextMsgReqs.push(newNextMsgReq(reply, copyBytes(msg))) } func (o *consumer) processNextMsgRequest(reply string, msg []byte) { @@ -2944,6 +3033,22 @@ func (o *consumer) incDeliveryCount(sseq uint64) uint64 { return o.rdc[sseq] + 1 } +// Used if we have to adjust on failed delivery or bad lookups. +// Those failed attempts should not increase deliver count. +// Lock should be held. +func (o *consumer) decDeliveryCount(sseq uint64) { + if o.rdc == nil { + return + } + if dc, ok := o.rdc[sseq]; ok { + if dc == 1 { + delete(o.rdc, sseq) + } else { + o.rdc[sseq] -= 1 + } + } +} + // send a delivery exceeded advisory. func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64, sm *StoreMsg) { // ** added by memphis (sm to notifyDeliveryExceeded) ** // *** added by memphis @@ -2962,9 +3067,7 @@ func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64, sm *StoreMsg) { // ** StreamSeq: sseq, Deliveries: dc, Domain: o.srv.getOpts().JetStreamDomain, - // ** added by memphis - Account: o.acc.GetName(), - // added by memphis ** + Account: o.acc.GetName(), // ** added by memphis } // added by memphis *** if sm != nil { @@ -3007,9 +3110,9 @@ var ( // Is partition aware and redeliver aware. // Lock should be held. func (o *consumer) getNextMsg() (*jsPubMsg, uint64, bool, error) { // *** bool (redelivery) returned value added by memphis - redelivery := false + redelivery := false // ** added by memphis if o.mset == nil || o.mset.store == nil { - return nil, 0, redelivery, errBadConsumer + return nil, 0, redelivery, errBadConsumer // ** redelivery added by memphis } seq, dc := o.sseq, uint64(1) // Process redelivered messages before looking at possibly "skip list" (deliver last per subject) @@ -3023,11 +3126,13 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, bool, error) { // *** bool ( if o.maxdc > 0 && dc > o.maxdc { // Only send once if dc == o.maxdc+1 { - // ** added by memphis (sm to notifyDeliveryExceeded) ** - o.notifyDeliveryExceeded(seq, dc-1, sm) + o.notifyDeliveryExceeded(seq, dc-1, sm) // ** added by memphis (sm to notifyDeliveryExceeded) ** } // Make sure to remove from pending. - delete(o.pending, seq) + if p, ok := o.pending[seq]; ok && p != nil { + delete(o.pending, seq) + o.updateDelivered(p.Sequence, seq, dc, p.Timestamp) + } continue } if seq > 0 { @@ -3038,9 +3143,11 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, bool, error) { // *** bool ( if sm == nil || err != nil { pmsg.returnToPool() pmsg, dc = nil, 0 + // Adjust back deliver count. 
+			o.decDeliveryCount(seq)
 		}
-		redelivery = true
-		return pmsg, dc, redelivery, err
+		redelivery = true                // ** added by memphis
+		return pmsg, dc, redelivery, err // ** redelivery added by memphis
 	}
 }
 // Fallback if all redeliveries are gone.
@@ -3063,7 +3170,7 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, bool, error) { // *** bool (
 	if o.maxp > 0 && len(o.pending) >= o.maxp {
 		// maxp only set when ack policy != AckNone and user set MaxAckPending
 		// Stall if we have hit max pending.
-		return nil, 0, redelivery, errMaxAckPending
+		return nil, 0, redelivery, errMaxAckPending // ** redelivery added by memphis
 	}
 	store := o.mset.store
@@ -3087,7 +3194,7 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, bool, error) { // *** bool (
 		}
 	}
-	return pmsg, dc, redelivery, err
+	return pmsg, dc, redelivery, err // ** redelivery added by memphis
 }
 // Will check for expiration and lack of interest on waiting requests.
@@ -3140,6 +3247,7 @@ func (o *consumer) processWaiting(eos bool) (int, int, int, time.Time) {
 			interest = true
 		}
 	}
+	// If interest, update batch pending requests counter and update fexp timer.
 	if interest {
 		brp += wr.n
@@ -3187,13 +3295,115 @@ func (o *consumer) hbTimer() (time.Duration, *time.Timer) {
 	return o.cfg.Heartbeat, time.NewTimer(o.cfg.Heartbeat)
 }
+// Check here for conditions when our ack floor may have drifted below the stream's first sequence.
+// In general this is accounted for in normal operations, but if the consumer misses the signal from
+// the stream it will not clear the message and move the ack state.
+// Should only be called from consumer leader.
+func (o *consumer) checkAckFloor() {
+	o.mu.RLock()
+	mset, closed, asflr, numPending := o.mset, o.closed, o.asflr, len(o.pending)
+	o.mu.RUnlock()
+
+	if asflr == 0 || closed || mset == nil {
+		return
+	}
+
+	var ss StreamState
+	mset.store.FastState(&ss)
+
+	// If our floor is equal or greater, that is normal and there is nothing for us to do.
+	if ss.FirstSeq == 0 || asflr >= ss.FirstSeq-1 {
+		return
+	}
+
+	// Check which linear space is less to walk.
+	if ss.FirstSeq-asflr-1 < uint64(numPending) {
+		// Process all messages that no longer exist.
+		for seq := asflr + 1; seq < ss.FirstSeq; seq++ {
+			// Check if this message was pending.
+			o.mu.RLock()
+			p, isPending := o.pending[seq]
+			var rdc uint64 = 1
+			if o.rdc != nil {
+				rdc = o.rdc[seq]
+			}
+			o.mu.RUnlock()
+			// If it was pending for us, get rid of it.
+			if isPending {
+				o.processTerm(seq, p.Sequence, rdc)
+			}
+		}
+	} else if numPending > 0 {
+		// Here it is shorter to walk pending.
+		// toTerm is seq, dseq, rdc for each entry.
+		toTerm := make([]uint64, 0, numPending*3)
+		o.mu.RLock()
+		for seq, p := range o.pending {
+			if seq < ss.FirstSeq {
+				var dseq uint64 = 1
+				if p != nil {
+					dseq = p.Sequence
+				}
+				var rdc uint64 = 1
+				if o.rdc != nil {
+					rdc = o.rdc[seq]
+				}
+				toTerm = append(toTerm, seq, dseq, rdc)
+			}
+		}
+		o.mu.RUnlock()
+
+		for i := 0; i < len(toTerm); i += 3 {
+			seq, dseq, rdc := toTerm[i], toTerm[i+1], toTerm[i+2]
+			o.processTerm(seq, dseq, rdc)
+		}
+	}
+
+	// Do one final check here.
+	o.mu.Lock()
+	defer o.mu.Unlock()
+
+	// If we are here, and this should be rare, we still are off with our ack floor.
+	// We will set it explicitly to 1 behind our current lowest in pending, or if
+	// pending is empty, to our current delivered -1.
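A worked micro-example of the repair strategy above (illustrative numbers, not live consumer state): suppose the stream's FirstSeq is 1000 and the consumer's ack floor sits at 400 with 5 pending entries; walking the 599 vanished sequences would be wasteful, so the 5-entry pending map is walked instead, and only then does the final clamp below run.

package main

import "fmt"

func main() {
    // Illustrative numbers, not live consumer state.
    firstSeq, asflr := uint64(1000), uint64(400)
    numPending := uint64(5)

    if firstSeq-asflr-1 < numPending {
        fmt.Println("walk the gap of vanished sequences")
    } else {
        fmt.Println("walk the pending map instead") // 5 entries vs 599 sequences
    }
}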
+ if o.asflr < ss.FirstSeq-1 { + var psseq, pdseq uint64 + for seq, p := range o.pending { + if psseq == 0 || seq < psseq { + psseq, pdseq = seq, p.Sequence + } + } + // If we still have none, set to current delivered -1. + if psseq == 0 { + psseq, pdseq = o.sseq-1, o.dseq-1 + // If still not adjusted. + if psseq < ss.FirstSeq-1 { + psseq, pdseq = ss.FirstSeq-1, ss.FirstSeq-1 + } + } + o.asflr, o.adflr = psseq, pdseq + } +} + func (o *consumer) processInboundAcks(qch chan struct{}) { // Grab the server lock to watch for server quit. o.mu.RLock() - s := o.srv + s, mset := o.srv, o.mset hasInactiveThresh := o.cfg.InactiveThreshold > 0 o.mu.RUnlock() + if s == nil || mset == nil { + return + } + + // We will check this on entry and periodically. + o.checkAckFloor() + + // How often we will check for ack floor drift. + // Spread these out for large numbers on a server restart. + delta := time.Duration(rand.Int63n(int64(time.Minute))) + var ackFloorCheck = time.Minute + delta + for { select { case <-o.ackMsgs.ch: @@ -3207,6 +3417,32 @@ func (o *consumer) processInboundAcks(qch chan struct{}) { if hasInactiveThresh { o.suppressDeletion() } + case <-time.After(ackFloorCheck): + o.checkAckFloor() + case <-qch: + return + case <-s.quitCh: + return + } + } +} + +// Process inbound next message requests. +func (o *consumer) processInboundNextMsgReqs(qch chan struct{}) { + // Grab the server lock to watch for server quit. + o.mu.RLock() + s := o.srv + o.mu.RUnlock() + + for { + select { + case <-o.nextMsgReqs.ch: + reqs := o.nextMsgReqs.pop() + for _, req := range reqs { + o.processNextMsgRequest(req.reply, req.msg) + req.returnToPool() + } + o.nextMsgReqs.recycle(&reqs) case <-qch: return case <-s.quitCh: @@ -3279,15 +3515,13 @@ func (o *consumer) loopAndGatherMsgs(qch chan struct{}) { // Deliver all the msgs we have now, once done or on a condition, we wait for new ones. for { var ( - pmsg *jsPubMsg - dc uint64 - dsubj string - ackReply string - delay time.Duration - sz int - // *** Added by Memphis - redelivery bool - // Added by Memphis *** + pmsg *jsPubMsg + dc uint64 + dsubj string + ackReply string + delay time.Duration + sz int + redelivery bool // *** Added by Memphis ) o.mu.Lock() // consumer is closed when mset is set to nil. @@ -3310,7 +3544,7 @@ func (o *consumer) loopAndGatherMsgs(qch chan struct{}) { } // Grab our next msg. - pmsg, dc, redelivery, err = o.getNextMsg() + pmsg, dc, redelivery, err = o.getNextMsg() // ** redelivery added by memphis // On error either wait or return. if err != nil || pmsg == nil { @@ -3318,10 +3552,16 @@ func (o *consumer) loopAndGatherMsgs(qch chan struct{}) { if err == ErrStoreEOF { o.checkNumPendingOnEOF() } - if err == ErrStoreMsgNotFound || err == ErrStoreEOF || err == errMaxAckPending || err == errPartialCache { + if err == ErrStoreMsgNotFound || err == errDeletedMsg || err == ErrStoreEOF || err == errMaxAckPending { + goto waitForMsgs + } else if err == errPartialCache { + s.Warnf("Unexpected partial cache error looking up message for consumer '%s > %s > %s'", + o.mset.acc, o.mset.cfg.Name, o.cfg.Name) goto waitForMsgs + } else { - s.Errorf("Received an error looking up message for consumer: %v", err) + s.Errorf("Received an error looking up message for consumer '%s > %s > %s': %v", + o.mset.acc, o.mset.cfg.Name, o.cfg.Name, err) goto waitForMsgs } } @@ -3723,20 +3963,39 @@ func (o *consumer) trackPending(sseq, dseq uint64) { } } +// Credit back a failed delivery. +// lock should be held. 
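When a pull delivery fails, the batch accounting on the originating waiting request has already been charged; the function that follows walks the wait queue's circular buffer to find the request (matched by reply subject), restores one message to its remaining count (wr.n) and rolls back its delivered count (wr.d). A standalone sketch of that circular-buffer scan (field names follow the patch; the simplified waitQueue here is illustrative):

package main

import "fmt"

type waitingRequest struct {
    reply string
    n, d  int // remaining and delivered message counts
}

type waitQueue struct {
    rp, n int // read position and number of live entries
    reqs  []*waitingRequest
}

func (wq *waitQueue) credit(reply string) bool {
    for i, rp := 0, wq.rp; i < wq.n; i++ {
        if wr := wq.reqs[rp]; wr != nil && wr.reply == reply {
            wr.n++ // one more message still owed
            wr.d-- // one fewer actually delivered
            return true
        }
        rp = (rp + 1) % cap(wq.reqs)
    }
    return false
}

func main() {
    wq := &waitQueue{n: 2, reqs: make([]*waitingRequest, 4)}
    wq.reqs[0] = &waitingRequest{reply: "_INBOX.a", n: 3, d: 2}
    wq.reqs[1] = &waitingRequest{reply: "_INBOX.b", n: 1, d: 0}

    wq.credit("_INBOX.a")
    fmt.Println(wq.reqs[0].n, wq.reqs[0].d) // 4 1
}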
+func (o *consumer) creditWaitingRequest(reply string) { + for i, rp := 0, o.waiting.rp; i < o.waiting.n; i++ { + if wr := o.waiting.reqs[rp]; wr != nil { + if wr.reply == reply { + wr.n++ + wr.d-- + return + } + } + rp = (rp + 1) % cap(o.waiting.reqs) + } +} + // didNotDeliver is called when a delivery for a consumer message failed. // Depending on our state, we will process the failure. -func (o *consumer) didNotDeliver(seq uint64) { +func (o *consumer) didNotDeliver(seq uint64, subj string) { o.mu.Lock() mset := o.mset if mset == nil { o.mu.Unlock() return } + // Adjust back deliver count. + o.decDeliveryCount(seq) + var checkDeliveryInterest bool if o.isPushMode() { o.active = false checkDeliveryInterest = true } else if o.pending != nil { + o.creditWaitingRequest(subj) // pull mode and we have pending. if _, ok := o.pending[seq]; ok { // We found this messsage on pending, we need @@ -3822,7 +4081,8 @@ func (o *consumer) checkPending() { o.mu.RLock() mset := o.mset // On stop, mset and timer will be nil. - if mset == nil || o.ptmr == nil { + if o.closed || mset == nil || o.ptmr == nil { + stopAndClearTimer(&o.ptmr) o.mu.RUnlock() return } @@ -4249,6 +4509,13 @@ func (o *consumer) delete() error { return o.stopWithFlags(true, false, true, true) } +// To test for closed state. +func (o *consumer) isClosed() bool { + o.mu.RLock() + defer o.mu.RUnlock() + return o.closed +} + func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { o.mu.Lock() js := o.js @@ -4310,6 +4577,9 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { n := o.node qgroup := o.cfg.DeliverGroup o.ackMsgs.unregister() + if o.nextMsgReqs != nil { + o.nextMsgReqs.unregister() + } // For cleaning up the node assignment. var ca *consumerAssignment @@ -4359,14 +4629,15 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { } var rmseqs []uint64 - mset.mu.RLock() + mset.mu.Lock() for seq := start; seq <= stop; seq++ { - if !mset.checkInterest(seq, o) { + if mset.noInterest(seq, o) { rmseqs = append(rmseqs, seq) } } - mset.mu.RUnlock() + mset.mu.Unlock() + // These can be removed. for _, seq := range rmseqs { mset.store.RemoveMsg(seq) } @@ -4378,7 +4649,7 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { n.Delete() } else { // Try to install snapshot on clean exit - if o.store != nil && n.NeedSnapshot() { + if o.store != nil && (o.retention != LimitsPolicy || n.NeedSnapshot()) { if snap, err := o.store.EncodedState(); err == nil { n.InstallSnapshot(snap) } @@ -4560,7 +4831,48 @@ func (o *consumer) clearMonitorRunning() { // Test whether we are in the monitor routine. func (o *consumer) isMonitorRunning() bool { - o.mu.Lock() - defer o.mu.Unlock() + o.mu.RLock() + defer o.mu.RUnlock() return o.inMonitor } + +// If we are a consumer of an interest or workqueue policy stream, process that state and make sure consistent. +func (o *consumer) checkStateForInterestStream() { + o.mu.Lock() + // See if we need to process this update if our parent stream is not a limits policy stream. + mset := o.mset + shouldProcessState := mset != nil && o.retention != LimitsPolicy + if o.closed || !shouldProcessState { + o.mu.Unlock() + return + } + state, err := o.store.State() + o.mu.Unlock() + + if err != nil { + return + } + + // We should make sure to update the acks. 
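Concretely, the code below replays mset.ackMsg for every sequence from the stream's first sequence up to the consumer's ack floor, so an interest- or workqueue-policy stream can finally drop messages whose ack signals it may have missed (for example, while a replica was down). A compressed sketch of the replay, with a closure standing in for the server's mset.ackMsg and FastState:

package main

import "fmt"

func main() {
    // Illustrative state: the stream still holds seqs 10..20, but this
    // consumer's durable state says everything up to 15 was acked.
    streamFirstSeq := uint64(10)
    ackFloorStream := uint64(15)

    ackMsg := func(seq uint64) { fmt.Println("re-ack seq", seq) } // stands in for mset.ackMsg

    // Replay acks the stream may have missed; on an interest/workqueue
    // stream this is what lets those messages finally be removed.
    for seq := streamFirstSeq; seq <= ackFloorStream; seq++ {
        ackMsg(seq)
    }
}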
+ var ss StreamState + mset.store.FastState(&ss) + + asflr := state.AckFloor.Stream + for seq := ss.FirstSeq; seq <= asflr; seq++ { + mset.ackMsg(o, seq) + } + + o.mu.RLock() + // See if we need to process this update if our parent stream is not a limits policy stream. + state, _ = o.store.State() + o.mu.RUnlock() + + // If we have pending, we will need to walk through to delivered in case we missed any of those acks as well. + if state != nil && len(state.Pending) > 0 { + for seq := state.AckFloor.Stream + 1; seq <= state.Delivered.Stream; seq++ { + if _, ok := state.Pending[seq]; !ok { + mset.ackMsg(o, seq) + } + } + } +} diff --git a/server/core_benchmarks_test.go b/server/core_benchmarks_test.go new file mode 100644 index 000000000..1ecb594e5 --- /dev/null +++ b/server/core_benchmarks_test.go @@ -0,0 +1,251 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/nats-io/nats.go" +) + +func BenchmarkCoreRequestReply(b *testing.B) { + const ( + subject = "test-subject" + ) + + messageSizes := []int64{ + 1024, // 1kb + 4096, // 4kb + 40960, // 40kb + 409600, // 400kb + } + + for _, messageSize := range messageSizes { + b.Run(fmt.Sprintf("msgSz=%db", messageSize), func(b *testing.B) { + + // Start server + serverOpts := DefaultOptions() + server := RunServer(serverOpts) + defer server.Shutdown() + + clientUrl := server.ClientURL() + + // Create "echo" subscriber + ncSub, err := nats.Connect(clientUrl) + if err != nil { + b.Fatal(err) + } + defer ncSub.Close() + sub, err := ncSub.Subscribe(subject, func(msg *nats.Msg) { + // Responder echoes the request payload as-is + msg.Respond(msg.Data) + }) + defer sub.Unsubscribe() + if err != nil { + b.Fatal(err) + } + + // Create publisher + ncPub, err := nats.Connect(clientUrl) + if err != nil { + b.Fatal(err) + } + defer ncPub.Close() + + var errors = 0 + + // Create message (reused for all requests) + messageData := make([]byte, messageSize) + b.SetBytes(messageSize) + rand.Read(messageData) + + // Benchmark + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := ncPub.Request(subject, messageData, time.Second) + if err != nil { + errors++ + } + } + b.StopTimer() + + b.ReportMetric(float64(errors), "errors") + }) + } +} + +func BenchmarkCoreTLSFanOut(b *testing.B) { + const ( + subject = "test-subject" + configsBasePath = "./configs/tls" + maxPendingMessages = 25 + maxPendingBytes = 15 * 1024 * 1024 // 15MiB + ) + + keyTypeCases := []string{ + "none", + "ed25519", + "rsa-1024", + "rsa-2048", + "rsa-4096", + } + messageSizeCases := []int64{ + 512 * 1024, // 512Kib + } + numSubsCases := []int{ + 5, + } + + // Custom error handler that ignores ErrSlowConsumer. + // Lots of them are expected in this benchmark which indiscriminately publishes at a rate higher + // than what the server can fan-out to subscribers. 
+ ignoreSlowConsumerErrorHandler := func(conn *nats.Conn, s *nats.Subscription, err error) { + if errors.Is(err, nats.ErrSlowConsumer) { + // Swallow this error + } else { + _, _ = fmt.Fprintf(os.Stderr, "Warning: %s\n", err) + } + } + + for _, keyType := range keyTypeCases { + + b.Run( + fmt.Sprintf("keyType=%s", keyType), + func(b *testing.B) { + + for _, messageSize := range messageSizeCases { + b.Run( + fmt.Sprintf("msgSz=%db", messageSize), + func(b *testing.B) { + + for _, numSubs := range numSubsCases { + b.Run( + fmt.Sprintf("subs=%d", numSubs), + func(b *testing.B) { + // Start server + configPath := fmt.Sprintf("%s/tls-%s.conf", configsBasePath, keyType) + server, _ := RunServerWithConfig(configPath) + defer server.Shutdown() + + opts := []nats.Option{ + nats.MaxReconnects(-1), + nats.ReconnectWait(0), + nats.ErrorHandler(ignoreSlowConsumerErrorHandler), + } + + if keyType != "none" { + opts = append(opts, nats.Secure(&tls.Config{ + InsecureSkipVerify: true, + })) + } + + clientUrl := server.ClientURL() + + // Count of messages received for by each subscriber + counters := make([]int, numSubs) + + // Wait group for subscribers to signal they received b.N messages + var wg sync.WaitGroup + wg.Add(numSubs) + + // Create subscribers + for i := 0; i < numSubs; i++ { + subIndex := i + ncSub, err := nats.Connect(clientUrl, opts...) + if err != nil { + b.Fatal(err) + } + defer ncSub.Close() + sub, err := ncSub.Subscribe(subject, func(msg *nats.Msg) { + counters[subIndex] += 1 + if counters[subIndex] == b.N { + wg.Done() + } + }) + if err != nil { + b.Fatalf("failed to subscribe: %s", err) + } + err = sub.SetPendingLimits(maxPendingMessages, maxPendingBytes) + if err != nil { + b.Fatalf("failed to set pending limits: %s", err) + } + defer sub.Unsubscribe() + if err != nil { + b.Fatal(err) + } + } + + // publisher + ncPub, err := nats.Connect(clientUrl, opts...) + if err != nil { + b.Fatal(err) + } + defer ncPub.Close() + + var errorCount = 0 + + // random bytes as payload + messageData := make([]byte, messageSize) + rand.Read(messageData) + + quitCh := make(chan bool, 1) + + publish := func() { + for { + select { + case <-quitCh: + return + default: + // continue publishing + } + + err := ncPub.Publish(subject, messageData) + if err != nil { + errorCount += 1 + } + } + } + + // Set bytes per operation + b.SetBytes(messageSize) + // Start the clock + b.ResetTimer() + // Start publishing as fast as the server allows + go publish() + // Wait for all subscribers to have delivered b.N messages + wg.Wait() + // Stop the clock + b.StopTimer() + + // Stop publisher + quitCh <- true + + b.ReportMetric(float64(errorCount), "errors") + }, + ) + } + }, + ) + } + }, + ) + } +} diff --git a/server/dirstore.go b/server/dirstore.go index cabd4a199..b39ab9ae0 100644 --- a/server/dirstore.go +++ b/server/dirstore.go @@ -288,6 +288,10 @@ func (store *DirJWTStore) PackWalk(maxJWTs int, cb func(partialPackMsg string)) if err != nil { return err } + if len(jwtBytes) == 0 { + // Skip if no contents in the JWT. + return nil + } if exp != nil { claim, err := jwt.DecodeGeneric(string(jwtBytes)) if err == nil && claim.Expires > 0 && claim.Expires < time.Now().Unix() { @@ -406,6 +410,9 @@ func (store *DirJWTStore) load(publicKey string) (string, error) { // write that keeps hash of all jwt in sync // Assumes the lock is held. Does return true or an error never both. 
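Both ends of the JWT dir store are now defensive about empty tokens: PackWalk (above) skips zero-length files when packing, and write (below) refuses to persist an empty token in the first place. A minimal sketch of the same guard pair (the map-backed store and function names here are illustrative, not the server's API):

package main

import (
    "errors"
    "fmt"
)

var errInvalidJWT = errors.New("invalid JWT")

// Refuse to persist empty tokens, mirroring the guard added to write below.
func storeJWT(store map[string]string, key, theJWT string) error {
    if len(theJWT) == 0 {
        return errInvalidJWT
    }
    store[key] = theJWT
    return nil
}

// Skip empty entries when packing, mirroring the PackWalk guard above.
func pack(store map[string]string) []string {
    var out []string
    for key, jwt := range store {
        if len(jwt) == 0 {
            continue
        }
        out = append(out, key+"|"+jwt)
    }
    return out
}

func main() {
    store := map[string]string{}
    fmt.Println(storeJWT(store, "acc", ""))       // invalid JWT
    fmt.Println(storeJWT(store, "acc", "eyJhb.")) // <nil>; truncated illustrative token
    fmt.Println(pack(store))
}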
func (store *DirJWTStore) write(path string, publicKey string, theJWT string) (bool, error) { + if len(theJWT) == 0 { + return false, fmt.Errorf("invalid JWT") + } var newHash *[sha256.Size]byte if store.expiration != nil { h := sha256.Sum256([]byte(theJWT)) diff --git a/server/disk_avail.go b/server/disk_avail.go index 9ef2e76f2..9bddda80f 100644 --- a/server/disk_avail.go +++ b/server/disk_avail.go @@ -28,7 +28,7 @@ func diskAvailable(storeDir string) int64 { } var fs syscall.Statfs_t if err := syscall.Statfs(storeDir, &fs); err == nil { - // Estimate 95% of available storage. + // Estimate 95% of available storage. // ** changed to 95 by Memphis ba = int64(uint64(fs.Blocks) * uint64(fs.Bsize) / 20 * 19) // ** changed to 95% by Memphis ** } else { // Used 1TB default as a guess if all else fails. diff --git a/server/errors_gen.go b/server/errors_gen.go index cd7dbd595..fca18a55a 100644 --- a/server/errors_gen.go +++ b/server/errors_gen.go @@ -1,19 +1,7 @@ //go:build ignore // +build ignore -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package server +package server // ** changed to server by Memphis ** import ( "encoding/json" diff --git a/server/events.go b/server/events.go index 34b0fec10..97654e09c 100644 --- a/server/events.go +++ b/server/events.go @@ -1,4 +1,4 @@ -// Copyright 2018-2022 The NATS Authors +// Copyright 2018-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -10,17 +10,20 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + package server import ( "bytes" "compress/gzip" "crypto/sha256" + "crypto/x509" "encoding/json" "errors" "fmt" "math/rand" "net/http" + "runtime" "strconv" "strings" "sync" @@ -28,6 +31,7 @@ import ( "time" "github.com/klauspost/compress/s2" + "github.com/memphisdev/memphis/server/certidp" "github.com/memphisdev/memphis/server/pse" "github.com/nats-io/jwt/v2" ) @@ -51,6 +55,7 @@ const ( connsRespSubj = "$SYS._INBOX_.%s" accConnsEventSubjNew = "$SYS.ACCOUNT.%s.SERVER.CONNS" accConnsEventSubjOld = "$SYS.SERVER.ACCOUNT.%s.CONNS" // kept for backward compatibility + lameDuckEventSubj = "$SYS.SERVER.%s.LAMEDUCK" shutdownEventSubj = "$SYS.SERVER.%s.SHUTDOWN" authErrorEventSubj = "$SYS.SERVER.%s.CLIENT.AUTH.ERR" serverStatsSubj = "$SYS.SERVER.%s.STATSZ" @@ -76,31 +81,50 @@ const ( accReqTokens = 5 accReqAccIndex = 3 + + ocspPeerRejectEventSubj = "$SYS.SERVER.%s.OCSP.PEER.CONN.REJECT" + ocspPeerChainlinkInvalidEventSubj = "$SYS.SERVER.%s.OCSP.PEER.LINK.INVALID" ) // FIXME(dlc) - make configurable. var eventsHBInterval = 30 * time.Second +type sysMsgHandler func(sub *subscription, client *client, acc *Account, subject, reply string, hdr, msg []byte) + +// Used if we have to queue things internally to avoid the route/gw path. 
+type inSysMsg struct {
+	sub  *subscription
+	c    *client
+	acc  *Account
+	subj string
+	rply string
+	hdr  []byte
+	msg  []byte
+	cb   sysMsgHandler
+}
+
 // Used to send and receive messages from inside the server.
 type internal struct {
-	account  *Account
-	client   *client
-	seq      uint64
-	sid      int
-	servers  map[string]*serverUpdate
-	sweeper  *time.Timer
-	stmr     *time.Timer
-	replies  map[string]msgHandler
-	sendq    *ipQueue[*pubMsg]
-	resetCh  chan struct{}
-	wg       sync.WaitGroup
-	sq       *sendq
-	orphMax  time.Duration
-	chkOrph  time.Duration
-	statsz   time.Duration
-	cstatsz  time.Duration
-	shash    string
-	inboxPre string
+	account        *Account
+	client         *client
+	seq            uint64
+	sid            int
+	servers        map[string]*serverUpdate
+	sweeper        *time.Timer
+	stmr           *time.Timer
+	replies        map[string]msgHandler
+	sendq          *ipQueue[*pubMsg]
+	recvq          *ipQueue[*inSysMsg]
+	resetCh        chan struct{}
+	wg             sync.WaitGroup
+	sq             *sendq
+	orphMax        time.Duration
+	chkOrph        time.Duration
+	statsz         time.Duration
+	cstatsz        time.Duration
+	shash          string
+	inboxPre       string
+	remoteStatsSub *subscription
 }
 // ServerStatsMsg is sent periodically with stats updates.
@@ -133,6 +157,34 @@ type DisconnectEventMsg struct {
 // DisconnectEventMsgType is the schema type for DisconnectEventMsg
 const DisconnectEventMsgType = "io.nats.server.advisory.v1.client_disconnect"
+// OCSPPeerRejectEventMsg is sent when a peer TLS handshake is ultimately rejected due to OCSP invalidation.
+// A "peer" can be an inbound client connection or a leaf connection to a remote server. Peer in event payload
+// is always the peer's (TLS) leaf cert, which may or may not be the invalid cert (see also OCSPPeerChainlinkInvalidEventMsg).
+type OCSPPeerRejectEventMsg struct {
+	TypedEvent
+	Kind   string           `json:"kind"`
+	Peer   certidp.CertInfo `json:"peer"`
+	Server ServerInfo       `json:"server"`
+	Reason string           `json:"reason"`
+}
+
+// OCSPPeerRejectEventMsgType is the schema type for OCSPPeerRejectEventMsg
+const OCSPPeerRejectEventMsgType = "io.nats.server.advisory.v1.ocsp_peer_reject"
+
+// OCSPPeerChainlinkInvalidEventMsg is sent when a certificate (link) in a valid TLS chain is found to be OCSP invalid
+// during a peer TLS handshake. A "peer" can be an inbound client connection or a leaf connection to a remote server.
+// Peer and Link may be the same if the invalid cert was the peer's leaf cert.
+type OCSPPeerChainlinkInvalidEventMsg struct {
+	TypedEvent
+	Link   certidp.CertInfo `json:"link"`
+	Peer   certidp.CertInfo `json:"peer"`
+	Server ServerInfo       `json:"server"`
+	Reason string           `json:"reason"`
+}
+
+// OCSPPeerChainlinkInvalidEventMsgType is the schema type for OCSPPeerChainlinkInvalidEventMsg
+const OCSPPeerChainlinkInvalidEventMsgType = "io.nats.server.advisory.v1.ocsp_peer_link_invalid"
+
 // AccountNumConns is an event that will be sent from a server that is tracking
 // a given account when the number of connections changes. It will also HB
 // updates in the absence of any changes.
@@ -298,6 +350,33 @@ type TypedEvent struct {
 	Time time.Time `json:"timestamp"`
 }
+// internalReceiveLoop will be responsible for dispatching all messages that
+// a server receives and needs to internally process, e.g. internal subs.
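Two pieces cooperate here: the subscriptions registered later in this patch are wrapped with s.noInlineCallback, which packages the callback arguments into an inSysMsg and pushes it onto sys.recvq rather than running the handler on the route/gateway read loop, while internalReceiveLoop (which follows) is the single drain point. The wrapper's body is not shown in these hunks, so the sketch below is only an assumption about its shape, with a plain channel standing in for the ipQueue:

package main

import "fmt"

type sysMsgHandler func(subj, reply string, hdr, msg []byte)

type inSysMsg struct {
    subj, rply string
    hdr, msg   []byte
    cb         sysMsgHandler
}

type server struct{ recvq chan *inSysMsg } // the real server uses an ipQueue

// Assumed shape of noInlineCallback: wrap a typed handler so the
// subscription callback only enqueues work for internalReceiveLoop.
func (s *server) noInlineCallback(cb sysMsgHandler) func(subj, reply string, hdr, msg []byte) {
    return func(subj, reply string, hdr, msg []byte) {
        s.recvq <- &inSysMsg{subj: subj, rply: reply, hdr: hdr, msg: msg, cb: cb}
    }
}

func main() {
    s := &server{recvq: make(chan *inSysMsg, 8)}
    h := s.noInlineCallback(func(subj, reply string, hdr, msg []byte) {
        fmt.Println("handled", subj, "off the read loop")
    })
    h("$SYS.SERVER.X.STATSZ", "_INBOX.r", nil, nil)

    m := <-s.recvq // internalReceiveLoop's role, condensed
    m.cb(m.subj, m.rply, m.hdr, m.msg)
}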
+func (s *Server) internalReceiveLoop() { + s.mu.RLock() + if s.sys == nil || s.sys.recvq == nil { + s.mu.RUnlock() + return + } + recvq := s.sys.recvq + s.mu.RUnlock() + + for s.eventsRunning() { + select { + case <-recvq.ch: + msgs := recvq.pop() + for _, m := range msgs { + if m.cb != nil { + m.cb(m.sub, m.c, m.acc, m.subj, m.rply, m.hdr, m.msg) + } + } + recvq.recycle(&msgs) + case <-s.quitCh: + return + } + } +} + // internalSendLoop will be responsible for serializing all messages that // a server wants to send. func (s *Server) internalSendLoop(wg *sync.WaitGroup) { @@ -454,6 +533,19 @@ RESET: } } +// Will send a shutdown message for lame-duck. Unlike sendShutdownEvent, this will +// not close off the send queue or reply handler, as we may still have a workload +// that needs migrating off. +// Lock should be held. +func (s *Server) sendLDMShutdownEventLocked() { + if s.sys == nil || s.sys.sendq == nil { + return + } + subj := fmt.Sprintf(lameDuckEventSubj, s.info.ID) + si := &ServerInfo{} + s.sys.sendq.push(newPubMsg(nil, subj, _EMPTY_, si, nil, si, noCompression, false, true)) +} + // Will send a shutdown message. func (s *Server) sendShutdownEvent() { s.mu.Lock() @@ -611,11 +703,9 @@ func (s *Server) checkRemoteServers() { // Grab RSS and PCPU // Server lock will be held but released. func (s *Server) updateServerUsage(v *ServerStats) { - s.mu.Unlock() - defer s.mu.Lock() var vss int64 pse.ProcUsage(&v.CPU, &v.Mem, &vss) - v.Cores = numCores + v.Cores = runtime.NumCPU() } // Generate a route stat for our statz update. @@ -648,6 +738,32 @@ func routeStat(r *client) *RouteStat { func (s *Server) sendStatsz(subj string) { var m ServerStatsMsg s.updateServerUsage(&m.Stats) + + s.mu.RLock() + defer s.mu.RUnlock() + + // Check that we have a system account, etc. + if s.sys == nil || s.sys.account == nil { + return + } + + // if we are running standalone, check for interest. + if s.standAloneMode() { + // Check if we even have interest in this subject. + sacc := s.sys.account + rr := sacc.sl.Match(subj) + totalSubs := len(rr.psubs) + len(rr.qsubs) + if totalSubs == 0 { + return + } else if totalSubs == 1 && len(rr.psubs) == 1 { + // For the broadcast subject we listen to that ourselves with no echo for remote updates. + // If we are the only ones listening do not send either. + if rr.psubs[0] == s.sys.remoteStatsSub { + return + } + } + } + m.Stats.Start = s.start m.Stats.Connections = len(s.clients) m.Stats.TotalConnections = s.totalClients @@ -691,14 +807,12 @@ func (s *Server) sendStatsz(subj string) { gw.RUnlock() } // Active Servers - m.Stats.ActiveServers = 1 - if s.sys != nil { - m.Stats.ActiveServers += len(s.sys.servers) - } + m.Stats.ActiveServers = len(s.sys.servers) + 1 + // JetStream if js := s.js; js != nil { jStat := &JetStreamVarz{} - s.mu.Unlock() + s.mu.RUnlock() js.mu.RLock() c := js.config c.StoreDir = _EMPTY_ @@ -740,7 +854,7 @@ func (s *Server) sendStatsz(subj string) { } } m.Stats.JetStream = jStat - s.mu.Lock() + s.mu.RLock() } // Send message. s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m) @@ -759,13 +873,12 @@ func (s *Server) heartbeatStatsz() { } s.sys.stmr.Reset(s.sys.cstatsz) } - s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) + // Do in separate Go routine. + go s.sendStatszUpdate() } func (s *Server) sendStatszUpdate() { - s.mu.Lock() - s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) - s.mu.Unlock() + s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.ID())) } // This should be wrapChk() to setup common locking. 
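Why the heartbeat above now fires go s.sendStatszUpdate(): sendStatsz acquires s.mu.RLock for itself (visible earlier in this hunk), while the heartbeat timer runs under the server lock, so calling it inline would presumably self-deadlock. A minimal reproduction of that pattern and its fix (a sketch of sync.RWMutex semantics, not server code):

package main

import (
    "fmt"
    "sync"
    "time"
)

type srv struct{ mu sync.RWMutex }

func (s *srv) sendStats() {
    s.mu.RLock() // the stats path now locks for itself
    defer s.mu.RUnlock()
    fmt.Println("stats sent")
}

func (s *srv) heartbeat() {
    s.mu.Lock()
    defer s.mu.Unlock()
    // Calling s.sendStats() here would block forever: RLock cannot be
    // acquired while this goroutine holds the write lock.
    go s.sendStats() // runs after we release the lock
}

func main() {
    s := &srv{}
    s.heartbeat()
    time.Sleep(50 * time.Millisecond) // let the goroutine run
}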
@@ -790,35 +903,15 @@ func getHash(name string) string { return getHashSize(name, sysHashLen) } -var nameToHashSize8 = sync.Map{} -var nameToHashSize6 = sync.Map{} - // Computes a hash for the given `name`. The result will be `size` characters long. func getHashSize(name string, size int) string { - compute := func() string { - sha := sha256.New() - sha.Write([]byte(name)) - b := sha.Sum(nil) - for i := 0; i < size; i++ { - b[i] = digits[int(b[i]%base)] - } - return string(b[:size]) - } - var m *sync.Map - switch size { - case 8: - m = &nameToHashSize8 - case 6: - m = &nameToHashSize6 - default: - return compute() - } - if v, ok := m.Load(name); ok { - return v.(string) + sha := sha256.New() + sha.Write([]byte(name)) + b := sha.Sum(nil) + for i := 0; i < size; i++ { + b[i] = digits[int(b[i]%base)] } - h := compute() - m.Store(name, h) - return h + return string(b[:size]) } // Returns the node name for this server which is a hash of the server name. @@ -852,26 +945,36 @@ func (s *Server) initEventTracking() { s.sys.inboxPre = subject // This is for remote updates for connection accounting. subject = fmt.Sprintf(accConnsEventSubjOld, "*") - if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(s.remoteConnsUpdate)); err != nil { s.Errorf("Error setting up internal tracking for %s: %v", subject, err) } // This will be for responses for account info that we send out. subject = fmt.Sprintf(connsRespSubj, s.info.ID) - if _, err := s.sysSubscribe(subject, s.remoteConnsUpdate); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(s.remoteConnsUpdate)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } // Listen for broad requests to respond with number of subscriptions for a given subject. - if _, err := s.sysSubscribe(accNumSubsReqSubj, s.nsubsRequest); err != nil { + if _, err := s.sysSubscribe(accNumSubsReqSubj, s.noInlineCallback(s.nsubsRequest)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } // Listen for statsz from others. subject = fmt.Sprintf(serverStatsSubj, "*") - if _, err := s.sysSubscribe(subject, s.remoteServerUpdate); err != nil { + if sub, err := s.sysSubscribe(subject, s.noInlineCallback(s.remoteServerUpdate)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) + } else { + // Keep track of this one. + s.sys.remoteStatsSub = sub } // Listen for all server shutdowns. subject = fmt.Sprintf(shutdownEventSubj, "*") - if _, err := s.sysSubscribe(subject, s.remoteServerShutdown); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(s.remoteServerShutdown)); err != nil { + s.Errorf("Error setting up internal tracking: %v", err) + } + // Listen for servers entering lame-duck mode. + // NOTE: This currently is handled in the same way as a server shutdown, but has + // a different subject in case we need to handle differently in future. + subject = fmt.Sprintf(lameDuckEventSubj, "*") + if _, err := s.sysSubscribe(subject, s.noInlineCallback(s.remoteServerShutdown)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } // Listen for account claims updates. 
@@ -881,62 +984,62 @@ func (s *Server) initEventTracking() { } if subscribeToUpdate { for _, sub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { - if _, err := s.sysSubscribe(fmt.Sprintf(sub, "*"), s.accountClaimUpdate); err != nil { + if _, err := s.sysSubscribe(fmt.Sprintf(sub, "*"), s.noInlineCallback(s.accountClaimUpdate)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } } } // Listen for ping messages that will be sent to all servers for statsz. // This subscription is kept for backwards compatibility. Got replaced by ...PING.STATZ from below - if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.statszReq); err != nil { + if _, err := s.sysSubscribe(serverStatsPingReqSubj, s.noInlineCallback(s.statszReq)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } - monSrvc := map[string]msgHandler{ + monSrvc := map[string]sysMsgHandler{ "STATSZ": s.statszReq, - "VARZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "VARZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &VarzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) }) }, - "SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &SubszEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) }) }, - "CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &ConnzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) }) }, - "ROUTEZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "ROUTEZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &RoutezEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) }) }, - "GATEWAYZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "GATEWAYZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &GatewayzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) }) }, - "LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz 
:= &LeafzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) }) }, - "ACCOUNTZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "ACCOUNTZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &AccountzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) }) }, - "JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &JszEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Jsz(&optz.JSzOptions) }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Jsz(&optz.JSzOptions) }) }, - "HEALTHZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "HEALTHZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &HealthzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.healthz(&optz.HealthzOptions), nil }) + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.healthz(&optz.HealthzOptions), nil }) }, } for name, req := range monSrvc { subject = fmt.Sprintf(serverDirectReqSubj, s.info.ID, name) - if _, err := s.sysSubscribe(subject, req); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(req)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } subject = fmt.Sprintf(serverPingReqSubj, name) - if _, err := s.sysSubscribe(subject, req); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(req)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } } @@ -947,10 +1050,10 @@ func (s *Server) initEventTracking() { return tk[accReqAccIndex], nil } } - monAccSrvc := map[string]msgHandler{ - "SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + monAccSrvc := map[string]sysMsgHandler{ + "SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &SubszEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(c, subject, msg); err != nil { return nil, err } else { @@ -960,9 +1063,9 @@ func (s *Server) initEventTracking() { } }) }, - "CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &ConnzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(c, subject, msg); err != nil { return nil, err } else { @@ -971,9 +1074,9 @@ func (s *Server) 
initEventTracking() { } }) }, - "LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &LeafzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(c, subject, msg); err != nil { return nil, err } else { @@ -982,9 +1085,9 @@ func (s *Server) initEventTracking() { } }) }, - "JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &JszEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(c, subject, msg); err != nil { return nil, err } else { @@ -993,9 +1096,9 @@ func (s *Server) initEventTracking() { } }) }, - "INFO": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "INFO": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &AccInfoEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(c, subject, msg); err != nil { return nil, err } else { @@ -1006,9 +1109,9 @@ func (s *Server) initEventTracking() { // STATZ is essentially a duplicate of CONNS with an envelope identical to the others. // For historical reasons CONNS is the odd one out. // STATZ is also less heavy weight than INFO - "STATZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + "STATZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &AccountStatzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(c, subject, msg); err != nil { return nil, err } else if acc == "PING" { // Filter PING subject. Happens for server as well. But wildcards are not used @@ -1028,16 +1131,16 @@ func (s *Server) initEventTracking() { "CONNS": s.connsRequest, } for name, req := range monAccSrvc { - if _, err := s.sysSubscribe(fmt.Sprintf(accDirectReqSubj, "*", name), req); err != nil { + if _, err := s.sysSubscribe(fmt.Sprintf(accDirectReqSubj, "*", name), s.noInlineCallback(req)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } } // For now only the STATZ subject has an account specific ping equivalent. 
if _, err := s.sysSubscribe(fmt.Sprintf(accPingReqSubj, "STATZ"), - func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { + s.noInlineCallback(func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { optz := &AccountStatzEventOptions{} - s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if stz, err := s.AccountStatz(&optz.AccountStatzOptions); err != nil { return nil, err } else if len(stz.Accounts) == 0 && !optz.IncludeUnused { @@ -1046,23 +1149,23 @@ func (s *Server) initEventTracking() { return stz, nil } }) - }); err != nil { + })); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } // Listen for updates when leaf nodes connect for a given account. This will // force any gateway connections to move to `modeInterestOnly` subject = fmt.Sprintf(leafNodeConnectEventSubj, "*") - if _, err := s.sysSubscribe(subject, s.leafNodeConnected); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(s.leafNodeConnected)); err != nil { s.Errorf("Error setting up internal tracking: %v", err) } // For tracking remote latency measurements. subject = fmt.Sprintf(remoteLatencyEventSubj, s.sys.shash) - if _, err := s.sysSubscribe(subject, s.remoteLatencyUpdate); err != nil { + if _, err := s.sysSubscribe(subject, s.noInlineCallback(s.remoteLatencyUpdate)); err != nil { s.Errorf("Error setting up internal latency tracking: %v", err) } // This is for simple debugging of number of subscribers that exist in the system. - if _, err := s.sysSubscribeInternal(accSubsSubj, s.debugSubscribers); err != nil { + if _, err := s.sysSubscribeInternal(accSubsSubj, s.noInlineCallback(s.debugSubscribers)); err != nil { s.Errorf("Error setting up internal debug service for subscribers: %v", err) } } @@ -1130,7 +1233,7 @@ func (s *Server) addSystemAccountExports(sacc *Account) { } // accountClaimUpdate will receive claim updates for accounts. -func (s *Server) accountClaimUpdate(sub *subscription, c *client, _ *Account, subject, resp string, rmsg []byte) { +func (s *Server) accountClaimUpdate(sub *subscription, c *client, _ *Account, subject, resp string, hdr, msg []byte) { if !s.EventsEnabled() { return } @@ -1144,7 +1247,7 @@ func (s *Server) accountClaimUpdate(sub *subscription, c *client, _ *Account, su s.Debugf("Received account claims update on bad subject %q", subject) return } - if _, msg := c.msgParts(rmsg); len(msg) == 0 { + if len(msg) == 0 { err := errors.New("request body is empty") respondToUpdate(s, resp, pubKey, "jwt update error", err) } else if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { @@ -1187,7 +1290,7 @@ func (s *Server) sameDomain(domain string) bool { } // remoteServerShutdownEvent is called when we get an event from another server shutting down. 
-func (s *Server) remoteServerShutdown(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) remoteServerShutdown(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { s.mu.Lock() defer s.mu.Unlock() if !s.eventsEnabled() { @@ -1199,7 +1302,6 @@ func (s *Server) remoteServerShutdown(sub *subscription, c *client, _ *Account, return } - _, msg := c.msgParts(rmsg) if len(msg) == 0 { s.Errorf("Remote server sent invalid (empty) shutdown message to %q", subject) return @@ -1230,9 +1332,9 @@ func (s *Server) remoteServerShutdown(sub *subscription, c *client, _ *Account, } // remoteServerUpdate listens for statsz updates from other servers. -func (s *Server) remoteServerUpdate(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) remoteServerUpdate(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { var ssm ServerStatsMsg - if _, msg := c.msgParts(rmsg); len(msg) == 0 { + if len(msg) == 0 { s.Debugf("Received empty server info for remote server update") return } else if err := json.Unmarshal(msg, &ssm); err != nil { @@ -1244,6 +1346,13 @@ func (s *Server) remoteServerUpdate(sub *subscription, c *client, _ *Account, su } si := ssm.Server + // Should do normal updates before bailing if wrong domain. + s.mu.Lock() + if s.running && s.eventsEnabled() && ssm.Server.ID != s.info.ID { + s.updateRemoteServer(&si) + } + s.mu.Unlock() + // JetStream node updates. if !s.sameDomain(si.Domain) { return @@ -1269,11 +1378,6 @@ func (s *Server) remoteServerUpdate(sub *subscription, c *client, _ *Account, su stats, false, si.JetStream, }) - s.mu.Lock() - if s.running && s.eventsEnabled() && ssm.Server.ID != s.info.ID { - s.updateRemoteServer(&si) - } - s.mu.Unlock() } // updateRemoteServer is called when we have an update from a remote server. @@ -1312,7 +1416,8 @@ func (s *Server) processNewServer(si *ServerInfo) { } } // Announce ourselves.. - s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) + // Do this in a separate Go routine. + go s.sendStatszUpdate() } // If GW is enabled on this server and there are any leaf node connections, @@ -1365,7 +1470,7 @@ func (s *Server) shutdownEventing() { } // Request for our local connection count. -func (s *Server) connsRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) connsRequest(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { if !s.eventsRunning() { return } @@ -1376,7 +1481,7 @@ func (s *Server) connsRequest(sub *subscription, c *client, _ *Account, subject, } a := tk[accReqAccIndex] m := accNumConnsReq{Account: a} - if _, msg := c.msgParts(rmsg); len(msg) > 0 { + if len(msg) > 0 { if err := json.Unmarshal(msg, &m); err != nil { s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err) return @@ -1404,7 +1509,7 @@ func (s *Server) connsRequest(sub *subscription, c *client, _ *Account, subject, } // leafNodeConnected is an event we will receive when a leaf node for a given account connects. 
-func (s *Server) leafNodeConnected(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { +func (s *Server) leafNodeConnected(sub *subscription, _ *client, _ *Account, subject, reply string, hdr, msg []byte) { m := accNumConnsReq{} if err := json.Unmarshal(msg, &m); err != nil { s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err) @@ -1563,7 +1668,7 @@ type ServerAPIConnzResponse struct { } // statszReq is a request for us to respond with current statsz. -func (s *Server) statszReq(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) statszReq(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { if !s.EventsEnabled() { return } @@ -1574,7 +1679,7 @@ func (s *Server) statszReq(sub *subscription, c *client, _ *Account, subject, re } opts := StatszEventOptions{} - if _, msg := c.msgParts(rmsg); len(msg) != 0 { + if len(msg) != 0 { if err := json.Unmarshal(msg, &opts); err != nil { response := &ServerAPIResponse{ Server: &ServerInfo{}, @@ -1586,9 +1691,7 @@ func (s *Server) statszReq(sub *subscription, c *client, _ *Account, subject, re return } } - s.mu.Lock() s.sendStatsz(reply) - s.mu.Unlock() } var errSkipZreq = errors.New("filtered response") @@ -1613,14 +1716,13 @@ func getAcceptEncoding(hdr []byte) compressionType { return unsupportedCompression } -func (s *Server) zReq(c *client, reply string, rmsg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) { +func (s *Server) zReq(c *client, reply string, hdr, msg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) { if !s.EventsEnabled() || reply == _EMPTY_ { return } response := &ServerAPIResponse{Server: &ServerInfo{}} var err error status := 0 - hdr, msg := c.msgParts(rmsg) if len(msg) != 0 { if err = json.Unmarshal(msg, optz); err != nil { status = http.StatusBadRequest // status is only included on error, so record how far execution got @@ -1645,12 +1747,12 @@ func (s *Server) zReq(c *client, reply string, rmsg []byte, fOpts *EventFilterOp } // remoteConnsUpdate gets called when we receive a remote update from another server. -func (s *Server) remoteConnsUpdate(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) remoteConnsUpdate(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { if !s.eventsRunning() { return } var m AccountNumConns - if _, msg := c.msgParts(rmsg); len(msg) == 0 { + if len(msg) == 0 { s.sys.client.Errorf("No message body provided") return } else if err := json.Unmarshal(msg, &m); err != nil { @@ -1694,7 +1796,7 @@ func (s *Server) remoteConnsUpdate(sub *subscription, c *client, _ *Account, sub // This will import any system level exports. func (s *Server) registerSystemImports(a *Account) { - if a == nil || !s.eventsEnabled() { + if a == nil || !s.EventsEnabled() { return } sacc := s.SystemAccount() @@ -2005,6 +2107,25 @@ func (s *Server) sendAuthErrorEvent(c *client) { // rmsg contains header and the message. use client.msgParts(rmsg) to split them apart type msgHandler func(sub *subscription, client *client, acc *Account, subject, reply string, rmsg []byte) +// Create a wrapped callback handler for the subscription that will move it to an +// internal recvQ for processing not inline with routes etc. 
+func (s *Server) noInlineCallback(cb sysMsgHandler) msgHandler { + s.mu.RLock() + if !s.eventsEnabled() { + s.mu.RUnlock() + return nil + } + // Capture here for direct reference to avoid any unnecessary blocking inline with routes, gateways etc. + recvq := s.sys.recvq + s.mu.RUnlock() + + return func(sub *subscription, c *client, acc *Account, subj, rply string, rmsg []byte) { + // Need to copy and split here. + hdr, msg := c.msgParts(rmsg) + recvq.push(&inSysMsg{sub, c, acc, subj, rply, copyBytes(hdr), copyBytes(msg), cb}) + } +} + // Create an internal subscription. sysSubscribeQ for queue groups. func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) { return s.systemSubscribe(subject, _EMPTY_, false, nil, cb) @@ -2044,9 +2165,10 @@ func (s *Server) systemSubscribe(subject, queue string, internalOnly bool, c *cl } var q []byte - if queue != "" { + if queue != _EMPTY_ { q = []byte(queue) } + // Now create the subscription return c.processSub([]byte(subject), q, []byte(sid), cb, internalOnly) } @@ -2079,11 +2201,11 @@ func remoteLatencySubjectForResponse(subject []byte) string { } // remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services. -func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { +func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, _ *Account, subject, _ string, hdr, msg []byte) { if !s.eventsRunning() { return } - rl := remoteLatency{} + var rl remoteLatency if err := json.Unmarshal(msg, &rl); err != nil { s.Errorf("Error unmarshalling remote latency measurement: %v", err) return @@ -2105,25 +2227,21 @@ func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, _ *Account, s acc.mu.RUnlock() return } - m1 := si.m1 - m2 := rl.M2 - lsub := si.latency.subject acc.mu.RUnlock() + si.acc.mu.Lock() + m1 := si.m1 + m2 := rl.M2 + // So we have not processed the response tracking measurement yet. if m1 == nil { - si.acc.mu.Lock() - // Double check since could have slipped in. - m1 = si.m1 - if m1 == nil { - // Store our value there for them to pick up. - si.m1 = &m2 - } - si.acc.mu.Unlock() - if m1 == nil { - return - } + // Store our value there for them to pick up. + si.m1 = &m2 + } + si.acc.mu.Unlock() + if m1 == nil { + return } // Calculate the correct latencies given M1 and M2. @@ -2215,7 +2333,7 @@ func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) { // Allows users of large systems to debug active subscribers for a given subject. // Payload should be the subject of interest. -func (s *Server) debugSubscribers(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) debugSubscribers(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { // Even though this is an internal only subscription, meaning interest was not forwarded, we could // get one here from a GW in optimistic mode. Ignore for now. // FIXME(dlc) - Should we send no interest here back to the GW? 
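The noInlineCallback wrapper above is the heart of this change: the route, gateway, or client read loop no longer runs system handlers inline; it only copies the header and message and pushes onto an internal queue, and a dedicated goroutine drains that queue. A self-contained sketch of the same pattern, with a buffered channel standing in for the server's ipQueue (an assumption; the real queue is unbounded and instrumented):

// Deferred system-message handling, mirroring noInlineCallback above.
type inMsg struct {
	subj, reply string
	hdr, msg    []byte
	cb          func(subj, reply string, hdr, msg []byte)
}

// Wrap cb so the hot path only copies buffers and enqueues.
func noInline(recvq chan<- *inMsg, cb func(subj, reply string, hdr, msg []byte)) func(string, string, []byte, []byte) {
	return func(subj, reply string, hdr, msg []byte) {
		recvq <- &inMsg{
			subj:  subj,
			reply: reply,
			hdr:   append([]byte(nil), hdr...), // copy: the read loop reuses its buffers
			msg:   append([]byte(nil), msg...),
			cb:    cb,
		}
	}
}

// A single goroutine runs the handlers off the hot path.
func runSysMsgs(recvq <-chan *inMsg) {
	for m := range recvq {
		m.cb(m.subj, m.reply, m.hdr, m.msg)
	}
}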
@@ -2223,8 +2341,26 @@ func (s *Server) debugSubscribers(sub *subscription, c *client, _ *Account, subj return } - _, acc, _, msg, err := s.getRequestInfo(c, rmsg) - if err != nil { + var ci ClientInfo + if len(hdr) > 0 { + if err := json.Unmarshal(getHeader(ClientInfoHdr, hdr), &ci); err != nil { + return + } + } + + var acc *Account + if ci.Service != _EMPTY_ { + acc, _ = s.LookupAccount(ci.Service) + } else if ci.Account != _EMPTY_ { + acc, _ = s.LookupAccount(ci.Account) + } else { + // Direct $SYS access. + acc = c.acc + if acc == nil { + acc = s.SystemAccount() + } + } + if acc == nil { return } @@ -2325,12 +2461,12 @@ func (s *Server) debugSubscribers(sub *subscription, c *client, _ *Account, subj // Request for our local subscription count. This will come from a remote origin server // that received the initial request. -func (s *Server) nsubsRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) nsubsRequest(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) { if !s.eventsRunning() { return } m := accNumSubsReq{} - if _, msg := c.msgParts(rmsg); len(msg) == 0 { + if len(msg) == 0 { s.sys.client.Errorf("request requires a body") return } else if err := json.Unmarshal(msg, &m); err != nil { @@ -2405,3 +2541,74 @@ func (s *Server) wrapChk(f func()) func() { s.mu.Unlock() } } + +// sendOCSPPeerRejectEvent sends a system level event to system account when a peer connection is +// rejected due to OCSP invalid status of its trust chain(s). +func (s *Server) sendOCSPPeerRejectEvent(kind string, peer *x509.Certificate, reason string) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.eventsEnabled() { + return + } + if peer == nil { + s.Errorf(certidp.ErrPeerEmptyNoEvent) + return + } + eid := s.nextEventID() + now := time.Now().UTC() + m := OCSPPeerRejectEventMsg{ + TypedEvent: TypedEvent{ + Type: OCSPPeerRejectEventMsgType, + ID: eid, + Time: now, + }, + Kind: kind, + Peer: certidp.CertInfo{ + Subject: certidp.GetSubjectDNForm(peer), + Issuer: certidp.GetIssuerDNForm(peer), + Fingerprint: certidp.GenerateFingerprint(peer), + Raw: peer.Raw, + }, + Reason: reason, + } + subj := fmt.Sprintf(ocspPeerRejectEventSubj, s.info.ID) + s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m) +} + +// sendOCSPPeerChainlinkInvalidEvent sends a system level event to system account when a link in a peer's trust chain +// is OCSP invalid. 
+func (s *Server) sendOCSPPeerChainlinkInvalidEvent(peer *x509.Certificate, link *x509.Certificate, reason string) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.eventsEnabled() { + return + } + if peer == nil || link == nil { + s.Errorf(certidp.ErrPeerEmptyNoEvent) + return + } + eid := s.nextEventID() + now := time.Now().UTC() + m := OCSPPeerChainlinkInvalidEventMsg{ + TypedEvent: TypedEvent{ + Type: OCSPPeerChainlinkInvalidEventMsgType, + ID: eid, + Time: now, + }, + Link: certidp.CertInfo{ + Subject: certidp.GetSubjectDNForm(link), + Issuer: certidp.GetIssuerDNForm(link), + Fingerprint: certidp.GenerateFingerprint(link), + Raw: link.Raw, + }, + Peer: certidp.CertInfo{ + Subject: certidp.GetSubjectDNForm(peer), + Issuer: certidp.GetIssuerDNForm(peer), + Fingerprint: certidp.GenerateFingerprint(peer), + Raw: peer.Raw, + }, + Reason: reason, + } + subj := fmt.Sprintf(ocspPeerChainlinkInvalidEventSubj, s.info.ID) + s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m) +} diff --git a/server/events_test.go b/server/events_test.go index 70745e3f8..34a9d0d47 100644 --- a/server/events_test.go +++ b/server/events_test.go @@ -1,4 +1,4 @@ -// Copyright 2018-2020 The NATS Authors +// Copyright 2018-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -436,8 +436,8 @@ func checkLeafNodeConnectedCount(t testing.TB, s *Server, lnCons int) { t.Helper() checkFor(t, 5*time.Second, 15*time.Millisecond, func() error { if nln := s.NumLeafNodes(); nln != lnCons { - return fmt.Errorf("Expected %d connected leafnode(s) for server %q, got %d", - lnCons, s.ID(), nln) + return fmt.Errorf("Expected %d connected leafnode(s) for server %v, got %d", + lnCons, s, nln) } return nil }) @@ -1220,6 +1220,7 @@ func TestAccountClaimsUpdates(t *testing.T) { claimUpdateSubj := fmt.Sprintf(subj, pub) nc.Publish(claimUpdateSubj, []byte(ajwt)) nc.Flush() + time.Sleep(200 * time.Millisecond) acc, _ = s.LookupAccount(pub) if acc.MaxActiveConnections() != 8 { @@ -1340,6 +1341,9 @@ func TestAccountReqMonitoring(t *testing.T) { require_NoError(t, nc.PublishRequest(pStatz, ib, nil)) resp, err = rSub.NextMsg(time.Second) require_NoError(t, err) + // Since we now have processed our own message, msgs will be 1. + respContentAcc = []string{`"conns":1,`, `"total_conns":1`, `"slow_consumers":0`, `"sent":{"msgs":0,"bytes":0}`, + `"received":{"msgs":1,"bytes":0}`, fmt.Sprintf(`"acc":"%s"`, acc.Name)} require_Contains(t, string(resp.Data), respContentAcc...) _, err = rSub.NextMsg(200 * time.Millisecond) require_Error(t, err) @@ -1511,6 +1515,7 @@ func TestAccountClaimsUpdatesWithServiceImports(t *testing.T) { nc.Publish(claimUpdateSubj, []byte(ajwt2)) } nc.Flush() + time.Sleep(50 * time.Millisecond) if startSubs < s.NumSubscriptions() { t.Fatalf("Subscriptions leaked: %d vs %d", startSubs, s.NumSubscriptions()) @@ -1641,7 +1646,7 @@ func TestSystemAccountWithBadRemoteLatencyUpdate(t *testing.T) { ReqId: "_INBOX.22", } b, _ := json.Marshal(&rl) - s.remoteLatencyUpdate(nil, nil, nil, "foo", _EMPTY_, b) + s.remoteLatencyUpdate(nil, nil, nil, "foo", _EMPTY_, nil, b) } func TestSystemAccountWithGateways(t *testing.T) { @@ -1661,7 +1666,7 @@ func TestSystemAccountWithGateways(t *testing.T) { // If this test fails with wrong number after 10 seconds we may have // added a new initial subscription for the eventing system.
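Stepping back to the two OCSP peer events added in events.go above: they are ordinary system-account publishes, so an operator can observe them like any other server event. A hedged sketch of a watcher; the concrete subject constants (ocspPeerRejectEventSubj, ocspPeerChainlinkInvalidEventSubj) and the JSON field tags are defined elsewhere in this patch, so the wildcard subject and the tags below are assumptions:

// Assumed subject shape; see the ocspPeer*EventSubj constants for the real values.
sub, err := nc.SubscribeSync("$SYS.SERVER.*.OCSP.>")
if err != nil {
	log.Fatal(err)
}
for {
	m, err := sub.NextMsg(time.Minute)
	if err != nil {
		break
	}
	var ev struct {
		Type   string `json:"type"`   // assumed tag on TypedEvent
		Kind   string `json:"kind"`   // assumed tag
		Reason string `json:"reason"` // assumed tag
	}
	if json.Unmarshal(m.Data, &ev) == nil {
		log.Printf("OCSP peer event %s: kind=%q reason=%q", ev.Type, ev.Kind, ev.Reason)
	}
}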
- checkExpectedSubs(t, 45, sa) + checkExpectedSubs(t, 46, sa) // Create a client on B and see if we receive the event urlb := fmt.Sprintf("nats://%s:%d", ob.Host, ob.Port) @@ -2496,6 +2501,40 @@ func TestServerEventsAndDQSubscribers(t *testing.T) { checkSubsPending(t, sub, 10) } +func TestServerEventsStatszSingleServer(t *testing.T) { + conf := createConfFile(t, []byte(` + listen: "127.0.0.1:-1" + accounts { $SYS { users [{user: "admin", password: "p1d"}]} } + `)) + s, _ := RunServerWithConfig(conf) + defer s.Shutdown() + + // Grab internal system client. + s.mu.RLock() + sysc := s.sys.client + wait := s.sys.cstatsz + 25*time.Millisecond + s.mu.RUnlock() + + // Wait for when first statsz would have gone out.. + time.Sleep(wait) + + sysc.mu.Lock() + outMsgs := sysc.stats.outMsgs + sysc.mu.Unlock() + + require_True(t, outMsgs == 0) + + // Connect as a system user and make sure if there is + // subscription interest that we will receive updates. + nc, _ := jsClientConnect(t, s, nats.UserInfo("admin", "p1d")) + defer nc.Close() + + sub, err := nc.SubscribeSync(fmt.Sprintf(serverStatsSubj, "*")) + require_NoError(t, err) + + checkSubsPending(t, sub, 1) +} + func Benchmark_GetHash(b *testing.B) { b.StopTimer() // Get 100 random names diff --git a/server/filestore.go b/server/filestore.go index 3b871ac73..4aea0fa34 100644 --- a/server/filestore.go +++ b/server/filestore.go @@ -107,6 +107,7 @@ type psi struct { } type fileStore struct { + srv *Server mu sync.RWMutex state StreamState ld *LostStreamData @@ -258,6 +259,8 @@ const ( // For smaller reuse buffers. Usually being generated during contention on the lead write buffer. // E.g. mirrors/sources etc. defaultSmallBlockSize = 1 * 1024 * 1024 // 1MB + // Maximum size for the encrypted head block. + maximumEncryptedBlockSize = 2 * 1024 * 1024 // 2MB // Default for KV based defaultKVBlockSize = defaultMediumBlockSize // max block size for now. @@ -274,6 +277,8 @@ const ( wiThresh = int64(30 * time.Second) // Time threshold to write index info for non FIFO cases winfThresh = int64(2 * time.Second) + // Checksum size for hash for msg records. + recordHashSize = 8 ) func newFileStore(fcfg FileStoreConfig, cfg StreamConfig) (*fileStore, error) { @@ -290,7 +295,7 @@ func newFileStoreWithCreatedMemphis(fcfg FileStoreConfig, cfg StreamConfig, crea } // Default values. if fcfg.BlockSize == 0 { - fcfg.BlockSize = dynBlkSize(cfg.Retention, cfg.MaxBytes) + fcfg.BlockSize = dynBlkSize(cfg.Retention, cfg.MaxBytes, prf != nil) } if fcfg.BlockSize > maxBlockSize { return nil, fmt.Errorf("filestore max block size is %s", friendlyBytes(maxBlockSize)) @@ -314,8 +319,11 @@ func newFileStoreWithCreatedMemphis(fcfg FileStoreConfig, cfg StreamConfig, crea if err != nil { return nil, fmt.Errorf("storage directory is not writable") } + tmpfile.Close() + <-dios os.Remove(tmpfile.Name()) + dios <- struct{}{} fs := &fileStore{ fcfg: fcfg, @@ -347,6 +355,14 @@ func newFileStoreWithCreatedMemphis(fcfg FileStoreConfig, cfg StreamConfig, crea return nil, fmt.Errorf("could not create hash: %v", err) } + keyFile := filepath.Join(fs.fcfg.StoreDir, JetStreamMetaFileKey) + // Make sure we do not have an encrypted store underneath of us but no main key. + if fs.prf == nil { + if _, err := os.Stat(keyFile); err == nil { + return nil, errNoMainKey + } + } + // Recover our message state. 
if err := fs.recoverMsgs(); err != nil { return nil, err @@ -364,7 +380,6 @@ func newFileStoreWithCreatedMemphis(fcfg FileStoreConfig, cfg StreamConfig, crea // If we expect to be encrypted check that what we are restoring is not plaintext. // This can happen on snapshot restores or conversions. if fs.prf != nil { - keyFile := filepath.Join(fs.fcfg.StoreDir, JetStreamMetaFileKey) if _, err := os.Stat(keyFile); err != nil && os.IsNotExist(err) { if err := fs.writeStreamMeta(); err != nil { return nil, err @@ -387,7 +402,7 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim } // Default values. if fcfg.BlockSize == 0 { - fcfg.BlockSize = dynBlkSize(cfg.Retention, cfg.MaxBytes) + fcfg.BlockSize = dynBlkSize(cfg.Retention, cfg.MaxBytes, prf != nil) } if fcfg.BlockSize > maxBlockSize { return nil, fmt.Errorf("filestore max block size is %s", friendlyBytes(maxBlockSize)) @@ -411,8 +426,11 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim if err != nil { return nil, fmt.Errorf("storage directory is not writable") } + tmpfile.Close() + <-dios os.Remove(tmpfile.Name()) + dios <- struct{}{} fs := &fileStore{ fcfg: fcfg, @@ -443,6 +461,14 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim return nil, fmt.Errorf("could not create hash: %v", err) } + keyFile := filepath.Join(fs.fcfg.StoreDir, JetStreamMetaFileKey) + // Make sure we do not have an encrypted store underneath of us but no main key. + if fs.prf == nil { + if _, err := os.Stat(keyFile); err == nil { + return nil, errNoMainKey + } + } + // Recover our message state. if err := fs.recoverMsgs(); err != nil { return nil, err @@ -460,7 +486,6 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim // If we expect to be encrypted check that what we are restoring is not plaintext. // This can happen on snapshot restores or conversions. if fs.prf != nil { - keyFile := filepath.Join(fs.fcfg.StoreDir, JetStreamMetaFileKey) if _, err := os.Stat(keyFile); err != nil && os.IsNotExist(err) { if err := fs.writeStreamMeta(); err != nil { return nil, err @@ -473,6 +498,12 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim return fs, nil } +func (fs *fileStore) registerServer(s *Server) { + fs.mu.Lock() + defer fs.mu.Unlock() + fs.srv = s +} + // Lock all existing message blocks. // Lock held on entry. func (fs *fileStore) lockAllMsgBlocks() { @@ -530,7 +561,7 @@ func (fs *fileStore) UpdateConfig(cfg *StreamConfig) error { fs.ageChk = nil } - if cfg.MaxMsgsPer > 0 && cfg.MaxMsgsPer < old_cfg.MaxMsgsPer { + if fs.cfg.MaxMsgsPer > 0 && fs.cfg.MaxMsgsPer < old_cfg.MaxMsgsPer { fs.enforceMsgPerSubjectLimit() } fs.mu.Unlock() @@ -541,7 +572,7 @@ func (fs *fileStore) UpdateConfig(cfg *StreamConfig) error { return nil } -func dynBlkSize(retention RetentionPolicy, maxBytes int64) uint64 { +func dynBlkSize(retention RetentionPolicy, maxBytes int64, encrypted bool) uint64 { if maxBytes > 0 { blkSize := (maxBytes / 4) + 1 // (25% overhead) // Round up to nearest 100 @@ -555,13 +586,24 @@ func dynBlkSize(retention RetentionPolicy, maxBytes int64) uint64 { } else { blkSize = defaultMediumBlockSize } + if encrypted && blkSize > maximumEncryptedBlockSize { + // Notes on this below. 
+ blkSize = maximumEncryptedBlockSize + } return uint64(blkSize) } - if retention == LimitsPolicy { + switch { + case encrypted: + // In the case of encrypted stores, large blocks can result in worsened perf + // since many writes on disk involve re-encrypting the entire block. For now, + // we will enforce a cap on the block size when encryption is enabled to avoid + // this. + return maximumEncryptedBlockSize + case retention == LimitsPolicy: // TODO(dlc) - Make the blocksize relative to this if set. return defaultLargeBlockSize - } else { + default: // TODO(dlc) - Make the blocksize relative to this if set. return defaultMediumBlockSize } @@ -865,6 +907,7 @@ func (fs *fileStore) recoverMsgBlock(fi os.FileInfo, index uint32) (*msgBlock, e if ld, _ := mb.rebuildState(); ld != nil { fs.addLostData(ld) } + if mb.msgs > 0 && !mb.noTrack && fs.psim != nil { fs.populateGlobalPerSubjectInfo(mb) // Try to dump any state we needed on recovery. @@ -1040,6 +1083,10 @@ func (mb *msgBlock) rebuildState() (*LostStreamData, error) { func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { startLastSeq := mb.last.seq + // Remove the .fss file and clear any cache we have set. + mb.clearCacheAndOffset() + mb.removePerSubjectInfoLocked() + buf, err := mb.loadBlock(nil) if err != nil || len(buf) == 0 { var ld *LostStreamData @@ -1095,7 +1142,7 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { fd = mb.mfd } else { fd, err = os.OpenFile(mb.mfn, os.O_RDWR, defaultFilePerms) - if err != nil { + if err == nil { defer fd.Close() } } @@ -1136,19 +1183,40 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { rl &^= hbit dlen := int(rl) - msgHdrSize // Do some quick sanity checks here. - if dlen < 0 || int(slen) > dlen || dlen > int(rl) || rl > rlBadThresh { + if dlen < 0 || int(slen) > (dlen-recordHashSize) || dlen > int(rl) || index+rl > lbuf || rl > rlBadThresh { truncate(index) return gatherLost(lbuf - index), errBadMsg } - if index+rl > lbuf { - truncate(index) - return gatherLost(lbuf - index), errBadMsg + // Check for checksum failures before additional processing. + data := buf[index+msgHdrSize : index+rl] + if hh := mb.hh; hh != nil { + hh.Reset() + hh.Write(hdr[4:20]) + hh.Write(data[:slen]) + if hasHeaders { + hh.Write(data[slen+4 : dlen-recordHashSize]) + } else { + hh.Write(data[slen : dlen-recordHashSize]) + } + checksum := hh.Sum(nil) + if !bytes.Equal(checksum, data[len(data)-recordHashSize:]) { + truncate(index) + return gatherLost(lbuf - index), errBadMsg + } + copy(mb.lchk[0:], checksum) } + // Grab our sequence and timestamp. seq := le.Uint64(hdr[4:]) ts := int64(le.Uint64(hdr[12:])) + // Check if this is a delete tombstone. + if seq&tbit != 0 { + index += rl + continue + } + // This is an old erased message, or a new one that we can track. if seq == 0 || seq&ebit != 0 || seq < mb.first.seq { seq = seq &^ ebit @@ -1157,15 +1225,17 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { addToDmap(seq) } index += rl - mb.last.seq = seq - mb.last.ts = ts + if seq >= mb.first.seq { + mb.last.seq = seq + mb.last.ts = ts + } continue } // This is for when we have index info that adjusts for deleted messages // at the head. So the first.seq will be already set here. If this is larger // replace what we have with this seq. 
- if firstNeedsSet && seq > mb.first.seq { + if firstNeedsSet && seq >= mb.first.seq { firstNeedsSet, mb.first.seq, mb.first.ts = false, seq, ts } @@ -1174,29 +1244,7 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { _, deleted = mb.dmap[seq] } - // Always set last. - mb.last.seq = seq - mb.last.ts = ts - if !deleted { - data := buf[index+msgHdrSize : index+rl] - if hh := mb.hh; hh != nil { - hh.Reset() - hh.Write(hdr[4:20]) - hh.Write(data[:slen]) - if hasHeaders { - hh.Write(data[slen+4 : dlen-8]) - } else { - hh.Write(data[slen : dlen-8]) - } - checksum := hh.Sum(nil) - if !bytes.Equal(checksum, data[len(data)-8:]) { - truncate(index) - return gatherLost(lbuf - index), errBadMsg - } - copy(mb.lchk[0:], checksum) - } - if firstNeedsSet { firstNeedsSet, mb.first.seq, mb.first.ts = false, seq, ts } @@ -1222,6 +1270,11 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { mb.fssNeedsWrite = true } } + + // Always set last + mb.last.seq = seq + mb.last.ts = ts + // Advance to next record. index += rl } @@ -1231,6 +1284,11 @@ func (mb *msgBlock) rebuildStateLocked() (*LostStreamData, error) { mb.last.seq = mb.first.seq - 1 } + // Update our fss file if needed. + if len(mb.fss) > 0 { + mb.writePerSubjectInfo() + } + return nil, nil } @@ -1240,9 +1298,11 @@ func (fs *fileStore) recoverMsgs() error { // Check for any left over purged messages. pdir := filepath.Join(fs.fcfg.StoreDir, purgeDir) + <-dios if _, err := os.Stat(pdir); err == nil { os.RemoveAll(pdir) } + dios <- struct{}{} mdir := filepath.Join(fs.fcfg.StoreDir, msgDir) fis, err := os.ReadDir(mdir) @@ -1260,6 +1320,13 @@ func (fs *fileStore) recoverMsgs() error { return err } if mb, err := fs.recoverMsgBlock(finfo, index); err == nil && mb != nil { + // This is a truncate block with possibly no index. If the OS got shutdown + // out from underneath of us this is possible. + if mb.first.seq == 0 { + mb.dirtyCloseWithRemove(true) + fs.removeMsgBlockFromList(mb) + continue + } if fs.state.FirstSeq == 0 || mb.first.seq < fs.state.FirstSeq { fs.state.FirstSeq = mb.first.seq fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC() @@ -1367,6 +1434,11 @@ func (fs *fileStore) expireMsgsOnRecover() { fs.psim = make(map[string]*psi) return false } + // Make sure we do subject cleanup as well. + mb.ensurePerSubjectInfoLoaded() + for subj := range mb.fss { + fs.removePerSubject(subj) + } mb.dirtyCloseWithRemove(true) deleted++ return true @@ -1398,6 +1470,7 @@ func (fs *fileStore) expireMsgsOnRecover() { } var smv StoreMsg + var needNextFirst bool // Walk messages and remove if expired. mb.ensurePerSubjectInfoLoaded() @@ -1412,14 +1485,13 @@ func (fs *fileStore) expireMsgsOnRecover() { mb.dmap = nil } } - // Keep this update just in case since we are removing dmap entries. - mb.first.seq = seq + // Keep this updated just in case since we are removing dmap entries. + mb.first.seq, needNextFirst = seq, true continue } // Break on other errors. if err != nil || sm == nil { - // Keep this update just in case since we could have removed dmap entries. - mb.first.seq = seq + mb.first.seq, needNextFirst = seq, true break } @@ -1427,6 +1499,7 @@ func (fs *fileStore) expireMsgsOnRecover() { // Check for done. if minAge < sm.ts { + mb.first.seq, needNextFirst = sm.seq, false mb.first.seq = sm.seq mb.first.ts = sm.ts nts = sm.ts @@ -1435,7 +1508,11 @@ func (fs *fileStore) expireMsgsOnRecover() { // Delete the message here. 
if mb.msgs > 0 { + mb.first.seq, needNextFirst = seq, true sz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) + if sz > mb.bytes { + sz = mb.bytes + } mb.bytes -= sz bytes += sz mb.msgs-- @@ -1443,10 +1520,13 @@ } // Update fss // Make sure we have fss loaded. - mb.removeSeqPerSubject(sm.subj, seq, nil) + mb.removeSeqPerSubject(sm.subj, seq) fs.removePerSubject(sm.subj) } - + // Make sure we have a proper next first sequence. + if needNextFirst { + mb.selectNextFirst() + } // Check if empty after processing, could happen if tail of messages are all deleted. needWriteIndex := true if mb.msgs == 0 { @@ -1480,8 +1560,16 @@ } } // Update top level accounting. - fs.state.Msgs -= purged - fs.state.Bytes -= bytes + if purged < fs.state.Msgs { + fs.state.Msgs -= purged + } else { + fs.state.Msgs = 0 + } + if bytes < fs.state.Bytes { + fs.state.Bytes -= bytes + } else { + fs.state.Bytes = 0 + } // Make sure we properly set the fs first sequence and timestamp. fs.selectNextFirst() } @@ -1564,6 +1652,9 @@ func (mb *msgBlock) firstMatching(filter string, wc bool, start uint64, sm *Stor fseq = mb.last.seq + 1 for _, subj := range subs { ss := mb.fss[subj] + if ss != nil && ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } if ss == nil || start > ss.Last || ss.First >= fseq { continue } @@ -1669,6 +1760,9 @@ func (mb *msgBlock) filteredPendingLocked(filter string, wc bool, sseq uint64) ( var havePartial bool for subj, ss := range mb.fss { if isAll || isMatch(subj) { + if ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } if sseq <= ss.First { update(ss) } else if sseq <= ss.Last { @@ -1866,6 +1960,9 @@ func (fs *fileStore) SubjectsState(subject string) map[string]SimpleState { mb.ensurePerSubjectInfoLoaded() for subj, ss := range mb.fss { if subject == _EMPTY_ || subject == fwcs || subjectIsSubsetMatch(subj, subject) { + if ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } oss := fss[subj] if oss.First == 0 { // New fss[subj] = *ss @@ -1907,12 +2004,24 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) seqStart, _ = fs.selectMsgBlockWithIndex(sseq) } - tsa := [32]string{} - fsa := [32]string{} + var tsa, fsa [32]string fts := tokenizeSubjectIntoSlice(fsa[:0], filter) isAll := filter == _EMPTY_ || filter == fwcs wc := subjectHasWildcard(filter) + // See if filter was provided but it's the only subject. + if !isAll && !wc && len(fs.psim) == 1 && fs.psim[filter] != nil { + isAll = true + } + + // If we are isAll and have no deleted we can do a simpler calculation.
+ if isAll && (fs.state.LastSeq-fs.state.FirstSeq+1) == fs.state.Msgs { + if sseq == 0 { + return fs.state.Msgs, validThrough + } + return fs.state.LastSeq - sseq + 1, validThrough + } + isMatch := func(subj string) bool { if isAll { return true @@ -1939,6 +2048,7 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) var t uint64 if isAll && sseq <= mb.first.seq { if lastPerSubject { + mb.ensurePerSubjectInfoLoaded() for subj := range mb.fss { if !seen[subj] { total++ @@ -1965,6 +2075,9 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) seen[subj] = true } } else { + if ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } if sseq <= ss.First { t += ss.Msgs } else if sseq <= ss.Last { @@ -2059,16 +2172,20 @@ func (fs *fileStore) NumPending(sseq uint64, filter string, lastPerSubject bool) mb.mu.Lock() // Check if we should include all of this block in adjusting. If so work with metadata. if sseq > mb.last.seq { - // We need to adjust for all matches in this block. - // We will scan fss state vs messages themselves. - // Make sure we have fss loaded. - mb.ensurePerSubjectInfoLoaded() - for subj, ss := range mb.fss { - if isMatch(subj) { - if lastPerSubject { - adjust++ - } else { - adjust += ss.Msgs + if isAll && !lastPerSubject { + adjust += mb.msgs + } else { + // We need to adjust for all matches in this block. + // We will scan fss state vs messages themselves. + // Make sure we have fss loaded. + mb.ensurePerSubjectInfoLoaded() + for subj, ss := range mb.fss { + if isMatch(subj) { + if lastPerSubject { + adjust++ + } else { + adjust += ss.Msgs + } } } } @@ -2300,6 +2417,7 @@ func (fs *fileStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts in if fs.closed { return ErrStoreClosed } + // Per subject max check needed. mmp := uint64(fs.cfg.MaxMsgsPer) var psmc uint64 @@ -2393,6 +2511,12 @@ func (fs *fileStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts in } } } + } else if mb := fs.selectMsgBlock(fseq); mb != nil { + // If we are here we could not remove fseq from above, so rebuild. + var ld *LostStreamData + if ld, _ = mb.rebuildState(); ld != nil { + fs.rebuildStateLocked(ld) + } } } @@ -2451,7 +2575,7 @@ func (fs *fileStore) StoreMsg(subj string, hdr, msg []byte) (uint64, int64, erro // skipMsg will update this message block for a skipped message. // If we do not have any messages, just update the metadata, otherwise -// we will place and empty record marking the sequence as used. The +// we will place an empty record marking the sequence as used. The // sequence will be marked erased. // fs lock should be held. func (mb *msgBlock) skipMsg(seq uint64, now time.Time) { @@ -2513,12 +2637,23 @@ func (fs *fileStore) rebuildFirst() { if len(fs.blks) == 0 { return } - if fmb := fs.blks[0]; fmb != nil { - fmb.removeIndexFile() - fmb.rebuildState() + fmb := fs.blks[0] + if fmb == nil { + return + } + + fmb.removeIndexFile() + ld, _ := fmb.rebuildState() + fmb.mu.RLock() + isEmpty := fmb.msgs == 0 + fmb.mu.RUnlock() + if isEmpty { + fs.removeMsgBlock(fmb) + } else { fmb.writeIndexInfo() - fs.selectNextFirst() } + fs.selectNextFirst() + fs.rebuildStateLocked(ld) } // Optimized helper function to return first sequence. 
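The fast path added to NumPending above leans on a density invariant: when LastSeq-FirstSeq+1 == Msgs the stream has no interior deletes, so a full-wildcard pending count is pure arithmetic and no per-block or per-subject scan is needed. A small sketch of that reduction (with explicit range guards that the code above leaves to its callers):

// Pending count when the sequence range is dense (no interior deletes).
func numPendingDense(firstSeq, lastSeq, msgs, sseq uint64) (uint64, bool) {
	if msgs == 0 || lastSeq-firstSeq+1 != msgs {
		return 0, false // holes (or empty store): fall back to scanning
	}
	switch {
	case sseq == 0 || sseq <= firstSeq:
		return msgs, true
	case sseq > lastSeq:
		return 0, true
	default:
		return lastSeq - sseq + 1, true
	}
}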
@@ -2555,6 +2690,9 @@ func (fs *fileStore) firstSeqForSubj(subj string) (uint64, error) { info.fblk = i } } + if ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } return ss.First, nil } } @@ -2602,14 +2740,42 @@ func (fs *fileStore) enforceMsgPerSubjectLimit() { fs.scb = nil defer func() { fs.scb = cb }() + var numMsgs uint64 + // collect all that are not correct. needAttention := make(map[string]*psi) for subj, psi := range fs.psim { + numMsgs += psi.total if psi.total > maxMsgsPer { needAttention[subj] = psi } } + // We had an issue with a use case where psim (and hence fss) were correct but idx was not and was not properly being caught. + // So do a quick sanity check here. If we detect a skew do a rebuild then re-check. + if numMsgs != fs.state.Msgs { + // Clear any global subject state. + fs.psim = make(map[string]*psi) + for _, mb := range fs.blks { + mb.removeIndexFile() + ld, err := mb.rebuildState() + mb.writeIndexInfo() + if err != nil && ld != nil { + fs.addLostData(ld) + } + fs.populateGlobalPerSubjectInfo(mb) + } + // Rebuild fs state too. + fs.rebuildStateLocked(nil) + // Need to redo blocks that need attention. + needAttention = make(map[string]*psi) + for subj, psi := range fs.psim { + if psi.total > maxMsgsPer { + needAttention[subj] = psi + } + } + } + // Collect all the msgBlks we alter. blks := make(map[*msgBlock]struct{}) @@ -2629,6 +2795,9 @@ func (fs *fileStore) enforceMsgPerSubjectLimit() { mb.mu.Lock() mb.ensurePerSubjectInfoLoaded() ss := mb.fss[subj] + if ss != nil && ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } mb.mu.Unlock() if ss == nil { continue @@ -2724,6 +2893,16 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( if secure && fs.prf != nil { secure = false } + + if fs.state.Msgs == 0 { + var err = ErrStoreEOF + if seq <= fs.state.LastSeq { + err = ErrStoreMsgNotFound + } + fsUnlock() + return false, err + } + mb := fs.selectMsgBlock(seq) if mb == nil { var err = ErrStoreEOF @@ -2736,8 +2915,8 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( mb.mu.Lock() - // See if the sequence number is still relevant. - if seq < mb.first.seq { + // See if we are closed or the sequence number is still relevant. + if mb.closed || seq < mb.first.seq { mb.mu.Unlock() fsUnlock() return false, nil @@ -2756,11 +2935,27 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( // Now just load regardless. // TODO(dlc) - Figure out a way not to have to load it in, we need subject tracking outside main data block. if mb.cacheNotLoaded() { - if err := mb.loadMsgsWithLock(); err != nil { + // We do not want to block possible activity within another msg block. + // We have to unlock both locks and acquire the mb lock in the loadMsgs() call to avoid a deadlock if another + // go routine was trying to get fs then this mb lock at the same time. E.g. another call to remove for same block. + mb.mu.Unlock() + fsUnlock() + if err := mb.loadMsgs(); err != nil { + return false, err + } + fsLock() + // We need to check if things changed out from underneath us. + if fs.closed { + fsUnlock() + return false, ErrStoreClosed + } + mb.mu.Lock() + if mb.closed || seq < mb.first.seq { mb.mu.Unlock() fsUnlock() - return false, err + return false, nil } + // cacheLookup below will do dmap check so no need to repeat here. 
} var smv StoreMsg @@ -2768,9 +2963,14 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( if err != nil { mb.mu.Unlock() fsUnlock() + // Mimic err behavior from above check to dmap. No error returned if already removed. + if err == errDeletedMsg { + err = nil + } return false, err } + // ** added by memphis // send the message to tiered 2 storage if needed tieredStorageEnabled := fs.cfg.StreamConfig.TieredStorageEnabled if !secure && !strings.HasPrefix(fs.cfg.StreamConfig.Name, "$memphis") && tieredStorageEnabled && serv != nil { @@ -2779,7 +2979,8 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( return false, err } } - + // ** added by memphis + // Grab size msz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) @@ -2787,18 +2988,30 @@ func (fs *fileStore) removeMsg(seq uint64, secure, viaLimits, needFSLock bool) ( mb.lrts = time.Now().UnixNano() // Global stats - fs.state.Msgs-- - fs.state.Bytes -= msz + if fs.state.Msgs > 0 { + fs.state.Msgs-- + } + if msz < fs.state.Bytes { + fs.state.Bytes -= msz + } else { + fs.state.Bytes = 0 + } // Now local mb updates. - mb.msgs-- - mb.bytes -= msz + if mb.msgs > 0 { + mb.msgs-- + } + if msz < mb.bytes { + mb.bytes -= msz + } else { + mb.bytes = 0 + } // If we are tracking subjects here make sure we update that accounting. mb.ensurePerSubjectInfoLoaded() // If we are tracking multiple subjects here make sure we update that accounting. - mb.removeSeqPerSubject(sm.subj, seq, &smv) + mb.removeSeqPerSubject(sm.subj, seq) fs.removePerSubject(sm.subj) if secure { @@ -2972,7 +3185,8 @@ func (mb *msgBlock) compact() { if !isDeleted(seq) { // Normal message here. nbuf = append(nbuf, buf[index:index+rl]...) - if !firstSet { + // Do not set based on tombstone. + if !firstSet && seq&tbit == 0 { firstSet = true mb.first.seq = seq } @@ -3019,8 +3233,7 @@ func (mb *msgBlock) compact() { return } - // Close cache and index file and wipe delete map, then rebuild. - mb.clearCacheAndOffset() + // Remove index file and wipe delete map, then rebuild. mb.removeIndexFileLocked() mb.deleteDmap() mb.rebuildStateLocked() @@ -3042,9 +3255,15 @@ func (mb *msgBlock) slotInfo(slot int) (uint32, uint32, bool, error) { if mb.cache == nil || slot >= len(mb.cache.idx) { return 0, 0, false, errPartialCache } + bi := mb.cache.idx[slot] ri, hashChecked := (bi &^ hbit), (bi&hbit) != 0 + // If this is a deleted slot return here. + if bi == dbit { + return 0, 0, false, errDeletedMsg + } + // Determine record length var rl uint32 if len(mb.cache.idx) > slot+1 { @@ -3269,6 +3488,9 @@ func (mb *msgBlock) truncate(sm *StoreMsg) (nmsgs, nbytes uint64, err error) { if mb.msgs > 0 { rl := fileStoreMsgSize(m.subj, m.hdr, m.msg) mb.msgs-- + if rl > mb.bytes { + rl = mb.bytes + } mb.bytes -= rl mb.rbytes -= rl // For return accounting. @@ -3727,20 +3949,25 @@ func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte // Update write through cache. // Write to msg record. mb.cache.buf = append(mb.cache.buf, checksum...) - // Write index - mb.cache.idx = append(mb.cache.idx, uint32(index)|hbit) mb.cache.lrl = uint32(rl) - if mb.cache.fseq == 0 { - mb.cache.fseq = seq - } // Set cache timestamp for last store. mb.lwts = ts // Decide if we write index info if flushing in place. writeIndex := ts-mb.lwits > wiThresh - // Accounting - mb.updateAccounting(seq, ts, rl) + // Only update index and do accounting if not a delete tombstone. + if seq&tbit == 0 { + // Accounting, do this before stripping ebit, it is ebit aware. 
+ mb.updateAccounting(seq, ts, rl) + // Strip ebit if set. + seq = seq &^ ebit + if mb.cache.fseq == 0 { + mb.cache.fseq = seq + } + // Write index + mb.cache.idx = append(mb.cache.idx, uint32(index)|hbit) + } fch, werr := mb.fch, mb.werr @@ -3843,7 +4070,7 @@ func (mb *msgBlock) updateAccounting(seq uint64, ts int64, rl uint64) { seq = seq &^ ebit } - if mb.first.seq == 0 || mb.first.ts == 0 { + if (mb.first.seq == 0 || mb.first.ts == 0) && seq >= mb.first.seq { mb.first.seq = seq mb.first.ts = ts } @@ -3936,28 +4163,30 @@ func (fs *fileStore) selectMsgBlockWithIndex(seq uint64) (int, *msgBlock) { return -1, nil } - // Starting index, defaults to beginning. - si := 0 + const linearThresh = 32 + nb := len(fs.blks) - 1 - // TODO(dlc) - Use new AVL and make this real for anything beyond certain size. - // Max threshold before we probe for a starting block to start our linear search. - const maxl = 256 - if nb := len(fs.blks); nb > maxl { - d := nb / 8 - for _, i := range []int{d, 2 * d, 3 * d, 4 * d, 5 * d, 6 * d, 7 * d} { - mb := fs.blks[i] + if nb < linearThresh { + for i, mb := range fs.blks { if seq <= atomic.LoadUint64(&mb.last.seq) { - break + return i, mb } - si = i } + return -1, nil } - // blks are sorted in ascending order. - for i := si; i < len(fs.blks); i++ { - mb := fs.blks[i] - if seq <= atomic.LoadUint64(&mb.last.seq) { - return i, mb + // Do traditional binary search here since we know the blocks are sorted by sequence first and last. + for low, high, mid := 0, nb, nb/2; low <= high; mid = (low + high) / 2 { + mb := fs.blks[mid] + // Right now these atomic loads do not factor in, so fine to leave. Was considering + // uplifting these to fs scope to avoid atomic load but not needed. + first, last := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq) + if seq > last { + low = mid + 1 + } else if seq < first { + high = mid - 1 + } else { + return mid, mb } } @@ -3987,7 +4216,7 @@ func (fs *fileStore) selectMsgBlockForStart(minTime time.Time) *msgBlock { func (mb *msgBlock) indexCacheBuf(buf []byte) error { var le = binary.LittleEndian - var fseq uint64 + var fseq, pseq uint64 var idx []uint32 var index uint32 @@ -4020,22 +4249,45 @@ dlen := int(rl) - msgHdrSize // Do some quick sanity checks here. - if dlen < 0 || int(slen) > dlen || dlen > int(rl) || rl > 32*1024*1024 { + if dlen < 0 || int(slen) > (dlen-recordHashSize) || dlen > int(rl) || index+rl > lbuf || rl > rlBadThresh { // This means something is off. // TODO(dlc) - Add into bad list? return errCorruptState } + + // Check for tombstones which we can skip in terms of indexing. + if seq&tbit != 0 { + index += rl + continue + } + // Clear erase bit. seq = seq &^ ebit - // Adjust if we guessed wrong. - if seq != 0 && seq < fseq { - fseq = seq - } + // We defer checksum checks to individual msg cache lookups to amortize costs and // not introduce latency for first message from a newly loaded block. - idx = append(idx, index) - mb.cache.lrl = uint32(rl) - index += mb.cache.lrl + if seq >= mb.first.seq { + // Track that we do not have holes. + // Not expected but did see it in the field. + if pseq > 0 && seq != pseq+1 { + if mb.dmap == nil { + mb.dmap = make(map[uint64]struct{}) + } + for dseq := pseq + 1; dseq < seq; dseq++ { + idx = append(idx, dbit) + mb.dmap[dseq] = struct{}{} + } + } + pseq = seq + + idx = append(idx, index) + mb.cache.lrl = uint32(rl) + // Adjust if we guessed wrong.
+ if seq != 0 && seq < fseq { + fseq = seq + } + } + index += rl } mb.cache.buf = buf mb.cache.idx = idx @@ -4102,15 +4354,13 @@ func (mb *msgBlock) flushPendingMsgsLocked() (*LostStreamData, error) { // Check if we need to encrypt. if mb.bek != nil && lob > 0 { - const rsz = 32 * 1024 // 32k - var rdst [rsz]byte + // Need to leave original alone. var dst []byte - if lob > rsz { - dst = make([]byte, lob) + if lob <= defaultLargeBlockSize { + dst = getMsgBlockBuf(lob)[:lob] } else { - dst = rdst[:lob] + dst = make([]byte, lob) } - // Need to leave original alone. mb.bek.XORKeyStream(dst, buf) buf = dst } @@ -4363,14 +4613,20 @@ var ( errMsgBlkTooBig = errors.New("message block size exceeded int capacity") errUnknownCipher = errors.New("unknown cipher") errDIOStalled = errors.New("IO is stalled") + errNoMainKey = errors.New("encrypted store encountered with no main key") ) -// Used for marking messages that have had their checksums checked. -// Used to signal a message record with headers. -const hbit = 1 << 31 - -// Used for marking erased messages sequences. -const ebit = 1 << 63 +const ( + // Used for marking messages that have had their checksums checked. + // Used to signal a message record with headers. + hbit = 1 << 31 + // Used for marking erased messages sequences. + ebit = 1 << 63 + // Used for marking tombstone sequences. + tbit = 1 << 62 + // Used to mark a bad index as deleted. + dbit = 1 << 30 +) // Will do a lookup from cache. // Lock should be held. @@ -4382,6 +4638,7 @@ func (mb *msgBlock) cacheLookup(seq uint64, sm *StoreMsg) (*StoreMsg, error) { // If we have a delete map check it. if mb.dmap != nil { if _, ok := mb.dmap[seq]; ok { + mb.llts = time.Now().UnixNano() return nil, errDeletedMsg } } @@ -4514,7 +4771,7 @@ func (mb *msgBlock) msgFromBuf(buf []byte, sm *StoreMsg, hh hash.Hash64) (*Store dlen := int(rl) - msgHdrSize slen := int(le.Uint16(hdr[20:])) // Simple sanity check. - if dlen < 0 || slen > dlen || int(rl) > len(buf) { + if dlen < 0 || slen > (dlen-recordHashSize) || dlen > int(rl) || int(rl) > len(buf) { return nil, errBadMsg } data := buf[msgHdrSize : msgHdrSize+dlen] @@ -4524,9 +4781,9 @@ func (mb *msgBlock) msgFromBuf(buf []byte, sm *StoreMsg, hh hash.Hash64) (*Store hh.Write(hdr[4:20]) hh.Write(data[:slen]) if hasHeaders { - hh.Write(data[slen+4 : dlen-8]) + hh.Write(data[slen+4 : dlen-recordHashSize]) } else { - hh.Write(data[slen : dlen-8]) + hh.Write(data[slen : dlen-recordHashSize]) } if !bytes.Equal(hh.Sum(nil), data[len(data)-8:]) { return nil, errBadMsg @@ -4569,32 +4826,50 @@ func (mb *msgBlock) msgFromBuf(buf []byte, sm *StoreMsg, hh hash.Hash64) (*Store return sm, nil } +// Used to intern strings for subjects. +// Based on idea from https://github.com/josharian/intern/blob/master/intern.go +var subjPool = sync.Pool{ + New: func() any { + return make(map[string]string) + }, +} + +// Get an interned string from a byte slice. +func subjFromBytes(b []byte) string { + sm := subjPool.Get().(map[string]string) + defer subjPool.Put(sm) + subj, ok := sm[string(b)] + if ok { + return subj + } + s := string(b) + sm[s] = s + return s +} + // Given the `key` byte slice, this function will return the subject -// as a copy of `key` or a configured subject as to minimize memory allocations. +// as an interned string of `key` or a configured subject as to minimize memory allocations. // Lock should be held. 
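The rewrite of selectMsgBlockWithIndex above drops the old probing linear scan in favor of a plain binary search once the block count crosses linearThresh, which is sound because fs.blks is kept sorted by sequence. The same search in isolation, assuming sorted, non-overlapping [first,last] ranges:

// Binary search over blocks sorted by their [first,last] sequence ranges.
type blkRange struct{ first, last uint64 }

func findBlock(blks []blkRange, seq uint64) int {
	low, high := 0, len(blks)-1
	for low <= high {
		mid := (low + high) / 2
		switch {
		case seq > blks[mid].last:
			low = mid + 1
		case seq < blks[mid].first:
			high = mid - 1
		default:
			return mid
		}
	}
	return -1 // seq falls outside every block (e.g. a fully deleted range)
}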
-func (mb *msgBlock) subjString(key []byte) string { - if len(key) == 0 { +func (mb *msgBlock) subjString(skey []byte) string { + if len(skey) == 0 { return _EMPTY_ } if lsubjs := len(mb.fs.cfg.Subjects); lsubjs > 0 { if lsubjs == 1 { // The cast for the comparison does not make a copy - if string(key) == mb.fs.cfg.Subjects[0] { + if string(skey) == mb.fs.cfg.Subjects[0] { return mb.fs.cfg.Subjects[0] } } else { for _, subj := range mb.fs.cfg.Subjects { - if string(key) == subj { + if string(skey) == subj { return subj } } } } - // Copy here to not reference underlying buffer. - var sb strings.Builder - sb.Write(key) - return sb.String() + return subjFromBytes(skey) } // LoadMsg will lookup the message by sequence number and return it if found. @@ -4637,7 +4912,16 @@ func (fs *fileStore) loadLast(subj string, sm *StoreMsg) (lsm *StoreMsg, err err mb.mu.Unlock() return nil, err } - _, _, l := mb.filteredPendingLocked(subj, wc, mb.first.seq) + var l uint64 + // Optimize if subject is not a wildcard. + if !wc { + if ss := mb.fss[subj]; ss != nil { + l = ss.Last + } + } + if l == 0 { + _, _, l = mb.filteredPendingLocked(subj, wc, mb.first.seq) + } if l > 0 { if mb.cacheNotLoaded() { if err := mb.loadMsgsWithLock(); err != nil { @@ -4680,20 +4964,17 @@ func (fs *fileStore) LoadNextMsg(filter string, wc bool, start uint64, sm *Store start = fs.state.FirstSeq } - // TODO(dlc) - If num blocks gets large maybe use selectMsgBlock but have it return index b/c - // we need to keep walking if no match found in first mb. - for _, mb := range fs.blks { - // Skip blocks that are less than our starting sequence. - if start > atomic.LoadUint64(&mb.last.seq) { - continue - } - if sm, expireOk, err := mb.firstMatching(filter, wc, start, sm); err == nil { - if expireOk && mb != fs.lmb { - mb.tryForceExpireCache() + if bi, _ := fs.selectMsgBlockWithIndex(start); bi >= 0 { + for i := bi; i < len(fs.blks); i++ { + mb := fs.blks[i] + if sm, expireOk, err := mb.firstMatching(filter, wc, start, sm); err == nil { + if expireOk && mb != fs.lmb { + mb.tryForceExpireCache() + } + return sm, sm.seq, nil + } else if err != ErrStoreMsgNotFound { + return nil, 0, err } - return sm, sm.seq, nil - } else if err != ErrStoreMsgNotFound { - return nil, 0, err } } @@ -4868,6 +5149,9 @@ func (mb *msgBlock) writeIndexInfoLocked() error { if err != nil { return err } + if fi, _ := ifd.Stat(); fi != nil { + mb.liwsz = fi.Size() + } mb.ifd = ifd } @@ -5077,26 +5361,13 @@ func compareFn(subject string) func(string, string) bool { // PurgeEx will remove messages based on subject filters, sequence and number of messages to keep. // Will return the number of purged messages. func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint64, err error) { - if sequence > 1 && keep > 0 { - return 0, ErrPurgeArgMismatch - } - if subject == _EMPTY_ || subject == fwcs { if keep == 0 && (sequence == 0 || sequence == 1) { return fs.Purge() } if sequence > 1 { return fs.Compact(sequence) - } else if keep > 0 { - fs.mu.RLock() - msgs, lseq := fs.state.Msgs, fs.state.LastSeq - fs.mu.RUnlock() - if keep >= msgs { - return 0, nil - } - return fs.Compact(lseq - keep + 1) } - return 0, nil } eq, wc := compareFn(subject), subjectHasWildcard(subject) @@ -5146,15 +5417,24 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint // Do fast in place remove. // Stats if mb.msgs > 0 { + // Msgs fs.state.Msgs-- - fs.state.Bytes -= rl mb.msgs-- + // Bytes, make sure to not go negative. 
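The constants grouped above all ride inside integer values rather than separate fields: hbit and dbit mark 32-bit cache index slots, while ebit and tbit mark 64-bit sequences, which is why code throughout this patch strips them with &^ (as in seq = seq &^ ebit) before trusting a sequence. A tiny sketch of that masking discipline for the 64-bit flags, using the same values:

// 64-bit sequence flags as defined above.
const (
	ebit = uint64(1) << 63 // erased message
	tbit = uint64(1) << 62 // delete tombstone
)

// decodeSeq strips the flags and reports which were set.
func decodeSeq(raw uint64) (seq uint64, erased, tombstone bool) {
	return raw &^ (ebit | tbit), raw&ebit != 0, raw&tbit != 0
}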
+ if rl > fs.state.Bytes { + rl = fs.state.Bytes + } + if rl > mb.bytes { + rl = mb.bytes + } + fs.state.Bytes -= rl mb.bytes -= rl + // Totals purged++ bytes += rl } // FSS updates. - mb.removeSeqPerSubject(sm.subj, seq, &smv) + mb.removeSeqPerSubject(sm.subj, seq) fs.removePerSubject(sm.subj) // Check for first message. @@ -5163,7 +5443,8 @@ func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint if mb.isEmpty() { fs.removeMsgBlock(mb) i-- - firstSeqNeedsUpdate = seq == fs.state.FirstSeq + // keep flag set, if set previously + firstSeqNeedsUpdate = firstSeqNeedsUpdate || seq == fs.state.FirstSeq } else if seq == fs.state.FirstSeq { fs.state.FirstSeq = mb.first.seq // new one. fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC() @@ -5287,23 +5568,24 @@ func (fs *fileStore) purge(fseq uint64) (uint64, error) { // but not including the seq parameter. // Will return the number of purged messages. func (fs *fileStore) Compact(seq uint64) (uint64, error) { - if seq == 0 || seq > fs.lastSeq() { + if seq == 0 { return fs.purge(seq) } var purged, bytes uint64 - // We have to delete interior messages. fs.mu.Lock() + // Same as purge all. + if lseq := fs.state.LastSeq; seq > lseq { + fs.mu.Unlock() + return fs.purge(seq) + } + // We have to delete interior messages. smb := fs.selectMsgBlock(seq) if smb == nil { fs.mu.Unlock() return 0, nil } - if err := smb.loadMsgs(); err != nil { - fs.mu.Unlock() - return 0, err - } // All msgblocks up to this one can be thrown away. var deleted int @@ -5330,7 +5612,12 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { var isEmpty bool smb.mu.Lock() - // Since we loaded before we acquired our lock, double check here under lock that we have the messages loaded. + if smb.first.seq == seq { + isEmpty = smb.msgs == 0 + goto SKIP + } + + // Make sure we have the messages loaded. if smb.cacheNotLoaded() { if err = smb.loadMsgsWithLock(); err != nil { goto SKIP @@ -5341,7 +5628,7 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { if err == errDeletedMsg { // Update dmap. if len(smb.dmap) > 0 { - delete(smb.dmap, seq) + delete(smb.dmap, mseq) if len(smb.dmap) == 0 { smb.dmap = nil } @@ -5349,13 +5636,16 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { } else if sm != nil { sz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) if smb.msgs > 0 { + smb.msgs-- + if sz > smb.bytes { + sz = smb.bytes + } smb.bytes -= sz bytes += sz - smb.msgs-- purged++ } // Update fss - smb.removeSeqPerSubject(sm.subj, mseq, &smv) + smb.removeSeqPerSubject(sm.subj, mseq) fs.removePerSubject(sm.subj) } } @@ -5377,7 +5667,7 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { // Check if we should reclaim the head space from this block. // This will be optimistic only, so don't continue if we encounter any errors here. - if smb.bytes*2 < smb.rbytes { + if smb.rbytes > compactMinimum && smb.bytes*2 < smb.rbytes { var moff uint32 moff, _, _, err = smb.slotInfo(int(smb.first.seq - smb.cache.fseq)) if err != nil || moff >= uint32(len(smb.cache.buf)) { @@ -5411,13 +5701,13 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { } SKIP: - smb.mu.Unlock() - if !isEmpty { // Make sure to write out our index info. - smb.writeIndexInfo() + smb.writeIndexInfoLocked() } + smb.mu.Unlock() + if deleted > 0 { // Update block map. if fs.bim != nil { @@ -5435,13 +5725,20 @@ SKIP: } // Update top level accounting. 
+ if purged > fs.state.Msgs { + purged = fs.state.Msgs + } fs.state.Msgs -= purged + + if bytes > fs.state.Bytes { + bytes = fs.state.Bytes + } fs.state.Bytes -= bytes cb := fs.scb fs.mu.Unlock() - if cb != nil { + if cb != nil && purged > 0 { cb(-int64(purged), -int64(bytes), 0, _EMPTY_) } @@ -5450,7 +5747,6 @@ SKIP: // Will completely reset our store. func (fs *fileStore) reset() error { - fs.mu.Lock() if fs.closed { fs.mu.Unlock() @@ -5464,14 +5760,12 @@ func (fs *fileStore) reset() error { var purged, bytes uint64 cb := fs.scb - if cb != nil { - for _, mb := range fs.blks { - mb.mu.Lock() - purged += mb.msgs - bytes += mb.bytes - mb.dirtyCloseWithRemove(true) - mb.mu.Unlock() - } + for _, mb := range fs.blks { + mb.mu.Lock() + purged += mb.msgs + bytes += mb.bytes + mb.dirtyCloseWithRemove(true) + mb.mu.Unlock() } // Reset @@ -5560,7 +5854,13 @@ func (fs *fileStore) Truncate(seq uint64) error { fs.state.LastSeq = lsm.seq fs.state.LastTime = time.Unix(0, lsm.ts).UTC() // Update msgs and bytes. + if purged > fs.state.Msgs { + purged = fs.state.Msgs + } fs.state.Msgs -= purged + if bytes > fs.state.Bytes { + bytes = fs.state.Bytes + } fs.state.Bytes -= bytes // Reset our subject lookup info. @@ -5621,11 +5921,9 @@ func (fs *fileStore) addMsgBlock(mb *msgBlock) { fs.bim[mb.index] = mb } -// Removes the msgBlock +// Remove from our list of blks. // Both locks should be held. -func (fs *fileStore) removeMsgBlock(mb *msgBlock) { - mb.dirtyCloseWithRemove(true) - +func (fs *fileStore) removeMsgBlockFromList(mb *msgBlock) { // Remove from list. for i, omb := range fs.blks { if mb == omb { @@ -5637,6 +5935,13 @@ func (fs *fileStore) removeMsgBlock(mb *msgBlock) { break } } +} + +// Removes the msgBlock +// Both locks should be held. +func (fs *fileStore) removeMsgBlock(mb *msgBlock) { + mb.dirtyCloseWithRemove(true) + fs.removeMsgBlockFromList(mb) // Check for us being last message block if mb == fs.lmb { // Creating a new message write block requires that the lmb lock is not held. @@ -5734,50 +6039,81 @@ func (mb *msgBlock) dirtyCloseWithRemove(remove bool) { // Remove a seq from the fss and select new first. // Lock should be held. -func (mb *msgBlock) removeSeqPerSubject(subj string, seq uint64, smp *StoreMsg) { +func (mb *msgBlock) removeSeqPerSubject(subj string, seq uint64) { mb.ensurePerSubjectInfoLoaded() ss := mb.fss[subj] if ss == nil { return } - // Mark dirty - mb.fssNeedsWrite = true - if ss.Msgs == 1 { delete(mb.fss, subj) + mb.fssNeedsWrite = true // Mark dirty return } ss.Msgs-- - if seq != ss.First { - return - } // Only one left. if ss.Msgs == 1 { - if seq != ss.First { + if seq == ss.Last { ss.Last = ss.First } else { ss.First = ss.Last } + ss.firstNeedsUpdate = false + mb.fssNeedsWrite = true // Mark dirty return } - // Recalculate first. - // TODO(dlc) - Might want to optimize this. - if seq == ss.First { - var smv StoreMsg - if smp == nil { - smp = &smv + // We can lazily calculate the first sequence when needed. + ss.firstNeedsUpdate = seq == ss.First || ss.firstNeedsUpdate +} + +// Will recalulate the first sequence for this subject in this block. +// Will avoid slower path message lookups and scan the cache directly instead. +func (mb *msgBlock) recalculateFirstForSubj(subj string, startSeq uint64, ss *SimpleState) { + // Need to make sure messages are loaded. 
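+	// (Hedged sketch of the design: the cache load below is needed because
+	// this scans mb.cache.idx / mb.cache.buf directly, decoding only each
+	// record's header (sequence + subject length) rather than doing full
+	// cacheLookup loads per message. removeSeqPerSubject merely flags
+	// ss.firstNeedsUpdate when the old First is removed, deferring this
+	// scan until First is actually needed; erased (ebit) and dmap-deleted
+	// sequences are skipped along the way.)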
+ if mb.cacheNotLoaded() { + if err := mb.loadMsgsWithLock(); err != nil { + return } - for tseq := seq + 1; tseq <= ss.Last; tseq++ { - if sm, _ := mb.cacheLookup(tseq, smp); sm != nil { - if sm.subj == subj { - ss.First = tseq - return + } + // Mark first as updated. + ss.firstNeedsUpdate = false + startSeq++ + + startSlot := int(startSeq - mb.cache.fseq) + if startSlot >= len(mb.cache.idx) { + ss.First = ss.Last + return + } else if startSlot < 0 { + startSlot = 0 + } + + var le = binary.LittleEndian + for slot := startSlot; slot < len(mb.cache.idx); slot++ { + li := int(mb.cache.idx[slot]&^hbit) - mb.cache.off + if li >= len(mb.cache.buf) { + ss.First = ss.Last + return + } + buf := mb.cache.buf[li:] + hdr := buf[:msgHdrSize] + slen := int(le.Uint16(hdr[20:])) + if subj == string(buf[msgHdrSize:msgHdrSize+slen]) { + seq := le.Uint64(hdr[4:]) + if seq < mb.first.seq || seq&ebit != 0 { + continue + } + if len(mb.dmap) > 0 { + if _, ok := mb.dmap[seq]; ok { + continue } } + ss.First = seq + mb.fssNeedsWrite = true // Mark dirty + return } } } @@ -6006,6 +6342,9 @@ func (mb *msgBlock) writePerSubjectInfo() error { n := binary.PutUvarint(scratch[0:], uint64(len(mb.fss))) b.Write(scratch[0:n]) for subj, ss := range mb.fss { + if ss.firstNeedsUpdate { + mb.recalculateFirstForSubj(subj, ss.First, ss) + } n := binary.PutUvarint(scratch[0:], uint64(len(subj))) b.Write(scratch[0:n]) b.WriteString(subj) @@ -6097,6 +6436,16 @@ func (fs *fileStore) Delete() error { if fs.isClosed() { // Always attempt to remove since we could have been closed beforehand. os.RemoveAll(fs.fcfg.StoreDir) + // Since we did remove, if we did have anything remaining make sure to + // call into any storage updates that had been registered. + fs.mu.Lock() + cb, msgs, bytes := fs.scb, int64(fs.state.Msgs), int64(fs.state.Bytes) + // Guard against double accounting if called twice. + fs.state.Msgs, fs.state.Bytes = 0, 0 + fs.mu.Unlock() + if msgs > 0 && cb != nil { + cb(-msgs, -bytes, 0, _EMPTY_) + } return ErrStoreClosed } fs.Purge() @@ -6149,6 +6498,9 @@ func (fs *fileStore) Stop() error { fs.cancelSyncTimer() fs.cancelAgeChk() + // We should update the upper usage layer on a stop. + cb, bytes := fs.scb, int64(fs.state.Bytes) + var _cfs [256]ConsumerStore cfs := append(_cfs[:0], fs.cfs...) fs.cfs = nil @@ -6158,6 +6510,10 @@ func (fs *fileStore) Stop() error { o.Stop() } + if bytes > 0 && cb != nil { + cb(0, -bytes, 0, _EMPTY_) + } + return nil } @@ -6529,6 +6885,10 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt o.qch = make(chan struct{}) go o.flushLoop(o.fch, o.qch) + // Make sure to load in our state from disk if needed. + o.loadState() + + // Assign to filestore. fs.AddConsumer(o) return o, nil @@ -6739,17 +7099,28 @@ func (o *consumerFileStore) UpdateDelivered(dseq, sseq, dc uint64, ts int64) err } if dc > 1 { + if maxdc := uint64(o.cfg.MaxDeliver); maxdc > 0 && dc > maxdc { + // Make sure to remove from pending. + delete(o.state.Pending, sseq) + } if o.state.Redelivered == nil { o.state.Redelivered = make(map[uint64]uint64) } - o.state.Redelivered[sseq] = dc - 1 + // Only update if greater then what we already have. + if o.state.Redelivered[sseq] < dc-1 { + o.state.Redelivered[sseq] = dc - 1 + } } } else { // For AckNone just update delivered and ackfloor at the same time. 
- o.state.Delivered.Consumer = dseq - o.state.Delivered.Stream = sseq - o.state.AckFloor.Consumer = dseq - o.state.AckFloor.Stream = sseq + if dseq > o.state.Delivered.Consumer { + o.state.Delivered.Consumer = dseq + o.state.AckFloor.Consumer = dseq + } + if sseq > o.state.Delivered.Stream { + o.state.Delivered.Stream = sseq + o.state.AckFloor.Stream = sseq + } } // Make sure we flush to disk. o.kickFlusher() @@ -6765,15 +7136,16 @@ func (o *consumerFileStore) UpdateAcks(dseq, sseq uint64) error { if o.cfg.AckPolicy == AckNone { return ErrNoAckPolicy } - if len(o.state.Pending) == 0 || o.state.Pending[sseq] == nil { - return ErrStoreMsgNotFound - } // On restarts the old leader may get a replay from the raft logs that are old. if dseq <= o.state.AckFloor.Consumer { return nil } + if len(o.state.Pending) == 0 || o.state.Pending[sseq] == nil { + return ErrStoreMsgNotFound + } + // Check for AckAll here. if o.cfg.AckPolicy == AckAll { sgap := sseq - o.state.AckFloor.Stream @@ -6796,21 +7168,15 @@ func (o *consumerFileStore) UpdateAcks(dseq, sseq uint64) error { delete(o.state.Pending, sseq) dseq = p.Sequence // Use the original. } - // Now remove from redelivered. - if len(o.state.Redelivered) > 0 { - delete(o.state.Redelivered, sseq) - } - if len(o.state.Pending) == 0 { o.state.AckFloor.Consumer = o.state.Delivered.Consumer o.state.AckFloor.Stream = o.state.Delivered.Stream } else if dseq == o.state.AckFloor.Consumer+1 { - first := o.state.AckFloor.Consumer == 0 o.state.AckFloor.Consumer = dseq o.state.AckFloor.Stream = sseq - if !first && o.state.Delivered.Consumer > dseq { - for ss := sseq + 1; ss < o.state.Delivered.Stream; ss++ { + if o.state.Delivered.Consumer > dseq { + for ss := sseq + 1; ss <= o.state.Delivered.Stream; ss++ { if p, ok := o.state.Pending[ss]; ok { if p.Sequence > 0 { o.state.AckFloor.Consumer = p.Sequence - 1 @@ -6821,6 +7187,8 @@ func (o *consumerFileStore) UpdateAcks(dseq, sseq uint64) error { } } } + // We do these regardless. + delete(o.state.Redelivered, sseq) o.kickFlusher() return nil @@ -6834,18 +7202,16 @@ const seqsHdrSize = 6*binary.MaxVarintLen64 + hdrLen func (o *consumerFileStore) EncodedState() ([]byte, error) { o.mu.Lock() defer o.mu.Unlock() - - if o.closed { - return nil, ErrStoreClosed - } - return encodeConsumerState(&o.state), nil + return o.encodeState() } func (o *consumerFileStore) encodeState() ([]byte, error) { - if o.closed { - return nil, ErrStoreClosed + // Grab reference to state, but make sure we load in if needed, so do not reference o.state directly. + state, err := o.stateWithCopyLocked(false) + if err != nil { + return nil, err } - return encodeConsumerState(&o.state), nil + return encodeConsumerState(state), nil } func (o *consumerFileStore) UpdateConfig(cfg *ConsumerConfig) error { @@ -6864,7 +7230,7 @@ func (o *consumerFileStore) Update(state *ConsumerState) error { defer o.mu.Unlock() // Check to see if this is an outdated update. - if state.Delivered.Consumer < o.state.Delivered.Consumer { + if state.Delivered.Consumer < o.state.Delivered.Consumer || state.AckFloor.Stream < o.state.AckFloor.Stream { return nil } @@ -7077,7 +7443,11 @@ func (o *consumerFileStore) BorrowState() (*ConsumerState, error) { func (o *consumerFileStore) stateWithCopy(doCopy bool) (*ConsumerState, error) { o.mu.Lock() defer o.mu.Unlock() + return o.stateWithCopyLocked(doCopy) +} +// Lock should be held. 
+func (o *consumerFileStore) stateWithCopyLocked(doCopy bool) (*ConsumerState, error) { if o.closed { return nil, ErrStoreClosed } @@ -7156,6 +7526,14 @@ func (o *consumerFileStore) stateWithCopy(doCopy bool) (*ConsumerState, error) { return state, nil } +// Lock should be held. Called at startup. +func (o *consumerFileStore) loadState() { + if _, err := os.Stat(o.ifn); err == nil { + // This will load our state in from disk. + o.stateWithCopyLocked(false) + } +} + // Decode consumer state. func decodeConsumerState(buf []byte) (*ConsumerState, error) { version, err := checkConsumerHeader(buf) diff --git a/server/filestore_test.go b/server/filestore_test.go index 92ccfd2f3..a1c356031 100644 --- a/server/filestore_test.go +++ b/server/filestore_test.go @@ -1278,7 +1278,10 @@ func TestFileStoreBitRot(t *testing.T) { // Now twiddle some bits. fs.mu.Lock() lmb := fs.lmb - contents, _ := os.ReadFile(lmb.mfn) + contents, err := os.ReadFile(lmb.mfn) + require_NoError(t, err) + require_True(t, len(contents) > 0) + var index int for { index = rand.Intn(len(contents)) @@ -1296,6 +1299,10 @@ func TestFileStoreBitRot(t *testing.T) { if len(ld.Msgs) > 0 { break } + // If our bitrot caused us to not be able to recover any messages we can break as well. + if state := fs.State(); state.Msgs == 0 { + break + } // Fail the test if we have tried the 10 times and still did not // get any corruption report. if i == 9 { @@ -1314,7 +1321,10 @@ func TestFileStoreBitRot(t *testing.T) { // checkMsgs will repair the underlying store, so checkMsgs should be clean now. if ld := fs.checkMsgs(); ld != nil { - t.Fatalf("Expected no errors restoring checked and fixed filestore, got %+v", ld) + // If we have no msgs left this will report the head msgs as lost again. + if state := fs.State(); state.Msgs > 0 { + t.Fatalf("Expected no errors restoring checked and fixed filestore, got %+v", ld) + } } }) } @@ -2776,8 +2786,8 @@ func TestFileStoreConsumerDeliveredAndAckUpdates(t *testing.T) { } } - testAck(1, 100, 1, 100) - testAck(3, 130, 1, 100) + testAck(1, 100, 1, 109) + testAck(3, 130, 1, 109) testAck(2, 110, 3, 149) // We do not track explicit state on previous stream floors, so we take last known -1 testAck(5, 165, 3, 149) testAck(4, 150, 5, 165) @@ -3668,7 +3678,7 @@ func TestFileStoreFetchPerf(t *testing.T) { // https://github.com/nats-io/nats-server/issues/2936 func TestFileStoreCompactReclaimHeadSpace(t *testing.T) { testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { - fcfg.BlockSize = 1024 * 1024 + fcfg.BlockSize = 4 * 1024 * 1024 fs, err := newFileStore( fcfg, @@ -3678,7 +3688,7 @@ func TestFileStoreCompactReclaimHeadSpace(t *testing.T) { defer fs.Stop() // Create random bytes for payload to test for corruption vs repeated. - msg := make([]byte, 16*1024) + msg := make([]byte, 64*1024) crand.Read(msg) // This gives us ~63 msgs in first and ~37 in second. 
@@ -4020,6 +4030,7 @@ func TestFileStorePurgeExWithSubject(t *testing.T) { require_True(t, int(p) == total) require_True(t, int(p) == total) require_True(t, fs.State().Msgs == 1) + require_True(t, fs.State().FirstSeq == 201) }) } @@ -4320,6 +4331,55 @@ func TestFileStoreMaxMsgsPerSubject(t *testing.T) { }) } +// Testing the case in https://github.com/nats-io/nats-server/issues/4247 +func TestFileStoreMaxMsgsAndMaxMsgsPerSubject(t *testing.T) { + testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { + fcfg.BlockSize = 128 + fcfg.CacheExpire = time.Second + + fs, err := newFileStore( + fcfg, + StreamConfig{ + Name: "zzz", + Subjects: []string{"kv.>"}, + Storage: FileStorage, + Discard: DiscardNew, MaxMsgs: 100, // Total stream policy + DiscardNewPer: true, MaxMsgsPer: 1, // Per-subject policy + }, + ) + require_NoError(t, err) + defer fs.Stop() + + for i := 1; i <= 101; i++ { + subj := fmt.Sprintf("kv.%d", i) + _, _, err := fs.StoreMsg(subj, nil, []byte("value")) + if i == 101 { + // The 101th iteration should fail because MaxMsgs is set to + // 100 and the policy is DiscardNew. + require_Error(t, err) + } else { + require_NoError(t, err) + } + } + + for i := 1; i <= 100; i++ { + subj := fmt.Sprintf("kv.%d", i) + _, _, err := fs.StoreMsg(subj, nil, []byte("value")) + // All of these iterations should fail because MaxMsgsPer is set + // to 1 and DiscardNewPer is set to true, forcing us to reject + // cases where there is already a message on this subject. + require_Error(t, err) + } + + if state := fs.State(); state.Msgs != 100 || state.FirstSeq != 1 || state.LastSeq != 100 || len(state.Deleted) != 0 { + // There should be 100 messages exactly, as the 101st subject + // should have been rejected in the first loop, and any duplicates + // on the other subjects should have been rejected in the second loop. + t.Fatalf("Bad state: %+v", state) + } + }) +} + func TestFileStoreSubjectStateCacheExpiration(t *testing.T) { testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { fcfg.BlockSize = 32 @@ -4612,6 +4672,40 @@ func TestFileStoreFSSCloseAndKeepOnExpireOnRecoverBug(t *testing.T) { }) } +func TestFileStoreExpireOnRecoverSubjectAccounting(t *testing.T) { + const msgLen = 19 + msg := bytes.Repeat([]byte("A"), msgLen) + + testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { + fcfg.BlockSize = 100 + ttl := 200 * time.Millisecond + scfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxAge: ttl} + + fs, err := newFileStore(fcfg, scfg) + require_NoError(t, err) + defer fs.Stop() + + // These are in first block. + fs.StoreMsg("A", nil, msg) + fs.StoreMsg("B", nil, msg) + time.Sleep(ttl / 2) + // This one in 2nd block. + fs.StoreMsg("C", nil, msg) + fs.Stop() + + time.Sleep(ttl/2 + 10*time.Millisecond) + + fs, err = newFileStore(fcfg, scfg) + require_NoError(t, err) + defer fs.Stop() + + // Make sure we take into account PSIM when throwing a whole block away. 
+ if state := fs.State(); state.NumSubjects != 1 { + t.Fatalf("Expected 1 subject, got %d", state.NumSubjects) + } + }) +} + func TestFileStoreFSSBadStateBug(t *testing.T) { testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { fs, err := newFileStore( @@ -5373,125 +5467,359 @@ func TestFileStoreSubjectsTotals(t *testing.T) { } } -func TestFileStoreNumPending(t *testing.T) { +func TestFileStoreConsumerStoreEncodeAfterRestart(t *testing.T) { + testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { + fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + o, err := fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) + require_NoError(t, err) + + state := &ConsumerState{} + state.Delivered.Consumer = 22 + state.Delivered.Stream = 22 + state.AckFloor.Consumer = 11 + state.AckFloor.Stream = 11 + err = o.Update(state) + require_NoError(t, err) + + fs.Stop() + + fs, err = newFileStore(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + o, err = fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) + require_NoError(t, err) + + if o.(*consumerFileStore).state.Delivered != state.Delivered { + t.Fatalf("Consumer state is wrong %+v vs %+v", o.(*consumerFileStore).state, state) + } + if o.(*consumerFileStore).state.AckFloor != state.AckFloor { + t.Fatalf("Consumer state is wrong %+v vs %+v", o.(*consumerFileStore).state, state) + } + }) +} + +func TestFileStoreNumPendingLargeNumBlks(t *testing.T) { // No need for all permutations here. storeDir := t.TempDir() fcfg := FileStoreConfig{ StoreDir: storeDir, - BlockSize: 2 * 1024, // Create many blocks on purpose. + BlockSize: 128, // Small on purpose to create alot of blks. } - fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"*.*.*.*"}, Storage: FileStorage}) + fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"zzz"}, Storage: FileStorage}) require_NoError(t, err) defer fs.Stop() - tokens := []string{"foo", "bar", "baz"} - genSubj := func() string { - return fmt.Sprintf("%s.%s.%s.%s", - tokens[rand.Intn(len(tokens))], - tokens[rand.Intn(len(tokens))], - tokens[rand.Intn(len(tokens))], - tokens[rand.Intn(len(tokens))], - ) + subj, msg := "zzz", bytes.Repeat([]byte("X"), 100) + numMsgs := 10_000 + + for i := 0; i < numMsgs; i++ { + fs.StoreMsg(subj, nil, msg) } - for i := 0; i < 50_000; i++ { - subj := genSubj() - _, _, err := fs.StoreMsg(subj, nil, []byte("Hello World")) + start := time.Now() + total, _ := fs.NumPending(4000, "zzz", false) + require_True(t, time.Since(start) < 5*time.Millisecond) + require_True(t, total == 6001) + + start = time.Now() + total, _ = fs.NumPending(6000, "zzz", false) + require_True(t, time.Since(start) < 5*time.Millisecond) + require_True(t, total == 4001) + + // Now delete a message in first half and second half. + fs.RemoveMsg(1000) + fs.RemoveMsg(9000) + + start = time.Now() + total, _ = fs.NumPending(4000, "zzz", false) + require_True(t, time.Since(start) < 50*time.Millisecond) + require_True(t, total == 6000) + + start = time.Now() + total, _ = fs.NumPending(6000, "zzz", false) + require_True(t, time.Since(start) < 50*time.Millisecond) + require_True(t, total == 4000) +} + +func TestFileStoreRestoreEncryptedWithNoKeyFuncFails(t *testing.T) { + // No need for all permutations here. 
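+	// The prf below stands in for the server's key-derivation function:
+	// HMAC-SHA256 over the supplied context with a fixed secret. Creating
+	// the store with a prf encrypts it; reopening with a nil prf must then
+	// fail with errNoMainKey instead of silently reading ciphertext.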
+ fcfg := FileStoreConfig{StoreDir: t.TempDir(), Cipher: AES} + scfg := StreamConfig{Name: "zzz", Subjects: []string{"zzz"}, Storage: FileStorage} + + // Create at first with encryption (prf) + prf := func(context []byte) ([]byte, error) { + h := hmac.New(sha256.New, []byte("dlc22")) + if _, err := h.Write(context); err != nil { + return nil, err + } + return h.Sum(nil), nil + } + + fs, err := newFileStoreWithCreated( + fcfg, scfg, + time.Now(), + prf, + ) + require_NoError(t, err) + defer fs.Stop() + + subj, msg := "zzz", bytes.Repeat([]byte("X"), 100) + numMsgs := 100 + for i := 0; i < numMsgs; i++ { + fs.StoreMsg(subj, nil, msg) + } + + fs.Stop() + + // Make sure if we try to restore with no prf (key) that it fails. + _, err = newFileStoreWithCreated( + fcfg, scfg, + time.Now(), + nil, + ) + require_Error(t, err, errNoMainKey) +} + +func TestFileStoreRecalculateFirstForSubjBug(t *testing.T) { + fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + fs.StoreMsg("foo", nil, nil) // 1 + fs.StoreMsg("bar", nil, nil) // 2 + fs.StoreMsg("foo", nil, nil) // 3 + + // Now remove first 2.. + fs.RemoveMsg(1) + fs.RemoveMsg(2) + + // Now grab first (and only) block. + fs.mu.RLock() + mb := fs.blks[0] + fs.mu.RUnlock() + + // Since we lazy update the first, simulate that we have not updated it as of yet. + ss := &SimpleState{Msgs: 1, First: 1, Last: 3, firstNeedsUpdate: true} + + mb.mu.Lock() + defer mb.mu.Unlock() + + // Flush the cache. + mb.clearCacheAndOffset() + // Now call with start sequence of 1, the old one + // This will panic without the fix. + mb.recalculateFirstForSubj("foo", 1, ss) + // Make sure it was update properly. + require_True(t, *ss == SimpleState{Msgs: 1, First: 3, Last: 3, firstNeedsUpdate: false}) +} + +func TestFileStoreKeepWithDeletedMsgsBug(t *testing.T) { + fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + msg := bytes.Repeat([]byte("A"), 19) + for i := 0; i < 5; i++ { + fs.StoreMsg("A", nil, msg) + fs.StoreMsg("B", nil, msg) + } + + n, err := fs.PurgeEx("A", 0, 0) + require_NoError(t, err) + require_True(t, n == 5) + + // Purge with keep. + n, err = fs.PurgeEx(_EMPTY_, 0, 2) + require_NoError(t, err) + require_True(t, n == 3) +} + +// This is for 2.10 delete tombstones and backward compatibility if a user downgrades to 2.9.x +// TODO(dlc) - Can remove once merged into 2.10 codebase. +func TestFileStoreTombstoneBackwardCompatibility(t *testing.T) { + sd := t.TempDir() + fs, err := newFileStore(FileStoreConfig{StoreDir: sd}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + // We will test scenarios where tombstones are embedded in a filestore from a 2.10 system. + msgA := bytes.Repeat([]byte("A"), 22) + msgZ := bytes.Repeat([]byte("Z"), 22) + + fs.StoreMsg("A", nil, msgA) + fs.StoreMsg("B", nil, msgZ) + + mb := fs.getFirstBlock() + require_True(t, mb != nil) + + // >= 2.10 tombstone + mb.writeMsgRecord(emptyRecordLen, 2|tbit, _EMPTY_, nil, nil, time.Now().UnixNano(), true) + + // Put a real message behind it. 
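+	// (The record written above is a 2.10-style tombstone: an empty record
+	// whose sequence has the tbit flag OR'd in. A 2.9.x recovery pass is
+	// expected to skip it entirely, which checkState verifies by still
+	// seeing msgs 1-3 and subjects A, B and C.)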
+ fs.StoreMsg("C", nil, msgA) + + checkState := func() { + state := fs.State() + require_True(t, state.Msgs == 3) + require_True(t, state.FirstSeq == 1) + require_True(t, state.LastSeq == 3) + require_True(t, state.NumSubjects == 3) + + sm, err := fs.LoadMsg(2, nil) + require_NoError(t, err) + require_True(t, bytes.Equal(sm.msg, msgZ)) + require_True(t, sm.subj == "B") + + sm, err = fs.LoadMsg(3, nil) require_NoError(t, err) + require_True(t, bytes.Equal(sm.msg, msgA)) + require_True(t, sm.subj == "C") } - state := fs.State() + checkState() + fs.Stop() - // Scan one by one for sanity check against other calculations. - sanityCheck := func(sseq uint64, filter string) SimpleState { - t.Helper() - var ss SimpleState - var smv StoreMsg - // For here we know 0 is invalid, set to 1. - if sseq == 0 { - sseq = 1 - } - for seq := sseq; seq <= state.LastSeq; seq++ { - sm, err := fs.LoadMsg(seq, &smv) - if err != nil { - t.Logf("Encountered error %v loading sequence: %d", err, seq) - continue - } - if subjectIsSubsetMatch(sm.subj, filter) { - ss.Msgs++ - ss.Last = seq - if ss.First == 0 || seq < ss.First { - ss.First = seq - } - } - } - return ss + // Make sure we are good on recreate. + fs, err = newFileStore(FileStoreConfig{StoreDir: sd}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + checkState() + + // Now we will purge, place tombstone first, then add messages and check. + _, err = fs.Purge() + require_NoError(t, err) + + // >= 2.10 tombstone + mb.writeMsgRecord(emptyRecordLen, 22|tbit, _EMPTY_, nil, nil, time.Now().UnixNano(), true) + + fs.StoreMsg("A", nil, msgA) // seq 4 + fs.StoreMsg("B", nil, msgZ) // seq 5 + + checkPurgeState := func() { + state := fs.State() + require_True(t, state.Msgs == 2) + require_True(t, state.FirstSeq == 4) + require_True(t, state.LastSeq == 5) + require_True(t, state.NumSubjects == 2) + + sm, err := fs.LoadMsg(4, nil) + require_NoError(t, err) + require_True(t, bytes.Equal(sm.msg, msgA)) + require_True(t, sm.subj == "A") + + sm, err = fs.LoadMsg(5, nil) + require_NoError(t, err) + require_True(t, bytes.Equal(sm.msg, msgZ)) + require_True(t, sm.subj == "B") } - check := func(sseq uint64, filter string) { - t.Helper() - np, lvs := fs.NumPending(sseq, filter, false) - ss := fs.FilteredState(sseq, filter) - sss := sanityCheck(sseq, filter) - if lvs != state.LastSeq { - t.Fatalf("Expected NumPending to return valid through last of %d but got %d", state.LastSeq, lvs) - } - if ss.Msgs != np { - t.Fatalf("NumPending of %d did not match ss.Msgs of %d", np, ss.Msgs) - } - if ss != sss { - t.Fatalf("Failed sanity check, expected %+v got %+v", sss, ss) + checkPurgeState() + + // Make sure we are good on recreate. + fs, err = newFileStore(FileStoreConfig{StoreDir: sd}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + checkPurgeState() +} + +// Test that loads from lmb under lots of writes do not return errPartialCache. +func TestFileStoreErrPartialLoad(t *testing.T) { + fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + put := func(num int) { + for i := 0; i < num; i++ { + fs.StoreMsg("Z", nil, []byte("ZZZZZZZZZZZZZ")) } } - sanityCheckLastOnly := func(sseq uint64, filter string) SimpleState { - t.Helper() - var ss SimpleState - var smv StoreMsg - // For here we know 0 is invalid, set to 1. 
- if sseq == 0 { - sseq = 1 - } - seen := make(map[string]bool) - for seq := state.LastSeq; seq >= sseq; seq-- { - sm, err := fs.LoadMsg(seq, &smv) - if err != nil { - t.Logf("Encountered error %v loading sequence: %d", err, seq) - continue - } - if !seen[sm.subj] && subjectIsSubsetMatch(sm.subj, filter) { - ss.Msgs++ - if ss.Last == 0 { - ss.Last = seq - } - if ss.First == 0 || seq < ss.First { - ss.First = seq + put(100) + + // Dump cache of lmb. + clearCache := func() { + fs.mu.RLock() + lmb := fs.lmb + fs.mu.RUnlock() + lmb.mu.Lock() + lmb.clearCache() + lmb.mu.Unlock() + } + clearCache() + + qch := make(chan struct{}) + defer close(qch) + + for i := 0; i < 10; i++ { + go func() { + for { + select { + case <-qch: + return + default: + put(5) } - seen[sm.subj] = true } - } - return ss + }() } - checkLastOnly := func(sseq uint64, filter string) { - t.Helper() - np, lvs := fs.NumPending(sseq, filter, true) - ss := sanityCheckLastOnly(sseq, filter) - if lvs != state.LastSeq { - t.Fatalf("Expected NumPending to return valid through last of %d but got %d", state.LastSeq, lvs) + time.Sleep(100 * time.Millisecond) + + var smv StoreMsg + for i := 0; i < 10_000; i++ { + fs.mu.RLock() + lmb := fs.lmb + fs.mu.RUnlock() + lmb.mu.Lock() + first, last := fs.lmb.first.seq, fs.lmb.last.seq + if i%100 == 0 { + lmb.clearCache() } - if ss.Msgs != np { - t.Fatalf("NumPending of %d did not match ss.Msgs of %d", np, ss.Msgs) + lmb.mu.Unlock() + + if spread := int(last - first); spread > 0 { + seq := first + uint64(rand.Intn(spread)) + _, err = fs.LoadMsg(seq, &smv) + require_NoError(t, err) } } +} + +func TestFileStoreErrPartialLoadOnSyncClose(t *testing.T) { + fs, err := newFileStore( + FileStoreConfig{StoreDir: t.TempDir(), BlockSize: 500}, + StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}, + ) + require_NoError(t, err) + defer fs.Stop() - startSeqs := []uint64{0, 1, 2, 200, 444, 555, 2222, 8888, 12_345, 28_222, 33_456, 44_400, 49_999} - checkSubs := []string{"foo.>", "*.bar.>", "foo.bar.*.baz", "*.bar.>", "*.foo.bar.*", "foo.foo.bar.baz"} + // This yields an internal record length of 50 bytes. So 10 msgs per blk. + msgLen := 19 + msg := bytes.Repeat([]byte("A"), msgLen) - for _, filter := range checkSubs { - for _, start := range startSeqs { - check(start, filter) - checkLastOnly(start, filter) - } + // Load up half the block. + for _, subj := range []string{"A", "B", "C", "D", "E"} { + fs.StoreMsg(subj, nil, msg) } + + // Now simulate the sync timer closing the last block. + fs.mu.RLock() + lmb := fs.lmb + fs.mu.RUnlock() + require_True(t, lmb != nil) + + lmb.mu.Lock() + lmb.expireCacheLocked() + lmb.dirtyCloseWithRemove(false) + lmb.mu.Unlock() + + fs.StoreMsg("Z", nil, msg) + _, err = fs.LoadMsg(1, nil) + require_NoError(t, err) } diff --git a/server/gateway.go b/server/gateway.go index f86944b6e..f23ada3bb 100644 --- a/server/gateway.go +++ b/server/gateway.go @@ -761,7 +761,7 @@ func (s *Server) createGateway(cfg *gatewayCfg, url *url.URL, conn net.Conn) { // Snapshot server options. opts := s.getOpts() - now := time.Now().UTC() + now := time.Now() c := &client{srv: s, nc: conn, start: now, last: now, kind: GATEWAY} // Are we creating the gateway based on the configuration @@ -870,9 +870,7 @@ func (s *Server) createGateway(cfg *gatewayCfg, url *url.URL, conn net.Conn) { // Announce ourselves again to new connections. 
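+	// sendStatszUpdate acquires the server lock and formats the statsz
+	// subject itself, so no manual s.mu handling is needed at this call site.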
if solicit && s.EventsEnabled() { - s.mu.Lock() - s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) - s.mu.Unlock() + s.sendStatszUpdate() } } @@ -2680,12 +2678,11 @@ func (s *Server) gatewayHandleSubjectNoInterest(c *client, acc *Account, accName // If there is no subscription for this account, we would normally // send an A-, however, if this account has the internal subscription // for service reply, send a specific RS- for the subject instead. - hasSubs := acc.sl.Count() > 0 - if !hasSubs { - acc.mu.RLock() - hasSubs = acc.siReply != nil - acc.mu.RUnlock() - } + // Need to grab the lock here since sublist can change during reload. + acc.mu.RLock() + hasSubs := acc.sl.Count() > 0 || acc.siReply != nil + acc.mu.RUnlock() + // If there is at least a subscription, possibly send RS- if hasSubs { sendProto := false @@ -2964,7 +2961,7 @@ func (c *client) processInboundGatewayMsg(msg []byte) { // Check if this is a service reply subject (_R_) noInterest := len(r.psubs) == 0 checkNoInterest := true - if acc.imports.services != nil { + if acc.NumServiceImports() > 0 { if isServiceReply(c.pa.subject) { checkNoInterest = false } else { diff --git a/server/ipqueue.go b/server/ipqueue.go index d0bba0ecf..9a96e4863 100644 --- a/server/ipqueue.go +++ b/server/ipqueue.go @@ -1,4 +1,4 @@ -// Copyright 2021 The NATS Authors +// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -20,28 +20,28 @@ import ( const ( ipQueueDefaultMaxRecycleSize = 4 * 1024 - ipQueueDefaultLen = 0 - ipQueueInitialCapacity = 32 + ipQueueDefaultLen = 0 // ** added by Memphis + ipQueueInitialCapacity = 32 // ** added by Memphis ) // This is a generic intra-process queue. 
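+// A minimal usage sketch (hypothetical caller and payload type, not part of
+// this change; newIPQueue, push and pop are the APIs defined below):
+//
+//	q := newIPQueue[int](s, "example", ipQueue_MaxQueueLen(1024))
+//	q.push(42)
+//	for _, v := range q.pop() {
+//		handle(v) // handle is a placeholder for real work
+//	}
+//
+// Note that queues created with ipQueue_MaxQueueLen do not support popOne().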
type ipQueue[T any] struct { inprogress int64 - sync.RWMutex + sync.Mutex ch chan struct{} elts []T pos int pool *sync.Pool mrs int - mql int - wrapAround bool + mql int // ** added by Memphis + wrapAround bool // ** added by Memphis name string m *sync.Map } type ipQueueOpts struct { maxRecycleSize int - maxQueueLen int + maxQueueLen int // ** added by Memphis } type ipQueueOpt func(*ipQueueOpts) @@ -54,6 +54,7 @@ func ipQueue_MaxRecycleSize(max int) ipQueueOpt { } } +// ** added by Memphis // This option allows to set the maximum queue length // Queues with this option enabled (>0) do not support the popOne() op func ipQueue_MaxQueueLen(max int) ipQueueOpt { @@ -62,10 +63,12 @@ func ipQueue_MaxQueueLen(max int) ipQueueOpt { } } +// ** added by Memphis + func newIPQueue[T any](s *Server, name string, opts ...ipQueueOpt) *ipQueue[T] { qo := ipQueueOpts{ maxRecycleSize: ipQueueDefaultMaxRecycleSize, - maxQueueLen: ipQueueDefaultLen, + maxQueueLen: ipQueueDefaultLen, // ** added by Memphis } for _, o := range opts { @@ -74,7 +77,7 @@ func newIPQueue[T any](s *Server, name string, opts ...ipQueueOpt) *ipQueue[T] { q := &ipQueue[T]{ ch: make(chan struct{}, 1), mrs: qo.maxRecycleSize, - mql: qo.maxQueueLen, + mql: qo.maxQueueLen, // ** added by Memphis pool: &sync.Pool{}, name: name, m: &s.ipQueues, @@ -91,9 +94,11 @@ func (q *ipQueue[T]) push(e T) int { q.Lock() l := len(q.elts) - q.pos + // ** added by Memphis if q.wrapAround { l = q.mql } + // ** added by Memphis if l == 0 { signal = true @@ -104,10 +109,11 @@ func (q *ipQueue[T]) push(e T) int { q.elts = (*(eltsi.(*[]T)))[:0] } if cap(q.elts) == 0 { - q.elts = make([]T, 0, ipQueueInitialCapacity) + q.elts = make([]T, 0, ipQueueInitialCapacity) // ** changed by Memphis } } + // ** added/changed by Memphis if q.mql > 0 && q.mql == l { if !q.wrapAround { //first time we reached max elements @@ -119,6 +125,7 @@ func (q *ipQueue[T]) push(e T) int { q.elts = append(q.elts, e) l++ } + // ** added by Memphis q.Unlock() if signal { @@ -140,13 +147,17 @@ func (q *ipQueue[T]) push(e T) int { // emptied the queue. So the caller should never assume that pop() will // return a slice of 1 or more, it could return `nil`. func (q *ipQueue[T]) pop() []T { + if q == nil { + return nil + } var elts []T q.Lock() if q.pos == 0 { elts = q.elts - } else if q.wrapAround { + } else if q.wrapAround { // ** added by Memphis elts = q.elts[q.pos:] elts = append(elts, q.elts[:q.pos]...) + // ** added by Memphis } else { elts = q.elts[q.pos:] } @@ -219,9 +230,9 @@ func (q *ipQueue[T]) recycle(elts *[]T) { // Returns the current length of the queue. 
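+// With the plain Mutex above, len() now serializes with writers; the
+// critical section is only a couple of loads, so the extra contention
+// should be negligible (an assumption, not something measured here).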
func (q *ipQueue[T]) len() int { - q.RLock() + q.Lock() l := len(q.elts) - q.pos - q.RUnlock() + q.Unlock() return l } diff --git a/server/ipqueue_test.go b/server/ipqueue_test.go index ba0e9f34b..1800f2a13 100644 --- a/server/ipqueue_test.go +++ b/server/ipqueue_test.go @@ -221,9 +221,9 @@ func TestIPQueuePopOne(t *testing.T) { q.push(1) q.push(2) // Capture current capacity - q.RLock() + q.Lock() c := cap(q.elts) - q.RUnlock() + q.Unlock() e, ok = q.popOne() if !ok || e != 1 { t.Fatalf("Invalid value: %v", e) @@ -343,9 +343,9 @@ func TestIPQueueRecycle(t *testing.T) { values = q.pop() q.recycle(&values) q.push(1002) - q.RLock() + q.Lock() recycled := &q.elts == &values - q.RUnlock() + q.Unlock() if recycled { t.Fatalf("Unexpected recycled slice") } diff --git a/server/jetstream.go b/server/jetstream.go index e502b4ef5..6fbf25b66 100644 --- a/server/jetstream.go +++ b/server/jetstream.go @@ -1,4 +1,4 @@ -// Copyright 2019-2022 The NATS Authors +// Copyright 2019-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -109,11 +109,14 @@ type jetStream struct { started time.Time // System level request to purge a stream move - accountPurge *subscription + accountPurge *subscription + + // Some bools regarding general state. metaRecovering bool standAlone bool disabled bool oos bool + shuttingDown bool } type remoteUsage struct { @@ -137,6 +140,7 @@ type jsAccount struct { js *jetStream account *Account storeDir string + inflight sync.Map streams map[string]*stream templates map[string]*streamTemplate store TemplateStore @@ -144,6 +148,9 @@ type jsAccount struct { // From server sendq *ipQueue[*pubMsg] + // For limiting only running one checkAndSync at a time. + sync atomic.Bool + // Usage/limits related fields that will be protected by usageMu usageMu sync.RWMutex limits map[string]JetStreamAccountLimits // indexed by tierName @@ -172,6 +179,7 @@ func (s *Server) EnableJetStream(config *JetStreamConfig) error { return fmt.Errorf("jetstream already enabled") } + s.Noticef("Starting JetStream") if config == nil || config.MaxMemory <= 0 || config.MaxStore <= 0 { var storeDir, domain string var maxStore, maxMem int64 @@ -358,6 +366,7 @@ func (s *Server) enableJetStream(cfg JetStreamConfig) error { s.SetDefaultSystemAccount() } + // ** changed by Memphis s.Noticef(" __ __ _ _ __ _ __ ") s.Noticef(" | \\/ | | | (_) / / | | \\ \\ ") s.Noticef(" | \\ / | ___ _ __ ___ _ __ | |__ _ ___ | | __| | ___ __ __ | | ") @@ -369,6 +378,7 @@ func (s *Server) enableJetStream(cfg JetStreamConfig) error { s.Noticef("") s.Noticef(" https://docs.memphis.dev") s.Noticef("") + // ** changed by Memphis s.Noticef(" Max Memory: %s", friendlyBytes(cfg.MaxMemory)) s.Noticef(" Max Storage: %s", friendlyBytes(cfg.MaxStore)) s.Noticef(" Store Directory: \"%s\"", cfg.StoreDir) @@ -465,7 +475,7 @@ func (s *Server) restartJetStream() error { return nil } -// checkStreamExports will check if we have the JS exports setup +// checkJetStreamExports will check if we have the JS exports setup // on the system account, and if not go ahead and set them up. func (s *Server) checkJetStreamExports() { if sacc := s.SystemAccount(); sacc != nil { @@ -580,6 +590,9 @@ func (s *Server) DisableJetStream() error { // Normal shutdown. s.shutdownJetStream() + // Shut down the RAFT groups. 
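+	// (Ordering assumption: streams and consumers were already stopped by
+	// shutdownJetStream above, so their RAFT groups can be torn down here
+	// without racing in-flight applies.)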
+ s.shutdownRaftNodes() + return nil } @@ -636,7 +649,7 @@ func (a *Account) enableAllJetStreamServiceImportsAndMappings() error { return nil } -// enableJetStreamEnabledServiceImportOnly will enable the single service import responder. +// enableJetStreamInfoServiceImportOnly will enable the single service import responder. // Should we do them all regardless? func (a *Account) enableJetStreamInfoServiceImportOnly() error { // Check if this import would be overshadowed. This can happen when accounts @@ -853,6 +866,13 @@ func (s *Server) signalPullConsumers() { } } +// Helper for determining if we are shutting down. +func (js *jetStream) isShuttingDown() bool { + js.mu.RLock() + defer js.mu.RUnlock() + return js.shuttingDown +} + // Shutdown jetstream for this server. func (s *Server) shutdownJetStream() { s.mu.RLock() @@ -885,6 +905,8 @@ func (s *Server) shutdownJetStream() { } accPurgeSub := js.accountPurge js.accountPurge = nil + // Signal we are shutting down. + js.shuttingDown = true js.mu.Unlock() if accPurgeSub != nil { @@ -1006,9 +1028,9 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro return fmt.Errorf("jetstream can not be enabled on the system account") } - s.mu.Lock() + s.mu.RLock() sendq := s.sys.sendq - s.mu.Unlock() + s.mu.RUnlock() // No limits means we dynamically set up limits. // We also place limits here so we know that the account is configured for JetStream. @@ -1040,12 +1062,15 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro jsa := &jsAccount{js: js, account: a, limits: limits, streams: make(map[string]*stream), sendq: sendq, usage: make(map[string]*jsaStorage)} jsa.storeDir = filepath.Join(js.config.StoreDir, a.Name) - jsa.usageMu.Lock() - jsa.utimer = time.AfterFunc(usageTick, jsa.sendClusterUsageUpdateTimer) - // Cluster mode updates to resource usage, but we always will turn on. System internal prevents echos. - jsa.updatesPub = fmt.Sprintf(jsaUpdatesPubT, a.Name, sysNode) - jsa.updatesSub, _ = s.sysSubscribe(fmt.Sprintf(jsaUpdatesSubT, a.Name), jsa.remoteUpdateUsage) - jsa.usageMu.Unlock() + // A single server does not need to do the account updates at this point. + if js.cluster != nil || !s.standAloneMode() { + jsa.usageMu.Lock() + jsa.utimer = time.AfterFunc(usageTick, jsa.sendClusterUsageUpdateTimer) + // Cluster mode updates to resource usage. System internal prevents echos. + jsa.updatesPub = fmt.Sprintf(jsaUpdatesPubT, a.Name, sysNode) + jsa.updatesSub, _ = s.sysSubscribe(fmt.Sprintf(jsaUpdatesSubT, a.Name), jsa.remoteUpdateUsage) + jsa.usageMu.Unlock() + } js.accounts[a.Name] = jsa js.mu.Unlock() @@ -1195,22 +1220,23 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro // Check if we are encrypted. keyFile := filepath.Join(mdir, JetStreamMetaFileKey) - if key, err := os.ReadFile(keyFile); err == nil { + keyBuf, err := os.ReadFile(keyFile) + if err == nil { s.Debugf(" Stream metafile is encrypted, reading encrypted keyfile") - if len(key) < minMetaKeySize { - s.Warnf(" Bad stream encryption key length of %d", len(key)) + if len(keyBuf) < minMetaKeySize { + s.Warnf(" Bad stream encryption key length of %d", len(keyBuf)) continue } // Decode the buffer before proceeding. - nbuf, err := s.decryptMeta(sc, key, buf, a.Name, fi.Name()) + nbuf, err := s.decryptMeta(sc, keyBuf, buf, a.Name, fi.Name()) if err != nil { // See if we are changing ciphers. 
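+					// i.e. retry the decrypt with the alternate cipher; on
+					// success remember the on-disk cipher (osc) and flag the
+					// conversion so the key file can be regenerated under the
+					// new cipher once the stream is recreated.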
switch sc { case ChaCha: - nbuf, err = s.decryptMeta(AES, key, buf, a.Name, fi.Name()) + nbuf, err = s.decryptMeta(AES, keyBuf, buf, a.Name, fi.Name()) osc, convertingCiphers = AES, true case AES: - nbuf, err = s.decryptMeta(ChaCha, key, buf, a.Name, fi.Name()) + nbuf, err = s.decryptMeta(ChaCha, keyBuf, buf, a.Name, fi.Name()) osc, convertingCiphers = ChaCha, true } if err != nil { @@ -1220,9 +1246,6 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro } buf = nbuf plaintext = false - - // Remove the key file to have system regenerate with the new cipher. - os.Remove(keyFile) } var cfg FileStreamInfo @@ -1274,6 +1297,8 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro s.Noticef(" Encrypting stream '%s > %s'", a.Name, cfg.StreamConfig.Name) } else if convertingCiphers { s.Noticef(" Converting from %s to %s for stream '%s > %s'", osc, sc, a.Name, cfg.StreamConfig.Name) + // Remove the key file to have system regenerate with the new cipher. + os.Remove(keyFile) } } @@ -1281,6 +1306,13 @@ func (a *Account) EnableJetStream(limits map[string]JetStreamAccountLimits) erro mset, err := a.addStream(&cfg.StreamConfig) if err != nil { s.Warnf(" Error recreating stream %q: %v", cfg.Name, err) + // If we removed a keyfile from above make sure to put it back. + if convertingCiphers { + err := os.WriteFile(keyFile, keyBuf, defaultFilePerms) + if err != nil { + s.Warnf(" Error replacing meta keyfile for stream %q: %v", cfg.Name, err) + } + } continue } if !cfg.Created.IsZero() { @@ -1742,14 +1774,13 @@ func (a *Account) JetStreamEnabled() bool { } func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, _ *Account, subject, _ string, msg []byte) { - const usageSize = 32 - // jsa.js.srv is immutable and guaranteed to no be nil, so no lock needed. s := jsa.js.srv jsa.usageMu.Lock() - if len(msg) < usageSize { - jsa.usageMu.Unlock() + defer jsa.usageMu.Unlock() + + if len(msg) < minUsageUpdateLen { s.Warnf("Ignoring remote usage update with size too short") return } @@ -1758,7 +1789,6 @@ func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, _ *Account rnode = subject[li+1:] } if rnode == _EMPTY_ { - jsa.usageMu.Unlock() s.Warnf("Received remote usage update with no remote node") return } @@ -1793,21 +1823,31 @@ func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, _ *Account apiTotal, apiErrors := le.Uint64(msg[16:]), le.Uint64(msg[24:]) memUsed, storeUsed := int64(le.Uint64(msg[0:])), int64(le.Uint64(msg[8:])) - // we later extended the data structure to support multiple tiers - excessRecordCnt := uint32(0) - tierName := _EMPTY_ - if len(msg) >= 44 { - excessRecordCnt = le.Uint32(msg[32:]) - length := le.Uint64(msg[36:]) - tierName = string(msg[44 : 44+length]) - msg = msg[44+length:] + // We later extended the data structure to support multiple tiers + var excessRecordCnt uint32 + var tierName string + + if len(msg) >= usageMultiTiersLen { + excessRecordCnt = le.Uint32(msg[minUsageUpdateLen:]) + length := le.Uint64(msg[minUsageUpdateLen+4:]) + // Need to protect past this point in case this is wrong. 
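+	// Assumed wire layout of a multi-tier usage update (little endian),
+	// reconstructed from the reads above and below:
+	//
+	//	[0:8]   memUsed     [8:16]  storeUsed
+	//	[16:24] apiTotal    [24:32] apiErrors
+	//	[32:36] excess record count
+	//	[36:44] tier-name length, then the tier name itself
+	//
+	// length comes straight off the wire, so it is validated before slicing
+	// to avoid panicking on a corrupt or truncated update.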
+ if uint64(len(msg)) < usageMultiTiersLen+length { + s.Warnf("Received corrupt remote usage update") + return + } + tierName = string(msg[usageMultiTiersLen : usageMultiTiersLen+length]) + msg = msg[usageMultiTiersLen+length:] } updateTotal(tierName, memUsed, storeUsed) - for ; excessRecordCnt > 0 && len(msg) >= 24; excessRecordCnt-- { + for ; excessRecordCnt > 0 && len(msg) >= usageRecordLen; excessRecordCnt-- { memUsed, storeUsed := int64(le.Uint64(msg[0:])), int64(le.Uint64(msg[8:])) length := le.Uint64(msg[16:]) - tierName = string(msg[24 : 24+length]) - msg = msg[24+length:] + if uint64(len(msg)) < usageRecordLen+length { + s.Warnf("Received corrupt remote usage update on excess record") + return + } + tierName = string(msg[usageRecordLen : usageRecordLen+length]) + msg = msg[usageRecordLen+length:] updateTotal(tierName, memUsed, storeUsed) } jsa.apiTotal -= rUsage.api @@ -1816,7 +1856,85 @@ func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, _ *Account rUsage.err = apiErrors jsa.apiTotal += apiTotal jsa.apiErrors += apiErrors - jsa.usageMu.Unlock() +} + +// When we detect a skew of some sort this will verify the usage reporting is correct. +// No locks should be held. +func (jsa *jsAccount) checkAndSyncUsage(tierName string, storeType StorageType) { + // This will run in a separate go routine, so check that we are only running once. + if !jsa.sync.CompareAndSwap(false, true) { + return + } + defer jsa.sync.Store(false) + + // Hold the account read lock and the usage lock while we calculate. + // We scope by tier and storage type, but if R3 File has 200 streams etc. could + // show a pause. I did test with > 100 non-active streams and was 80-200ns or so. + // Should be rare this gets called as well. + jsa.mu.RLock() + defer jsa.mu.RUnlock() + js := jsa.js + if js == nil { + return + } + s := js.srv + + // We need to collect the stream stores before we acquire the usage lock since in storeUpdates the + // stream lock could be held if deletion are inline with storing a new message, e.g. via limits. + var stores []StreamStore + for _, mset := range jsa.streams { + mset.mu.RLock() + if mset.tier == tierName && mset.stype == storeType && mset.store != nil { + stores = append(stores, mset.store) + } + mset.mu.RUnlock() + } + + // Now range and qualify, hold usage lock to prevent updates. + jsa.usageMu.Lock() + defer jsa.usageMu.Unlock() + + usage, ok := jsa.usage[tierName] + if !ok { + return + } + + // Collect current total for all stream stores that matched. + var total int64 + var state StreamState + for _, store := range stores { + store.FastState(&state) + total += int64(state.Bytes) + } + + var needClusterUpdate bool + // If we do not match on our calculations compute delta and adjust. + if storeType == MemoryStorage { + if total != usage.local.mem { + s.Warnf("MemStore usage drift of %v vs %v detected for account %q", + friendlyBytes(total), friendlyBytes(usage.local.mem), jsa.account.GetName()) + delta := total - usage.local.mem + usage.local.mem += delta + usage.total.mem += delta + atomic.AddInt64(&js.memUsed, delta) + needClusterUpdate = true + } + } else { + if total != usage.local.store { + s.Warnf("FileStore usage drift of %v vs %v detected for account %q", + friendlyBytes(total), friendlyBytes(usage.local.store), jsa.account.GetName()) + delta := total - usage.local.store + usage.local.store += delta + usage.total.store += delta + atomic.AddInt64(&js.storeUsed, delta) + needClusterUpdate = true + } + } + + // Publish our local updates if in clustered mode. 
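+	// Only on detected drift, so healthy servers stay on the normal
+	// rate-limited update cadence instead of broadcasting after every sync.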
+ if needClusterUpdate && js.isClusteredNoLock() { + jsa.sendClusterUsageUpdate() + } } // Updates accounting on in use memory and storage. This is called from locally @@ -1829,9 +1947,8 @@ func (jsa *jsAccount) updateUsage(tierName string, storeType StorageType, delta // use of an atomic to do the check without having data race reports. isClustered := js.isClusteredNoLock() + var needsCheck bool jsa.usageMu.Lock() - defer jsa.usageMu.Unlock() - s, ok := jsa.usage[tierName] if !ok { s = &jsaStorage{} @@ -1841,15 +1958,25 @@ func (jsa *jsAccount) updateUsage(tierName string, storeType StorageType, delta s.local.mem += delta s.total.mem += delta atomic.AddInt64(&js.memUsed, delta) + needsCheck = s.local.mem < 0 } else { s.local.store += delta s.total.store += delta atomic.AddInt64(&js.storeUsed, delta) + needsCheck = s.local.store < 0 } // Publish our local updates if in clustered mode. if isClustered { jsa.sendClusterUsageUpdate() } + jsa.usageMu.Unlock() + + if needsCheck { + // We could be holding the stream lock from up in the stack, and this + // will want the jsa lock, which would violate locking order. + // So do this in a Go routine. The function will check if it is already running. + go jsa.checkAndSyncUsage(tierName, storeType) + } } var usageTick = 1500 * time.Millisecond @@ -1863,12 +1990,22 @@ func (jsa *jsAccount) sendClusterUsageUpdateTimer() { } } +// For usage fields. +const ( + minUsageUpdateLen = 32 + stackUsageUpdate = 72 + usageRecordLen = 24 + usageMultiTiersLen = 44 + apiStatsAndNumTiers = 20 + minUsageUpdateWindow = 250 * time.Millisecond +) + // Send updates to our account usage for this server. // jsa.usageMu lock should be held. func (jsa *jsAccount) sendClusterUsageUpdate() { // These values are absolute so we can limit send rates. 
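+	// Sizing note (derived from the constants above, not load-tested): a
+	// single-tier update encodes to apiStatsAndNumTiers (20) + usageRecordLen
+	// (24) + len(tierName) bytes, so any tier name up to 28 bytes fits in the
+	// 72-byte stack buffer below without a heap allocation.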
now := time.Now() - if now.Sub(jsa.lupdate) < 250*time.Millisecond { + if now.Sub(jsa.lupdate) < minUsageUpdateWindow { return } jsa.lupdate = now @@ -1878,32 +2015,37 @@ func (jsa *jsAccount) sendClusterUsageUpdate() { return } // every base record contains mem/store/len(tier) as well as the tier name - l := 24 * lenUsage + l := usageRecordLen * lenUsage for tier := range jsa.usage { l += len(tier) } - if lenUsage > 0 { - // first record contains api/usage errors as well as count for extra base records - l += 20 + // first record contains api/usage errors as well as count for extra base records + l += apiStatsAndNumTiers + + var raw [stackUsageUpdate]byte + var b []byte + if l > stackUsageUpdate { + b = make([]byte, l) + } else { + b = raw[:l] } - var le = binary.LittleEndian - b := make([]byte, l) - i := 0 + var i int + var le = binary.LittleEndian for tier, usage := range jsa.usage { le.PutUint64(b[i+0:], uint64(usage.local.mem)) le.PutUint64(b[i+8:], uint64(usage.local.store)) if i == 0 { - le.PutUint64(b[i+16:], jsa.usageApi) - le.PutUint64(b[i+24:], jsa.usageErr) - le.PutUint32(b[i+32:], uint32(len(jsa.usage)-1)) - le.PutUint64(b[i+36:], uint64(len(tier))) - copy(b[i+44:], tier) - i += 44 + len(tier) + le.PutUint64(b[16:], jsa.usageApi) + le.PutUint64(b[24:], jsa.usageErr) + le.PutUint32(b[32:], uint32(len(jsa.usage)-1)) + le.PutUint64(b[36:], uint64(len(tier))) + copy(b[usageMultiTiersLen:], tier) + i = usageMultiTiersLen + len(tier) } else { le.PutUint64(b[i+16:], uint64(len(tier))) - copy(b[i+24:], tier) - i += 24 + len(tier) + copy(b[i+usageRecordLen:], tier) + i += usageRecordLen + len(tier) } } jsa.sendq.push(newPubMsg(nil, jsa.updatesPub, _EMPTY_, nil, nil, b, noCompression, false, false)) @@ -2136,15 +2278,24 @@ func (js *jetStream) usageStats() *JetStreamStats { var stats JetStreamStats js.mu.RLock() stats.Accounts = len(js.accounts) - stats.ReservedMemory = (uint64)(js.memReserved) - stats.ReservedStore = (uint64)(js.storeReserved) + stats.ReservedMemory = uint64(js.memReserved) + stats.ReservedStore = uint64(js.storeReserved) s := js.srv js.mu.RUnlock() - stats.API.Total = (uint64)(atomic.LoadInt64(&js.apiTotal)) - stats.API.Errors = (uint64)(atomic.LoadInt64(&js.apiErrors)) - stats.API.Inflight = (uint64)(atomic.LoadInt64(&js.apiInflight)) - stats.Memory = (uint64)(atomic.LoadInt64(&js.memUsed)) - stats.Store = (uint64)(atomic.LoadInt64(&js.storeUsed)) + stats.API.Total = uint64(atomic.LoadInt64(&js.apiTotal)) + stats.API.Errors = uint64(atomic.LoadInt64(&js.apiErrors)) + stats.API.Inflight = uint64(atomic.LoadInt64(&js.apiInflight)) + // Make sure we do not report negative. + used := atomic.LoadInt64(&js.memUsed) + if used < 0 { + used = 0 + } + stats.Memory = uint64(used) + used = atomic.LoadInt64(&js.storeUsed) + if used < 0 { + used = 0 + } + stats.Store = uint64(used) stats.HAAssets = s.numRaftNodes() return &stats } @@ -2610,9 +2761,13 @@ func (jsa *jsAccount) checkTemplateOwnership(tname, sname string) bool { return false } +type Number interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64 +} + // friendlyBytes returns a string with the given bytes int64 // represented as a size, such as 1KB, 10MB, etc... -func friendlyBytes(bytes int64) string { +func friendlyBytes[T Number](bytes T) string { fbytes := float64(bytes) base := 1024 pre := []string{"K", "M", "G", "T", "P", "E"} @@ -2638,7 +2793,7 @@ func canonicalName(name string) string { } // To throttle the out of resources errors. 
-func (s *Server) resourcesExeededError() { +func (s *Server) resourcesExceededError() { var didAlert bool s.rerrMu.Lock() diff --git a/server/jetstream_api.go b/server/jetstream_api.go index f88fe4fa6..eafc96b45 100644 --- a/server/jetstream_api.go +++ b/server/jetstream_api.go @@ -1,4 +1,4 @@ -// Copyright 2020-2022 The NATS Authors +// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -735,13 +735,13 @@ type jsAPIRoutedReq struct { func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, subject, reply string, rmsg []byte) { // No lock needed, those are immutable. - wrappedSubject := memphisFindJSAPIWrapperSubject(c, subject) - s, rr := js.srv, js.apiSubs.Match(wrappedSubject) + wrappedSubject := memphisFindJSAPIWrapperSubject(c, subject) // ** added by memphis + s, rr := js.srv, js.apiSubs.Match(wrappedSubject) // ** changed to wrappedSubject by memphis hdr, _ := c.msgParts(rmsg) if len(getHeader(ClientInfoHdr, hdr)) == 0 { // Check if this is the system account. We will let these through for the account info only. - if s.SystemAccount() != acc || wrappedSubject != JSApiAccountInfo { + if s.SystemAccount() != acc || wrappedSubject != JSApiAccountInfo { // ** changed to wrappedSubject by memphis return } } @@ -754,7 +754,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, sub // We should only have psubs and only 1 per result. // FIXME(dlc) - Should we respond here with NoResponders or error? if len(rr.psubs) != 1 { - s.Warnf("Malformed JetStream API Request: [%s] %q", wrappedSubject, rmsg) + s.Warnf("Malformed JetStream API Request: [%s] %q", wrappedSubject, rmsg) // ** changed to wrappedSubject by memphis return } jsub := rr.psubs[0] @@ -764,7 +764,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, sub start := time.Now() jsub.icb(sub, c, acc, subject, reply, rmsg) if dur := time.Since(start); dur >= readLoopReportThreshold { - s.Warnf("Internal subscription on %q took too long: %v", wrappedSubject, dur) + s.Warnf("Internal subscription on %q took too long: %v", wrappedSubject, dur) // ** changed to wrappedSubject by memphis } return } @@ -837,13 +837,13 @@ func (s *Server) setJetStreamExportSubs() error { {JSApiTemplateInfo, s.jsTemplateInfoRequest}, {JSApiTemplateDelete, s.jsTemplateDeleteRequest}, {JSApiStreamCreate, s.jsStreamCreateRequest}, - {memphisJSApiStreamCreate, s.memphisJSApiWrapStreamCreate}, + {memphisJSApiStreamCreate, s.memphisJSApiWrapStreamCreate}, // ** added by memphis {JSApiStreamUpdate, s.jsStreamUpdateRequest}, {JSApiStreams, s.jsStreamNamesRequest}, {JSApiStreamList, s.jsStreamListRequest}, {JSApiStreamInfo, s.jsStreamInfoRequest}, {JSApiStreamDelete, s.jsStreamDeleteRequest}, - {memphisJSApiStreamDelete, s.memphisJSApiWrapStreamDelete}, + {memphisJSApiStreamDelete, s.memphisJSApiWrapStreamDelete}, // ** added by memphis {JSApiStreamPurge, s.jsStreamPurgeRequest}, {JSApiStreamSnapshot, s.jsStreamSnapshotRequest}, {JSApiStreamRestore, s.jsStreamRestoreRequest}, @@ -890,6 +890,7 @@ func (s *Server) sendAPIErrResponse(ci *ClientInfo, acc *Account, subject, reply s.sendJetStreamAPIAuditAdvisory(ci, acc, subject, request, response) } +// ** added by memphis func (s *Server) sendAPIErrResponseWithEcho(ci *ClientInfo, acc *Account, subject, reply, request, response string) { acc.trackAPIErr() if reply != _EMPTY_ { @@ 
-898,13 +899,22 @@ func (s *Server) sendAPIErrResponseWithEcho(ci *ClientInfo, acc *Account, subjec s.sendJetStreamAPIAuditAdvisory(ci, acc, subject, request, response) } +// ** added by memphis + const errRespDelay = 500 * time.Millisecond func (s *Server) sendDelayedAPIErrResponse(ci *ClientInfo, acc *Account, subject, reply, request, response string, rg *raftGroup) { + js := s.getJetStream() + if js == nil { + return + } var quitCh <-chan struct{} + js.mu.RLock() if rg != nil && rg.node != nil { quitCh = rg.node.QuitC() } + js.mu.RUnlock() + s.startGoRoutine(func() { defer s.grWG.Done() select { @@ -1281,12 +1291,14 @@ func (jsa *jsAccount) tieredReservation(tier string, cfg *StreamConfig) int64 { } // Request to create a stream. +// ** changed by memphis func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, acc *Account, subject, reply string, rmsg []byte) { s.jsStreamCreateRequestIntern(sub, c, acc, subject, reply, rmsg) } -func (s *Server) jsStreamCreateRequestIntern(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { - var cfg StreamConfig +// ** changed by memphis + +func (s *Server) jsStreamCreateRequestIntern(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { // ** changed by memphis if c == nil || !s.JetStreamEnabled() { return } @@ -1323,6 +1335,7 @@ func (s *Server) jsStreamCreateRequestIntern(sub *subscription, c *client, _ *Ac return } + var cfg StreamConfig if err := json.Unmarshal(msg, &cfg); err != nil { resp.Error = NewJSInvalidJSONError() s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) @@ -1902,7 +1915,7 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, a *Account, s if actualSize > 0 { sd = make(map[string]uint64, actualSize) - subjState = make(map[string]SimpleState, actualSize) + subjState = make(map[string]SimpleState, actualSize) // ** added by Memphis for _, ss := range subjs[offset:end] { sd[ss] = st[ss] subjState[ss] = subjectsState[ss] // ** added by Memphis @@ -1999,18 +2012,24 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, _ * return } - // Call actual stepdown. - if mset != nil { + if mset == nil { + resp.Success = true + s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + return + } + + // Call actual stepdown. Do this in a Go routine. + go func() { if node := mset.raftNode(); node != nil { mset.setLeader(false) // TODO (mh) eventually make sure all go routines exited and all channels are cleared time.Sleep(250 * time.Millisecond) node.StepDown() } - } - resp.Success = true - s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + resp.Success = true + s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + }() } // Request to have a consumer leader stepdown. @@ -2105,16 +2124,23 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, _ return } - // Call actual stepdown. - if n := o.raftNode(); n != nil { + n := o.raftNode() + if n == nil { + resp.Success = true + s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + return + } + + // Call actual stepdown. Do this in a Go routine. 
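+	// Running the stepdown asynchronously keeps the 250ms settle sleep and
+	// the raft StepDown call off the JetStream API dispatch path; the API
+	// response is sent from inside the goroutine once stepdown completes.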
+ go func() { o.setLeader(false) // TODO (mh) eventually make sure all go routines exited and all channels are cleared time.Sleep(250 * time.Millisecond) n.StepDown() - } - resp.Success = true - s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + resp.Success = true + s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + }() } // Request to remove a peer from a clustered stream. @@ -2668,7 +2694,7 @@ func (s *Server) jsLeaderAccountPurgeRequest(sub *subscription, c *client, _ *Ac } // Request to have the meta leader stepdown. -// These will only be received the the meta leaders, so less checking needed. +// These will only be received by the meta leader, so less checking needed. func (s *Server) jsLeaderStepDownRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return @@ -2763,11 +2789,13 @@ func isEmptyRequest(req []byte) bool { } // Request to delete a stream. +// ** added/changed by Memphis func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, acc *Account, subject, reply string, rmsg []byte) { s.jsStreamDeleteRequestIntern(sub, c, acc, subject, reply, rmsg) } +// ** added/changed by Memphis -func (s *Server) jsStreamDeleteRequestIntern(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamDeleteRequestIntern(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { // ** added/changed by Memphis if c == nil || !s.JetStreamEnabled() { return } @@ -3342,7 +3370,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC var total int - // FIXM(dlc) - Probably take out of network path eventually due to disk I/O? + // FIXME(dlc) - Probably take out of network path eventually due to disk I/O? processChunk := func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { // We require reply subjects to communicate back failures, flow etc. If they do not have one log and cancel. if reply == _EMPTY_ { @@ -3376,7 +3404,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC // TODO(dlc) - We could check apriori and cancel initial request if we know it won't fit. total += len(msg) if js.wouldExceedLimits(FileStorage, total) { - s.resourcesExeededError() + s.resourcesExceededError() resultCh <- result{NewJSInsufficientResourcesError(), reply} return } @@ -3782,11 +3810,11 @@ func (s *Server) jsConsumerCreateRequest(sub *subscription, c *client, a *Accoun } else { streamName = streamNameFromSubject(subject) consumerName = consumerNameFromSubject(subject) - } - // New has optional filtered subject as part of main subject.. - if n > 7 { - tokens := strings.Split(subject, tsep) - filteredSubject = strings.Join(tokens[6:], tsep) + // New has optional filtered subject as part of main subject.. + if n > 6 { + tokens := strings.Split(subject, tsep) + filteredSubject = strings.Join(tokens[6:], tsep) + } } } diff --git a/server/jetstream_benchmark_consume_test.go b/server/jetstream_benchmark_consume_test.go deleted file mode 100644 index f2af0ef2d..000000000 --- a/server/jetstream_benchmark_consume_test.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
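The consumer-create hunk earlier in this file also tightens the filtered-subject parsing: the optional filter is now only extracted for the new-style subject form, and the token-count guard drops from `n > 7` to `n > 6` so single-token filters are no longer missed. A hedged, self-contained illustration (the subject literal and token layout are an invented example, not taken from this patch):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const tsep = "."
	// Token layout assumed here:
	//   $JS.API.CONSUMER.CREATE.<stream>.<consumer>.<filter...>
	subject := "$JS.API.CONSUMER.CREATE.ORDERS.dur.orders.eu.new"
	tokens := strings.Split(subject, tsep)
	if n := len(tokens); n > 6 {
		// With the old `n > 7` guard, a one-token filter (n == 7)
		// would have been dropped.
		fmt.Println(strings.Join(tokens[6:], tsep)) // orders.eu.new
	}
}
```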
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !skip_js_tests && !skip_js_cluster_tests && !skip_js_cluster_tests_2 -// +build !skip_js_tests,!skip_js_cluster_tests,!skip_js_cluster_tests_2 - -package server - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/nats-io/nats.go" -) - -func BenchmarkJetStreamConsume(b *testing.B) { - - const ( - verbose = false - streamName = "S" - subject = "s" - seed = 12345 - publishTimeout = 30 * time.Second - ) - - runSyncPushConsumer := func(b *testing.B, js nats.JetStreamContext, streamName, subject string) (int, int, int) { - const nextMsgTimeout = 3 * time.Second - - subOpts := []nats.SubOpt{ - nats.BindStream(streamName), - } - sub, err := js.SubscribeSync("", subOpts...) - if err != nil { - b.Fatalf("Failed to subscribe: %v", err) - } - defer sub.Unsubscribe() - - bitset := NewBitset(uint64(b.N)) - uniqueConsumed, duplicates, errors := 0, 0, 0 - - b.ResetTimer() - - for uniqueConsumed < b.N { - msg, err := sub.NextMsg(nextMsgTimeout) - if err != nil { - b.Fatalf("No more messages (received: %d/%d)", uniqueConsumed, b.N) - } - - metadata, mdErr := msg.Metadata() - if mdErr != nil { - errors++ - continue - } - - ackErr := msg.Ack() - if ackErr != nil { - errors++ - continue - } - - seq := metadata.Sequence.Stream - - index := seq - 1 - if bitset.get(index) { - duplicates++ - continue - } - - uniqueConsumed++ - bitset.set(index, true) - b.SetBytes(int64(len(msg.Data))) - - if verbose && uniqueConsumed%1000 == 0 { - b.Logf("Consumed: %d/%d", bitset.count(), b.N) - } - } - - b.StopTimer() - - return uniqueConsumed, duplicates, errors - } - - runAsyncPushConsumer := func(b *testing.B, js nats.JetStreamContext, streamName, subject string, ordered, durable bool) (int, int, int) { - const timeout = 3 * time.Minute - bitset := NewBitset(uint64(b.N)) - doneCh := make(chan bool, 1) - uniqueConsumed, duplicates, errors := 0, 0, 0 - - handleMsg := func(msg *nats.Msg) { - metadata, mdErr := msg.Metadata() - if mdErr != nil { - // fmt.Printf("Metadata error: %v\n", mdErr) - errors++ - return - } - - // Ordered defaults to AckNone policy, don't try to ACK - if !ordered { - ackErr := msg.Ack() - if ackErr != nil { - // fmt.Printf("Ack error: %v\n", ackErr) - errors++ - return - } - } - - seq := metadata.Sequence.Stream - - index := seq - 1 - if bitset.get(index) { - duplicates++ - return - } - - uniqueConsumed++ - bitset.set(index, true) - b.SetBytes(int64(len(msg.Data))) - - if uniqueConsumed == b.N { - msg.Sub.Unsubscribe() - doneCh <- true - } - if verbose && uniqueConsumed%1000 == 0 { - b.Logf("Consumed %d/%d", uniqueConsumed, b.N) - } - } - - subOpts := []nats.SubOpt{ - nats.BindStream(streamName), - } - - if ordered { - subOpts = append(subOpts, nats.OrderedConsumer()) - } - - if durable { - subOpts = append(subOpts, nats.Durable("c")) - } - - sub, err := js.Subscribe("", handleMsg, subOpts...) 
- if err != nil { - b.Fatalf("Failed to subscribe: %v", err) - } - defer sub.Unsubscribe() - - b.ResetTimer() - - select { - case <-doneCh: - b.StopTimer() - case <-time.After(timeout): - b.Fatalf("Timeout, %d/%d received, %d errors", uniqueConsumed, b.N, errors) - } - - return uniqueConsumed, duplicates, errors - } - - runPullConsumer := func(b *testing.B, js nats.JetStreamContext, streamName, subject string, durable bool) (int, int, int) { - const fetchMaxWait = nats.MaxWait(3 * time.Second) - const fetchMaxMessages = 1000 - - bitset := NewBitset(uint64(b.N)) - uniqueConsumed, duplicates, errors := 0, 0, 0 - - subOpts := []nats.SubOpt{ - nats.BindStream(streamName), - } - - consumerName := "" // Default ephemeral - if durable { - consumerName = "c" // Durable - } - - sub, err := js.PullSubscribe("", consumerName, subOpts...) - if err != nil { - b.Fatalf("Failed to subscribe: %v", err) - } - defer sub.Unsubscribe() - - b.ResetTimer() - - fetchLoop: - for { - msgs, err := sub.Fetch(fetchMaxMessages, fetchMaxWait) - if err != nil { - b.Fatalf("Failed to fetch: %v", err) - } - - processMsgsLoop: - for _, msg := range msgs { - metadata, mdErr := msg.Metadata() - if mdErr != nil { - errors++ - continue processMsgsLoop - } - - ackErr := msg.Ack() - if ackErr != nil { - errors++ - continue processMsgsLoop - } - - seq := metadata.Sequence.Stream - - index := seq - 1 - if bitset.get(index) { - duplicates++ - continue processMsgsLoop - } - - uniqueConsumed++ - bitset.set(index, true) - b.SetBytes(int64(len(msg.Data))) - - if uniqueConsumed == b.N { - msg.Sub.Unsubscribe() - break fetchLoop - } - - if verbose && uniqueConsumed%1000 == 0 { - b.Logf("Consumed %d/%d", uniqueConsumed, b.N) - } - } - } - - b.StopTimer() - - return uniqueConsumed, duplicates, errors - } - - type ConsumerType string - const ( - PushSync ConsumerType = "PUSH[Sync,Ephemeral]" - PushAsync ConsumerType = "PUSH[Async,Ephemeral]" - PushAsyncOrdered ConsumerType = "PUSH[Async,Ordered]" - PushAsyncDurable ConsumerType = "PUSH[Async,Durable]" - PullDurable ConsumerType = "PULL[Durable]" - PullEphemeral ConsumerType = "PULL[Ephemeral]" - ) - - benchmarksCases := []struct { - clusterSize int - replicas int - messageSize int - minMessages int - }{ - {1, 1, 10, 100_000}, // Single node, 10B messages, ~1MiB minimum - {1, 1, 1024, 1_000}, // Single node, 1KB messages, ~1MiB minimum - {3, 3, 10, 100_000}, // Cluster, R3, 10B messages, ~1MiB minimum - {3, 3, 1024, 1_000}, // Cluster, R3, 1KB messages, ~1MiB minimum - } - - //Each of the cases above is run with each of the consumer types - consumerTypes := []ConsumerType{ - PushSync, - PushAsync, - PushAsyncOrdered, - PushAsyncDurable, - PullDurable, - PullEphemeral, - } - - for _, bc := range benchmarksCases { - - name := fmt.Sprintf( - "N=%d,R=%d,MsgSz=%db", - bc.clusterSize, - bc.replicas, - bc.messageSize, - ) - - b.Run( - name, - func(b *testing.B) { - - for _, ct := range consumerTypes { - name := fmt.Sprintf( - "%v", - ct, - ) - b.Run( - name, - func(b *testing.B) { - // Skip short runs, benchmark gets re-executed with a larger N - if b.N < bc.minMessages { - b.ResetTimer() - return - } - - if verbose { - b.Logf("Running %s with %d messages", name, b.N) - } - - if verbose { - b.Logf("Setting up %d nodes", bc.clusterSize) - } - var connectURL string - if bc.clusterSize == 1 { - s := RunBasicJetStreamServer(b) - defer s.Shutdown() - connectURL = s.ClientURL() - } else { - cl := createJetStreamClusterExplicit(b, "BENCH_PUB", bc.clusterSize) - defer cl.shutdown() - 
cl.waitOnClusterReadyWithNumPeers(bc.clusterSize) - cl.waitOnLeader() - connectURL = cl.randomServer().ClientURL() - } - - nc, js := jsClientConnectURL(b, connectURL) - defer nc.Close() - - if verbose { - b.Logf("Creating stream with R=%d", bc.replicas) - } - streamConfig := &nats.StreamConfig{ - Name: streamName, - Subjects: []string{subject}, - Replicas: bc.replicas, - } - if _, err := js.AddStream(streamConfig); err != nil { - b.Fatalf("Error creating stream: %v", err) - } - - rng := rand.New(rand.NewSource(int64(seed))) - message := make([]byte, bc.messageSize) - publishedCount := 0 - for publishedCount < b.N { - rng.Read(message) - _, err := js.PublishAsync(subject, message) - if err != nil { - continue - } else { - publishedCount++ - } - } - - select { - case <-js.PublishAsyncComplete(): - if verbose { - b.Logf("Published %d messages", b.N) - } - case <-time.After(publishTimeout): - b.Fatalf("Publish timed out") - } - - // Discard time spent during setup - // Consumer may reset again further in - b.ResetTimer() - - var consumed, duplicates, errors int - - const ( - ordered = true - unordered = false - durable = true - ephemeral = false - ) - - switch ct { - case PushSync: - consumed, duplicates, errors = runSyncPushConsumer(b, js, streamName, subject) - case PushAsync: - consumed, duplicates, errors = runAsyncPushConsumer(b, js, streamName, subject, unordered, ephemeral) - case PushAsyncOrdered: - consumed, duplicates, errors = runAsyncPushConsumer(b, js, streamName, subject, ordered, ephemeral) - case PushAsyncDurable: - consumed, duplicates, errors = runAsyncPushConsumer(b, js, streamName, subject, unordered, durable) - case PullDurable: - consumed, duplicates, errors = runPullConsumer(b, js, streamName, subject, durable) - case PullEphemeral: - consumed, duplicates, errors = runPullConsumer(b, js, streamName, subject, ephemeral) - default: - b.Fatalf("Unknown consumer type: %v", ct) - } - - // Benchmark ends here, (consumer may have stopped earlier) - b.StopTimer() - - if consumed != b.N { - b.Fatalf("Something doesn't add up: %d != %d", consumed, b.N) - } - - b.ReportMetric(float64(duplicates)*100/float64(b.N), "%dupe") - b.ReportMetric(float64(errors)*100/float64(b.N), "%error") - }, - ) - } - }, - ) - } -} diff --git a/server/jetstream_benchmark_kv_test.go b/server/jetstream_benchmark_kv_test.go deleted file mode 100644 index f89f686c5..000000000 --- a/server/jetstream_benchmark_kv_test.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
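Each benchmark body, in both the files deleted here and the consolidated `jetstream_benchmark_test.go` added below, opens with the same guard: bail out while `b.N` is small. A short sketch of why that works, using only the standard `testing` package:

```go
package bench

import "testing"

// The testing package probes a benchmark with small b.N values (starting
// at 1) and re-runs it with larger N until the timed section is long
// enough. Returning early for tiny N skips the expensive server/cluster
// setup during those probe rounds, and ResetTimer keeps the skipped
// round from polluting the measurement.
func BenchmarkSkipShortRuns(b *testing.B) {
	const minOps = 1_000
	if b.N < minOps {
		b.ResetTimer()
		return
	}
	// ... expensive setup here, then the measured loop ...
	for i := 0; i < b.N; i++ {
		_ = i
	}
}
```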
- -//go:build !skip_js_tests && !skip_js_cluster_tests && !skip_js_cluster_tests_2 -// +build !skip_js_tests,!skip_js_cluster_tests,!skip_js_cluster_tests_2 - -package server - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/nats-io/nats.go" -) - -func BenchmarkJetStreamKV(b *testing.B) { - - const ( - verbose = false - kvNamePrefix = "B_" - keyPrefix = "K_" - seed = 12345 - minOps = 1_000 - ) - - runKVGet := func(b *testing.B, kvs []nats.KeyValue, keys []string) int { - rng := rand.New(rand.NewSource(int64(seed))) - errors := 0 - - b.ResetTimer() - - for i := 1; i <= b.N; i++ { - kv := kvs[rng.Intn(len(kvs))] - key := keys[rng.Intn(len(keys))] - kve, err := kv.Get(key) - if err != nil { - errors++ - continue - } - - b.SetBytes(int64(len(kve.Value()))) - - if verbose && i%1000 == 0 { - b.Logf("Completed %d/%d Get ops", i, b.N) - } - } - - b.StopTimer() - return errors - } - - runKVPut := func(b *testing.B, kvs []nats.KeyValue, keys []string, valueSize int) int { - rng := rand.New(rand.NewSource(int64(seed))) - value := make([]byte, valueSize) - errors := 0 - - b.ResetTimer() - - for i := 1; i <= b.N; i++ { - kv := kvs[rng.Intn(len(kvs))] - key := keys[rng.Intn(len(keys))] - rng.Read(value) - _, err := kv.Put(key, value) - if err != nil { - errors++ - continue - } - - b.SetBytes(int64(valueSize)) - - if verbose && i%1000 == 0 { - b.Logf("Completed %d/%d Put ops", i, b.N) - } - } - - b.StopTimer() - return errors - } - - runKVUpdate := func(b *testing.B, kvs []nats.KeyValue, keys []string, valueSize int) int { - rng := rand.New(rand.NewSource(int64(seed))) - value := make([]byte, valueSize) - errors := 0 - - b.ResetTimer() - - for i := 1; i <= b.N; i++ { - kv := kvs[rng.Intn(len(kvs))] - key := keys[rng.Intn(len(keys))] - - kve, getErr := kv.Get(key) - if getErr != nil { - errors++ - continue - } - - rng.Read(value) - _, updateErr := kv.Update(key, value, kve.Revision()) - if updateErr != nil { - errors++ - continue - } - - b.SetBytes(int64(valueSize)) - - if verbose && i%1000 == 0 { - b.Logf("Completed %d/%d Update ops", i, b.N) - } - } - - b.StopTimer() - return errors - } - - type WorkloadType string - const ( - Get WorkloadType = "GET" - Put WorkloadType = "PUT" - Update WorkloadType = "CAS" - ) - - benchmarksCases := []struct { - clusterSize int - replicas int - numBuckets int - numKeys int - valueSize int - }{ - {1, 1, 1, 100, 100}, // 1 node, 1 bucket with 100 keys, 100B values - {1, 1, 10, 1000, 100}, // 1 node, 10 buckets with 1000 keys, 100B values - {3, 3, 1, 100, 100}, // 3 nodes, 1 bucket with 100 keys, 100B values - {3, 3, 10, 1000, 100}, // 3 nodes, 10 buckets with 1000 keys, 100B values - {3, 3, 10, 1000, 1024}, // 3 nodes, 10 buckets with 1000 keys, 1KB values - } - - workloadCases := []WorkloadType{ - Get, - Put, - Update, - } - - for _, bc := range benchmarksCases { - - bName := fmt.Sprintf( - "N=%d,R=%d,B=%d,K=%d,ValSz=%db", - bc.clusterSize, - bc.replicas, - bc.numBuckets, - bc.numKeys, - bc.valueSize, - ) - - b.Run( - bName, - func(b *testing.B) { - for _, wc := range workloadCases { - wName := fmt.Sprintf("%v", wc) - b.Run( - wName, - func(b *testing.B) { - // Skip short runs, benchmark gets re-executed with a larger N - if b.N < minOps { - b.ResetTimer() - return - } - - if verbose { - b.Logf("Running %s workload %s with %d messages", wName, bName, b.N) - } - - if verbose { - b.Logf("Setting up %d nodes", bc.clusterSize) - } - var connectURL string - if bc.clusterSize == 1 { - s := RunBasicJetStreamServer(b) - defer s.Shutdown() - connectURL = 
s.ClientURL() - } else { - cl := createJetStreamClusterExplicit(b, "BENCH_KV", bc.clusterSize) - defer cl.shutdown() - cl.waitOnClusterReadyWithNumPeers(bc.clusterSize) - cl.waitOnLeader() - connectURL = cl.randomServer().ClientURL() - } - - nc, js := jsClientConnectURL(b, connectURL) - defer nc.Close() - - // Pre-generate all keys - keys := make([]string, 0, bc.numKeys) - for i := 1; i <= bc.numKeys; i++ { - key := fmt.Sprintf("%s%d", keyPrefix, i) - keys = append(keys, key) - } - - // Initialize all KVs - kvs := make([]nats.KeyValue, 0, bc.numBuckets) - for i := 1; i <= bc.numBuckets; i++ { - // Create bucket - kvName := fmt.Sprintf("%s%d", kvNamePrefix, i) - if verbose { - b.Logf("Creating KV %s with R=%d", kvName, bc.replicas) - } - kvConfig := &nats.KeyValueConfig{ - Bucket: kvName, - Replicas: bc.replicas, - } - kv, err := js.CreateKeyValue(kvConfig) - if err != nil { - b.Fatalf("Error creating KV: %v", err) - } - kvs = append(kvs, kv) - - // Initialize all keys - rng := rand.New(rand.NewSource(int64(seed * i))) - value := make([]byte, bc.valueSize) - for _, key := range keys { - rng.Read(value) - _, err := kv.Create(key, value) - if err != nil { - b.Fatalf("Failed to initialize %s/%s: %v", kvName, key, err) - } - } - } - - // Discard time spent during setup - // May reset again further in - b.ResetTimer() - - var errors int - - switch wc { - case Get: - errors = runKVGet(b, kvs, keys) - case Put: - errors = runKVPut(b, kvs, keys, bc.valueSize) - case Update: - errors = runKVUpdate(b, kvs, keys, bc.valueSize) - default: - b.Fatalf("Unknown workload type: %v", wc) - } - - // Benchmark ends here, (may have stopped earlier) - b.StopTimer() - - b.ReportMetric(float64(errors)*100/float64(b.N), "%error") - }, - ) - } - }, - ) - } -} diff --git a/server/jetstream_benchmark_publish_test.go b/server/jetstream_benchmark_publish_test.go deleted file mode 100644 index c1f3427a4..000000000 --- a/server/jetstream_benchmark_publish_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !skip_js_tests && !skip_js_cluster_tests && !skip_js_cluster_tests_2 -// +build !skip_js_tests,!skip_js_cluster_tests,!skip_js_cluster_tests_2 - -package server - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/nats-io/nats.go" -) - -func BenchmarkJetStreamPublish(b *testing.B) { - - const ( - verbose = false - seed = 12345 - ) - - runSyncPublisher := func(b *testing.B, js nats.JetStreamContext, messageSize int, subjects []string) (int, int) { - published, errors := 0, 0 - rng := rand.New(rand.NewSource(int64(seed))) - message := make([]byte, messageSize) - - b.ResetTimer() - - for i := 1; i <= b.N; i++ { - rng.Read(message) // TODO may skip this? 
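The `rng.Read(message)` call above is how all of these benchmarks generate payloads: a `math/rand` source with a fixed seed, so every run publishes the identical byte sequence while still exercising non-compressible data. A tiny self-contained sketch:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const seed = 12345 // fixed seed => reproducible payloads across runs
	rng := rand.New(rand.NewSource(seed))
	message := make([]byte, 8)
	rng.Read(message) // math/rand's Read never returns an error
	fmt.Printf("payload: %x\n", message)
}
```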
- subject := subjects[rng.Intn(len(subjects))] - _, pubErr := js.Publish(subject, message) - if pubErr != nil { - errors++ - } else { - published++ - b.SetBytes(int64(messageSize)) - } - - if verbose && i%1000 == 0 { - b.Logf("Published %d/%d, %d errors", i, b.N, errors) - } - } - - b.StopTimer() - - return published, errors - } - - runAsyncPublisher := func(b *testing.B, js nats.JetStreamContext, messageSize int, subjects []string, asyncWindow int) (int, int) { - const publishCompleteMaxWait = 30 * time.Second - rng := rand.New(rand.NewSource(int64(seed))) - message := make([]byte, messageSize) - pending := make([]nats.PubAckFuture, 0, asyncWindow) - published, errors := 0, 0 - - b.ResetTimer() - - for i := 1; i <= b.N; i++ { - rng.Read(message) // TODO may skip this? - subject := subjects[rng.Intn(len(subjects))] - pubAckFuture, err := js.PublishAsync(subject, message) - if err != nil { - errors++ - continue - } - pending = append(pending, pubAckFuture) - - // Regularly trim the list of pending - if i%asyncWindow == 0 { - newPending := make([]nats.PubAckFuture, 0, asyncWindow) - for _, pubAckFuture := range pending { - select { - case <-pubAckFuture.Ok(): - published++ - b.SetBytes(int64(messageSize)) - case <-pubAckFuture.Err(): - errors++ - default: - // This pubAck is still pending, keep it - newPending = append(newPending, pubAckFuture) - } - } - pending = newPending - } - - if verbose && i%1000 == 0 { - b.Logf("Published %d/%d, %d errors", i, b.N, errors) - } - } - - // All published, wait for completed - select { - case <-js.PublishAsyncComplete(): - case <-time.After(publishCompleteMaxWait): - b.Fatalf("Publish timed out") - } - - // Clear whatever is left pending - for _, pubAckFuture := range pending { - select { - case <-pubAckFuture.Ok(): - published++ - b.SetBytes(int64(messageSize)) - case <-pubAckFuture.Err(): - errors++ - default: - b.Fatalf("PubAck is still pending after publish completed") - } - } - - b.StopTimer() - - return published, errors - } - - type PublishType string - const ( - Sync PublishType = "Sync" - Async PublishType = "Async" - ) - - benchmarksCases := []struct { - clusterSize int - replicas int - messageSize int - numSubjects int - minMessages int - }{ - {1, 1, 10, 1, 100_000}, // Single node, 10B messages, ~1MB minimum - {1, 1, 1024, 1, 1_000}, // Single node, 1KB messages, ~1MB minimum - {3, 3, 10, 1, 100_000}, // 3-nodes cluster, R=3, 10B messages, ~1MB minimum - {3, 3, 1024, 1, 1_000}, // 3-nodes cluster, R=3, 10B messages, ~1MB minimum - } - - // All the cases above are run with each of the publisher cases below - publisherCases := []struct { - pType PublishType - asyncWindow int - }{ - {Sync, -1}, - {Async, 1000}, - {Async, 4000}, - {Async, 8000}, - } - - for _, bc := range benchmarksCases { - name := fmt.Sprintf( - "N=%d,R=%d,MsgSz=%db,Subjs=%d", - bc.clusterSize, - bc.replicas, - bc.messageSize, - bc.numSubjects, - ) - - b.Run( - name, - func(b *testing.B) { - - for _, pc := range publisherCases { - name := fmt.Sprintf("%v", pc.pType) - if pc.pType == Async && pc.asyncWindow > 0 { - name = fmt.Sprintf("%s[W:%d]", name, pc.asyncWindow) - } - - b.Run( - name, - func(b *testing.B) { - // Skip short runs, benchmark gets re-executed with a larger N - if b.N < bc.minMessages { - b.ResetTimer() - return - } - - subjects := make([]string, bc.numSubjects) - for i := 0; i < bc.numSubjects; i++ { - subjects[i] = fmt.Sprintf("s-%d", i+1) - } - - if verbose { - b.Logf("Running %s with %d ops", name, b.N) - } - - if verbose { - b.Logf("Setting up %d nodes", 
bc.clusterSize) - } - var connectURL string - - if bc.clusterSize == 1 { - s := RunBasicJetStreamServer(b) - defer s.Shutdown() - connectURL = s.ClientURL() - } else { - cl := createJetStreamClusterExplicit(b, "BENCH_PUB", bc.clusterSize) - defer cl.shutdown() - cl.waitOnClusterReadyWithNumPeers(bc.clusterSize) - cl.waitOnLeader() - connectURL = cl.randomServer().ClientURL() - } - - nc, err := nats.Connect(connectURL) - if err != nil { - b.Fatalf("Failed to create client: %v", err) - } - defer nc.Close() - - jsOpts := []nats.JSOpt{ - nats.MaxWait(10 * time.Second), - } - - if pc.asyncWindow > 0 && pc.pType == Async { - jsOpts = append(jsOpts, nats.PublishAsyncMaxPending(pc.asyncWindow)) - } - - js, err := nc.JetStream(jsOpts...) - if err != nil { - b.Fatalf("Unexpected error getting JetStream context: %v", err) - } - - if verbose { - b.Logf("Creating stream with R=%d and %d input subjects", bc.replicas, bc.numSubjects) - } - streamConfig := &nats.StreamConfig{ - Name: "S", - Subjects: subjects, - Replicas: bc.replicas, - } - if _, err := js.AddStream(streamConfig); err != nil { - b.Fatalf("Error creating stream: %v", err) - } - - if verbose { - b.Logf("Running %v publisher with message size: %dB", pc.pType, bc.messageSize) - } - - // Benchmark starts here - b.ResetTimer() - - var published, errors int - switch pc.pType { - case Sync: - published, errors = runSyncPublisher(b, js, bc.messageSize, subjects) - case Async: - published, errors = runAsyncPublisher(b, js, bc.messageSize, subjects, pc.asyncWindow) - } - - // Benchmark ends here - b.StopTimer() - - if published+errors != b.N { - b.Fatalf("Something doesn't add up: %d + %d != %d", published, errors, b.N) - } - - b.ReportMetric(float64(errors)*100/float64(b.N), "%error") - }, - ) - } - }, - ) - } -} diff --git a/server/jetstream_benchmark_test.go b/server/jetstream_benchmark_test.go new file mode 100644 index 000000000..94ded0ef9 --- /dev/null +++ b/server/jetstream_benchmark_test.go @@ -0,0 +1,1376 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !skip_js_tests && !skip_js_cluster_tests && !skip_js_cluster_tests_2 +// +build !skip_js_tests,!skip_js_cluster_tests,!skip_js_cluster_tests_2 + +package server + +import ( + "fmt" + "math/rand" + "sync" + "testing" + "time" + + "github.com/nats-io/nats.go" +) + +func BenchmarkJetStreamConsume(b *testing.B) { + + const ( + verbose = false + streamName = "S" + subject = "s" + seed = 12345 + publishTimeout = 30 * time.Second + ) + + runSyncPushConsumer := func(b *testing.B, js nats.JetStreamContext, streamName, subject string) (int, int, int) { + const nextMsgTimeout = 3 * time.Second + + subOpts := []nats.SubOpt{ + nats.BindStream(streamName), + } + sub, err := js.SubscribeSync("", subOpts...) 
+ if err != nil { + b.Fatalf("Failed to subscribe: %v", err) + } + defer sub.Unsubscribe() + + bitset := NewBitset(uint64(b.N)) + uniqueConsumed, duplicates, errors := 0, 0, 0 + + b.ResetTimer() + + for uniqueConsumed < b.N { + msg, err := sub.NextMsg(nextMsgTimeout) + if err != nil { + b.Fatalf("No more messages (received: %d/%d)", uniqueConsumed, b.N) + } + + metadata, mdErr := msg.Metadata() + if mdErr != nil { + errors++ + continue + } + + ackErr := msg.Ack() + if ackErr != nil { + errors++ + continue + } + + seq := metadata.Sequence.Stream + + index := seq - 1 + if bitset.get(index) { + duplicates++ + continue + } + + uniqueConsumed++ + bitset.set(index, true) + b.SetBytes(int64(len(msg.Data))) + + if verbose && uniqueConsumed%1000 == 0 { + b.Logf("Consumed: %d/%d", bitset.count(), b.N) + } + } + + b.StopTimer() + + return uniqueConsumed, duplicates, errors + } + + runAsyncPushConsumer := func(b *testing.B, js nats.JetStreamContext, streamName, subject string, ordered, durable bool) (int, int, int) { + const timeout = 3 * time.Minute + bitset := NewBitset(uint64(b.N)) + doneCh := make(chan bool, 1) + uniqueConsumed, duplicates, errors := 0, 0, 0 + + handleMsg := func(msg *nats.Msg) { + metadata, mdErr := msg.Metadata() + if mdErr != nil { + // fmt.Printf("Metadata error: %v\n", mdErr) + errors++ + return + } + + // Ordered defaults to AckNone policy, don't try to ACK + if !ordered { + ackErr := msg.Ack() + if ackErr != nil { + // fmt.Printf("Ack error: %v\n", ackErr) + errors++ + return + } + } + + seq := metadata.Sequence.Stream + + index := seq - 1 + if bitset.get(index) { + duplicates++ + return + } + + uniqueConsumed++ + bitset.set(index, true) + b.SetBytes(int64(len(msg.Data))) + + if uniqueConsumed == b.N { + msg.Sub.Unsubscribe() + doneCh <- true + } + if verbose && uniqueConsumed%1000 == 0 { + b.Logf("Consumed %d/%d", uniqueConsumed, b.N) + } + } + + subOpts := []nats.SubOpt{ + nats.BindStream(streamName), + } + + if ordered { + subOpts = append(subOpts, nats.OrderedConsumer()) + } + + if durable { + subOpts = append(subOpts, nats.Durable("c")) + } + + sub, err := js.Subscribe("", handleMsg, subOpts...) + if err != nil { + b.Fatalf("Failed to subscribe: %v", err) + } + defer sub.Unsubscribe() + + b.ResetTimer() + + select { + case <-doneCh: + b.StopTimer() + case <-time.After(timeout): + b.Fatalf("Timeout, %d/%d received, %d errors", uniqueConsumed, b.N, errors) + } + + return uniqueConsumed, duplicates, errors + } + + runPullConsumer := func(b *testing.B, js nats.JetStreamContext, streamName, subject string, durable bool) (int, int, int) { + const fetchMaxWait = nats.MaxWait(3 * time.Second) + const fetchMaxMessages = 1000 + + bitset := NewBitset(uint64(b.N)) + uniqueConsumed, duplicates, errors := 0, 0, 0 + + subOpts := []nats.SubOpt{ + nats.BindStream(streamName), + } + + consumerName := "" // Default ephemeral + if durable { + consumerName = "c" // Durable + } + + sub, err := js.PullSubscribe("", consumerName, subOpts...) 
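Above, every consumer variant detects redelivery the same way: each stream sequence flips one bit in a bitset sized to `b.N`. The server ships its own `Bitset` type; this minimal stand-in just shows the semantics (`get`, `set`, 1-based sequences) the benchmark relies on:

```go
package main

import "fmt"

// bitset is a minimal stand-in for the server's Bitset: one bit per
// stream sequence, so duplicate detection is O(1) per message.
type bitset struct{ words []uint64 }

func newBitset(n uint64) *bitset {
	return &bitset{words: make([]uint64, (n+63)/64)}
}

func (b *bitset) get(i uint64) bool { return b.words[i/64]&(1<<(i%64)) != 0 }
func (b *bitset) set(i uint64)      { b.words[i/64] |= 1 << (i % 64) }

func main() {
	seen := newBitset(100)
	for _, seq := range []uint64{1, 2, 2, 3} {
		index := seq - 1 // stream sequences are 1-based
		if seen.get(index) {
			fmt.Println("duplicate:", seq)
			continue
		}
		seen.set(index)
	}
}
```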
+ if err != nil { + b.Fatalf("Failed to subscribe: %v", err) + } + defer sub.Unsubscribe() + + b.ResetTimer() + + fetchLoop: + for { + msgs, err := sub.Fetch(fetchMaxMessages, fetchMaxWait) + if err != nil { + b.Fatalf("Failed to fetch: %v", err) + } + + processMsgsLoop: + for _, msg := range msgs { + metadata, mdErr := msg.Metadata() + if mdErr != nil { + errors++ + continue processMsgsLoop + } + + ackErr := msg.Ack() + if ackErr != nil { + errors++ + continue processMsgsLoop + } + + seq := metadata.Sequence.Stream + + index := seq - 1 + if bitset.get(index) { + duplicates++ + continue processMsgsLoop + } + + uniqueConsumed++ + bitset.set(index, true) + b.SetBytes(int64(len(msg.Data))) + + if uniqueConsumed == b.N { + msg.Sub.Unsubscribe() + break fetchLoop + } + + if verbose && uniqueConsumed%1000 == 0 { + b.Logf("Consumed %d/%d", uniqueConsumed, b.N) + } + } + } + + b.StopTimer() + + return uniqueConsumed, duplicates, errors + } + + type ConsumerType string + const ( + PushSync ConsumerType = "PUSH[Sync,Ephemeral]" + PushAsync ConsumerType = "PUSH[Async,Ephemeral]" + PushAsyncOrdered ConsumerType = "PUSH[Async,Ordered]" + PushAsyncDurable ConsumerType = "PUSH[Async,Durable]" + PullDurable ConsumerType = "PULL[Durable]" + PullEphemeral ConsumerType = "PULL[Ephemeral]" + ) + + benchmarksCases := []struct { + clusterSize int + replicas int + messageSize int + minMessages int + }{ + {1, 1, 10, 100_000}, // Single node, 10B messages, ~1MiB minimum + {1, 1, 1024, 1_000}, // Single node, 1KB messages, ~1MiB minimum + {3, 3, 10, 100_000}, // Cluster, R3, 10B messages, ~1MiB minimum + {3, 3, 1024, 1_000}, // Cluster, R3, 1KB messages, ~1MiB minimum + } + + //Each of the cases above is run with each of the consumer types + consumerTypes := []ConsumerType{ + PushSync, + PushAsync, + PushAsyncOrdered, + PushAsyncDurable, + PullDurable, + PullEphemeral, + } + + for _, bc := range benchmarksCases { + + name := fmt.Sprintf( + "N=%d,R=%d,MsgSz=%db", + bc.clusterSize, + bc.replicas, + bc.messageSize, + ) + + b.Run( + name, + func(b *testing.B) { + + for _, ct := range consumerTypes { + name := fmt.Sprintf( + "%v", + ct, + ) + b.Run( + name, + func(b *testing.B) { + // Skip short runs, benchmark gets re-executed with a larger N + if b.N < bc.minMessages { + b.ResetTimer() + return + } + + if verbose { + b.Logf("Running %s with %d messages", name, b.N) + } + + if verbose { + b.Logf("Setting up %d nodes", bc.clusterSize) + } + var connectURL string + if bc.clusterSize == 1 { + s := RunBasicJetStreamServer(b) + defer s.Shutdown() + connectURL = s.ClientURL() + } else { + cl := createJetStreamClusterExplicit(b, "BENCH_PUB", bc.clusterSize) + defer cl.shutdown() + cl.waitOnClusterReadyWithNumPeers(bc.clusterSize) + cl.waitOnLeader() + connectURL = cl.randomServer().ClientURL() + } + + nc, js := jsClientConnectURL(b, connectURL) + defer nc.Close() + + if verbose { + b.Logf("Creating stream with R=%d", bc.replicas) + } + streamConfig := &nats.StreamConfig{ + Name: streamName, + Subjects: []string{subject}, + Replicas: bc.replicas, + } + if _, err := js.AddStream(streamConfig); err != nil { + b.Fatalf("Error creating stream: %v", err) + } + + rng := rand.New(rand.NewSource(int64(seed))) + message := make([]byte, bc.messageSize) + publishedCount := 0 + for publishedCount < b.N { + rng.Read(message) + _, err := js.PublishAsync(subject, message) + if err != nil { + continue + } else { + publishedCount++ + } + } + + select { + case <-js.PublishAsyncComplete(): + if verbose { + b.Logf("Published %d messages", b.N) + 
} + case <-time.After(publishTimeout): + b.Fatalf("Publish timed out") + } + + // Discard time spent during setup + // Consumer may reset again further in + b.ResetTimer() + + var consumed, duplicates, errors int + + const ( + ordered = true + unordered = false + durable = true + ephemeral = false + ) + + switch ct { + case PushSync: + consumed, duplicates, errors = runSyncPushConsumer(b, js, streamName, subject) + case PushAsync: + consumed, duplicates, errors = runAsyncPushConsumer(b, js, streamName, subject, unordered, ephemeral) + case PushAsyncOrdered: + consumed, duplicates, errors = runAsyncPushConsumer(b, js, streamName, subject, ordered, ephemeral) + case PushAsyncDurable: + consumed, duplicates, errors = runAsyncPushConsumer(b, js, streamName, subject, unordered, durable) + case PullDurable: + consumed, duplicates, errors = runPullConsumer(b, js, streamName, subject, durable) + case PullEphemeral: + consumed, duplicates, errors = runPullConsumer(b, js, streamName, subject, ephemeral) + default: + b.Fatalf("Unknown consumer type: %v", ct) + } + + // Benchmark ends here, (consumer may have stopped earlier) + b.StopTimer() + + if consumed != b.N { + b.Fatalf("Something doesn't add up: %d != %d", consumed, b.N) + } + + b.ReportMetric(float64(duplicates)*100/float64(b.N), "%dupe") + b.ReportMetric(float64(errors)*100/float64(b.N), "%error") + }, + ) + } + }, + ) + } +} + +func BenchmarkJetStreamPublish(b *testing.B) { + + const ( + verbose = false + seed = 12345 + ) + + runSyncPublisher := func(b *testing.B, js nats.JetStreamContext, messageSize int, subjects []string) (int, int) { + published, errors := 0, 0 + rng := rand.New(rand.NewSource(int64(seed))) + message := make([]byte, messageSize) + + b.ResetTimer() + + for i := 1; i <= b.N; i++ { + rng.Read(message) // TODO may skip this? + subject := subjects[rng.Intn(len(subjects))] + _, pubErr := js.Publish(subject, message) + if pubErr != nil { + errors++ + } else { + published++ + b.SetBytes(int64(messageSize)) + } + + if verbose && i%1000 == 0 { + b.Logf("Published %d/%d, %d errors", i, b.N, errors) + } + } + + b.StopTimer() + + return published, errors + } + + runAsyncPublisher := func(b *testing.B, js nats.JetStreamContext, messageSize int, subjects []string, asyncWindow int) (int, int) { + const publishCompleteMaxWait = 30 * time.Second + rng := rand.New(rand.NewSource(int64(seed))) + message := make([]byte, messageSize) + pending := make([]nats.PubAckFuture, 0, asyncWindow) + published, errors := 0, 0 + + b.ResetTimer() + + for i := 1; i <= b.N; i++ { + rng.Read(message) // TODO may skip this? 
+ subject := subjects[rng.Intn(len(subjects))] + pubAckFuture, err := js.PublishAsync(subject, message) + if err != nil { + errors++ + continue + } + pending = append(pending, pubAckFuture) + + // Regularly trim the list of pending + if i%asyncWindow == 0 { + newPending := make([]nats.PubAckFuture, 0, asyncWindow) + for _, pubAckFuture := range pending { + select { + case <-pubAckFuture.Ok(): + published++ + b.SetBytes(int64(messageSize)) + case <-pubAckFuture.Err(): + errors++ + default: + // This pubAck is still pending, keep it + newPending = append(newPending, pubAckFuture) + } + } + pending = newPending + } + + if verbose && i%1000 == 0 { + b.Logf("Published %d/%d, %d errors", i, b.N, errors) + } + } + + // All published, wait for completed + select { + case <-js.PublishAsyncComplete(): + case <-time.After(publishCompleteMaxWait): + b.Fatalf("Publish timed out") + } + + // Clear whatever is left pending + for _, pubAckFuture := range pending { + select { + case <-pubAckFuture.Ok(): + published++ + b.SetBytes(int64(messageSize)) + case <-pubAckFuture.Err(): + errors++ + default: + b.Fatalf("PubAck is still pending after publish completed") + } + } + + b.StopTimer() + + return published, errors + } + + type PublishType string + const ( + Sync PublishType = "Sync" + Async PublishType = "Async" + ) + + benchmarksCases := []struct { + clusterSize int + replicas int + messageSize int + numSubjects int + minMessages int + }{ + {1, 1, 10, 1, 100_000}, // Single node, 10B messages, ~1MB minimum + {1, 1, 1024, 1, 1_000}, // Single node, 1KB messages, ~1MB minimum + {3, 3, 10, 1, 100_000}, // 3-nodes cluster, R=3, 10B messages, ~1MB minimum + {3, 3, 1024, 1, 1_000}, // 3-nodes cluster, R=3, 10B messages, ~1MB minimum + } + + // All the cases above are run with each of the publisher cases below + publisherCases := []struct { + pType PublishType + asyncWindow int + }{ + {Sync, -1}, + {Async, 1000}, + {Async, 4000}, + {Async, 8000}, + } + + for _, bc := range benchmarksCases { + name := fmt.Sprintf( + "N=%d,R=%d,MsgSz=%db,Subjs=%d", + bc.clusterSize, + bc.replicas, + bc.messageSize, + bc.numSubjects, + ) + + b.Run( + name, + func(b *testing.B) { + + for _, pc := range publisherCases { + name := fmt.Sprintf("%v", pc.pType) + if pc.pType == Async && pc.asyncWindow > 0 { + name = fmt.Sprintf("%s[W:%d]", name, pc.asyncWindow) + } + + b.Run( + name, + func(b *testing.B) { + // Skip short runs, benchmark gets re-executed with a larger N + if b.N < bc.minMessages { + b.ResetTimer() + return + } + + subjects := make([]string, bc.numSubjects) + for i := 0; i < bc.numSubjects; i++ { + subjects[i] = fmt.Sprintf("s-%d", i+1) + } + + if verbose { + b.Logf("Running %s with %d ops", name, b.N) + } + + if verbose { + b.Logf("Setting up %d nodes", bc.clusterSize) + } + var connectURL string + + if bc.clusterSize == 1 { + s := RunBasicJetStreamServer(b) + defer s.Shutdown() + connectURL = s.ClientURL() + } else { + cl := createJetStreamClusterExplicit(b, "BENCH_PUB", bc.clusterSize) + defer cl.shutdown() + cl.waitOnClusterReadyWithNumPeers(bc.clusterSize) + cl.waitOnLeader() + connectURL = cl.randomServer().ClientURL() + } + + nc, err := nats.Connect(connectURL) + if err != nil { + b.Fatalf("Failed to create client: %v", err) + } + defer nc.Close() + + jsOpts := []nats.JSOpt{ + nats.MaxWait(10 * time.Second), + } + + if pc.asyncWindow > 0 && pc.pType == Async { + jsOpts = append(jsOpts, nats.PublishAsyncMaxPending(pc.asyncWindow)) + } + + js, err := nc.JetStream(jsOpts...) 
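The `nc.JetStream(jsOpts...)` call above is the second half of the windowing scheme: besides trimming `pending` every `asyncWindow` publishes, the context is built with `nats.PublishAsyncMaxPending`, which makes `PublishAsync` itself block once that many acks are outstanding. A small sketch of that setup (URL and window size are illustrative):

```go
package main

import (
	"time"

	"github.com/nats-io/nats.go"
)

// jsWithWindow returns a JetStream context whose PublishAsync blocks once
// `window` acks are in flight, mirroring the benchmark configuration above.
func jsWithWindow(url string, window int) (nats.JetStreamContext, error) {
	nc, err := nats.Connect(url)
	if err != nil {
		return nil, err
	}
	return nc.JetStream(
		nats.MaxWait(10*time.Second),
		nats.PublishAsyncMaxPending(window),
	)
}

func main() {
	if _, err := jsWithWindow(nats.DefaultURL, 4000); err != nil {
		panic(err)
	}
}
```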
+ if err != nil { + b.Fatalf("Unexpected error getting JetStream context: %v", err) + } + + if verbose { + b.Logf("Creating stream with R=%d and %d input subjects", bc.replicas, bc.numSubjects) + } + streamConfig := &nats.StreamConfig{ + Name: "S", + Subjects: subjects, + Replicas: bc.replicas, + } + if _, err := js.AddStream(streamConfig); err != nil { + b.Fatalf("Error creating stream: %v", err) + } + + if verbose { + b.Logf("Running %v publisher with message size: %dB", pc.pType, bc.messageSize) + } + + // Benchmark starts here + b.ResetTimer() + + var published, errors int + switch pc.pType { + case Sync: + published, errors = runSyncPublisher(b, js, bc.messageSize, subjects) + case Async: + published, errors = runAsyncPublisher(b, js, bc.messageSize, subjects, pc.asyncWindow) + } + + // Benchmark ends here + b.StopTimer() + + if published+errors != b.N { + b.Fatalf("Something doesn't add up: %d + %d != %d", published, errors, b.N) + } + + b.ReportMetric(float64(errors)*100/float64(b.N), "%error") + }, + ) + } + }, + ) + } +} + +func BenchmarkJetStreamInterestStreamWithLimit(b *testing.B) { + + const ( + verbose = true + seed = 12345 + publishBatchSize = 100 + messageSize = 256 + numSubjects = 2500 + subjectPrefix = "S" + numPublishers = 4 + randomData = true + warmupMessages = 1 + ) + + if verbose { + b.Logf( + "BatchSize: %d, MsgSize: %d, Subjects: %d, Publishers: %d, Random Message: %v", + publishBatchSize, + messageSize, + numSubjects, + numPublishers, + randomData, + ) + } + + // Benchmark parameters: sub-benchmarks are executed for every combination of the following 3 groups + // Unless a more restrictive filter is specified, e.g.: + // BenchmarkJetStreamInterestStreamWithLimit/.*R=3.*/Storage=Memory/unlimited + + // Parameter: Number of nodes and number of stream replicas + clusterAndReplicasCases := []struct { + clusterSize int + replicas int + }{ + {1, 1}, // Single node, R=1 + {3, 3}, // 3-nodes cluster, R=3 + } + + // Parameter: Stream storage type + storageTypeCases := []nats.StorageType{ + nats.MemoryStorage, + nats.FileStorage, + } + + // Parameter: Stream limit configuration + limitConfigCases := map[string]func(*nats.StreamConfig){ + "unlimited": func(config *nats.StreamConfig) { + }, + "MaxMsg=1000": func(config *nats.StreamConfig) { + config.MaxMsgs = 100 + }, + "MaxMsg=10": func(config *nats.StreamConfig) { + config.MaxMsgs = 10 + }, + "MaxPerSubject=10": func(config *nats.StreamConfig) { + config.MaxMsgsPerSubject = 10 + }, + "MaxAge=1s": func(config *nats.StreamConfig) { + config.MaxAge = 1 * time.Second + }, + "MaxBytes=1MB": func(config *nats.StreamConfig) { + config.MaxBytes = 1024 * 1024 + }, + } + + // Helper: Stand up in-process single node or cluster + setupCluster := func(b *testing.B, clusterSize int) (string, func()) { + var connectURL string + var shutdownFunc func() + + if clusterSize == 1 { + s := RunBasicJetStreamServer(b) + shutdownFunc = s.Shutdown + connectURL = s.ClientURL() + } else { + cl := createJetStreamClusterExplicit(b, "BENCH_PUB", clusterSize) + shutdownFunc = cl.shutdown + cl.waitOnClusterReadyWithNumPeers(clusterSize) + cl.waitOnLeader() + connectURL = cl.randomServer().ClientURL() + //connectURL = cl.leader().ClientURL() + } + + return connectURL, shutdownFunc + } + + // Helper: Create the stream + setupStream := func(b *testing.B, connectURL string, streamConfig *nats.StreamConfig) { + // Connect + nc, err := nats.Connect(connectURL) + if err != nil { + b.Fatalf("Failed to create client: %v", err) + } + defer nc.Close() + + jsOpts := 
[]nats.JSOpt{} + + js, err := nc.JetStream(jsOpts...) + if err != nil { + b.Fatalf("Unexpected error getting JetStream context: %v", err) + } + + if _, err := js.AddStream(streamConfig); err != nil { + b.Fatalf("Error creating stream: %v", err) + } + } + + // Context shared by publishers routines + type PublishersContext = struct { + readyWg sync.WaitGroup + completedWg sync.WaitGroup + messagesLeft int + lock sync.Mutex + errors int + } + + // Helper: Publish synchronously as Goroutine + publish := func(publisherId int, ctx *PublishersContext, js nats.JetStreamContext) { + defer ctx.completedWg.Done() + errors := 0 + messageBuf := make([]byte, messageSize) + rng := rand.New(rand.NewSource(int64(seed + publisherId))) + + // Warm up: publish a few messages + for i := 0; i < warmupMessages; i++ { + subject := fmt.Sprintf("%s.%d", subjectPrefix, rng.Intn(numSubjects)) + if randomData { + rng.Read(messageBuf) + } + _, err := js.Publish(subject, messageBuf) + if err != nil { + b.Logf("Warning: failed to publish warmup message: %s", err) + } + } + + // Signal this publisher is ready + ctx.readyWg.Done() + + for { + // Obtain a batch of messages to publish + batchSize := 0 + { + ctx.lock.Lock() + if ctx.messagesLeft >= publishBatchSize { + batchSize = publishBatchSize + } else if ctx.messagesLeft < publishBatchSize { + batchSize = ctx.messagesLeft + } + ctx.messagesLeft -= batchSize + ctx.lock.Unlock() + } + + // Nothing left to publish, terminate + if batchSize == 0 { + ctx.lock.Lock() + ctx.errors += errors + ctx.lock.Unlock() + return + } + + // Publish a batch of messages + for i := 0; i < batchSize; i++ { + subject := fmt.Sprintf("%s.%d", subjectPrefix, rng.Intn(numSubjects)) + if randomData { + rng.Read(messageBuf) + } + _, err := js.Publish(subject, messageBuf) + if err != nil { + errors += 1 + } + } + } + } + + // Benchmark matrix: (cluster and replicas) * (storage type) * (stream limit) + for _, benchmarkCase := range clusterAndReplicasCases { + b.Run( + fmt.Sprintf( + "N=%d,R=%d", + benchmarkCase.clusterSize, + benchmarkCase.replicas, + ), + func(b *testing.B) { + for _, storageType := range storageTypeCases { + b.Run( + fmt.Sprintf("Storage=%v", storageType), + func(b *testing.B) { + + for limitDescription, limitConfigFunc := range limitConfigCases { + b.Run( + limitDescription, + func(b *testing.B) { + // Stop timer during setup + b.StopTimer() + b.ResetTimer() + + // Set per-iteration bytes to calculate throughput (a.k.a. 
speed) + b.SetBytes(messageSize) + + // Print benchmark parameters + if verbose { + b.Logf( + "Stream: %+v, Storage: [%v] Limit: [%s], Ops: %d", + benchmarkCase, + storageType, + limitDescription, + b.N, + ) + } + + // Setup server or cluster + connectURL, shutdownFunc := setupCluster(b, benchmarkCase.clusterSize) + defer shutdownFunc() + + // Common stream configuration + streamConfig := &nats.StreamConfig{ + Name: "S", + Subjects: []string{fmt.Sprintf("%s.>", subjectPrefix)}, + Replicas: benchmarkCase.replicas, + Storage: storageType, + Discard: DiscardOld, + Retention: DiscardOld, + } + // Configure stream limit + limitConfigFunc(streamConfig) + // Create stream + setupStream(b, connectURL, streamConfig) + + // Set up publishers shared context + var pubCtx PublishersContext + pubCtx.readyWg.Add(numPublishers) + pubCtx.completedWg.Add(numPublishers) + + // Hold this lock until all publishers are ready + pubCtx.lock.Lock() + pubCtx.messagesLeft = b.N + + // Spawn publishers routines, each with its own connection and JS context + for i := 0; i < numPublishers; i++ { + nc, err := nats.Connect(connectURL) + if err != nil { + b.Fatal(err) + } + defer nc.Close() + js, err := nc.JetStream() + if err != nil { + b.Fatal(err) + } + go publish(i, &pubCtx, js) + } + + // Wait for all publishers to be ready + pubCtx.readyWg.Wait() + + // Benchmark starts here + b.StartTimer() + + // Unblock the publishers + pubCtx.lock.Unlock() + + // Wait for all publishers to complete + pubCtx.completedWg.Wait() + + // Benchmark ends here + b.StopTimer() + + // Sanity check, publishers may have died before completing + if pubCtx.messagesLeft != 0 { + b.Fatalf("Some messages left: %d", pubCtx.messagesLeft) + } + + b.ReportMetric(float64(pubCtx.errors)*100/float64(b.N), "%error") + }, + ) + } + }, + ) + } + }, + ) + } +} + +func BenchmarkJetStreamKV(b *testing.B) { + + const ( + verbose = false + kvNamePrefix = "B_" + keyPrefix = "K_" + seed = 12345 + minOps = 1_000 + ) + + runKVGet := func(b *testing.B, kvs []nats.KeyValue, keys []string) int { + rng := rand.New(rand.NewSource(int64(seed))) + errors := 0 + + b.ResetTimer() + + for i := 1; i <= b.N; i++ { + kv := kvs[rng.Intn(len(kvs))] + key := keys[rng.Intn(len(keys))] + kve, err := kv.Get(key) + if err != nil { + errors++ + continue + } + + b.SetBytes(int64(len(kve.Value()))) + + if verbose && i%1000 == 0 { + b.Logf("Completed %d/%d Get ops", i, b.N) + } + } + + b.StopTimer() + return errors + } + + runKVPut := func(b *testing.B, kvs []nats.KeyValue, keys []string, valueSize int) int { + rng := rand.New(rand.NewSource(int64(seed))) + value := make([]byte, valueSize) + errors := 0 + + b.ResetTimer() + + for i := 1; i <= b.N; i++ { + kv := kvs[rng.Intn(len(kvs))] + key := keys[rng.Intn(len(keys))] + rng.Read(value) + _, err := kv.Put(key, value) + if err != nil { + errors++ + continue + } + + b.SetBytes(int64(valueSize)) + + if verbose && i%1000 == 0 { + b.Logf("Completed %d/%d Put ops", i, b.N) + } + } + + b.StopTimer() + return errors + } + + runKVUpdate := func(b *testing.B, kvs []nats.KeyValue, keys []string, valueSize int) int { + rng := rand.New(rand.NewSource(int64(seed))) + value := make([]byte, valueSize) + errors := 0 + + b.ResetTimer() + + for i := 1; i <= b.N; i++ { + kv := kvs[rng.Intn(len(kvs))] + key := keys[rng.Intn(len(keys))] + + kve, getErr := kv.Get(key) + if getErr != nil { + errors++ + continue + } + + rng.Read(value) + _, updateErr := kv.Update(key, value, kve.Revision()) + if updateErr != nil { + errors++ + continue + } + + 
b.SetBytes(int64(valueSize)) + + if verbose && i%1000 == 0 { + b.Logf("Completed %d/%d Update ops", i, b.N) + } + } + + b.StopTimer() + return errors + } + + type WorkloadType string + const ( + Get WorkloadType = "GET" + Put WorkloadType = "PUT" + Update WorkloadType = "CAS" + ) + + benchmarksCases := []struct { + clusterSize int + replicas int + numBuckets int + numKeys int + valueSize int + }{ + {1, 1, 1, 100, 100}, // 1 node, 1 bucket with 100 keys, 100B values + {1, 1, 10, 1000, 100}, // 1 node, 10 buckets with 1000 keys, 100B values + {3, 3, 1, 100, 100}, // 3 nodes, 1 bucket with 100 keys, 100B values + {3, 3, 10, 1000, 100}, // 3 nodes, 10 buckets with 1000 keys, 100B values + {3, 3, 10, 1000, 1024}, // 3 nodes, 10 buckets with 1000 keys, 1KB values + } + + workloadCases := []WorkloadType{ + Get, + Put, + Update, + } + + for _, bc := range benchmarksCases { + + bName := fmt.Sprintf( + "N=%d,R=%d,B=%d,K=%d,ValSz=%db", + bc.clusterSize, + bc.replicas, + bc.numBuckets, + bc.numKeys, + bc.valueSize, + ) + + b.Run( + bName, + func(b *testing.B) { + for _, wc := range workloadCases { + wName := fmt.Sprintf("%v", wc) + b.Run( + wName, + func(b *testing.B) { + // Skip short runs, benchmark gets re-executed with a larger N + if b.N < minOps { + b.ResetTimer() + return + } + + if verbose { + b.Logf("Running %s workload %s with %d messages", wName, bName, b.N) + } + + if verbose { + b.Logf("Setting up %d nodes", bc.clusterSize) + } + var connectURL string + if bc.clusterSize == 1 { + s := RunBasicJetStreamServer(b) + defer s.Shutdown() + connectURL = s.ClientURL() + } else { + cl := createJetStreamClusterExplicit(b, "BENCH_KV", bc.clusterSize) + defer cl.shutdown() + cl.waitOnClusterReadyWithNumPeers(bc.clusterSize) + cl.waitOnLeader() + connectURL = cl.randomServer().ClientURL() + } + + nc, js := jsClientConnectURL(b, connectURL) + defer nc.Close() + + // Pre-generate all keys + keys := make([]string, 0, bc.numKeys) + for i := 1; i <= bc.numKeys; i++ { + key := fmt.Sprintf("%s%d", keyPrefix, i) + keys = append(keys, key) + } + + // Initialize all KVs + kvs := make([]nats.KeyValue, 0, bc.numBuckets) + for i := 1; i <= bc.numBuckets; i++ { + // Create bucket + kvName := fmt.Sprintf("%s%d", kvNamePrefix, i) + if verbose { + b.Logf("Creating KV %s with R=%d", kvName, bc.replicas) + } + kvConfig := &nats.KeyValueConfig{ + Bucket: kvName, + Replicas: bc.replicas, + } + kv, err := js.CreateKeyValue(kvConfig) + if err != nil { + b.Fatalf("Error creating KV: %v", err) + } + kvs = append(kvs, kv) + + // Initialize all keys + rng := rand.New(rand.NewSource(int64(seed * i))) + value := make([]byte, bc.valueSize) + for _, key := range keys { + rng.Read(value) + _, err := kv.Create(key, value) + if err != nil { + b.Fatalf("Failed to initialize %s/%s: %v", kvName, key, err) + } + } + } + + // Discard time spent during setup + // May reset again further in + b.ResetTimer() + + var errors int + + switch wc { + case Get: + errors = runKVGet(b, kvs, keys) + case Put: + errors = runKVPut(b, kvs, keys, bc.valueSize) + case Update: + errors = runKVUpdate(b, kvs, keys, bc.valueSize) + default: + b.Fatalf("Unknown workload type: %v", wc) + } + + // Benchmark ends here, (may have stopped earlier) + b.StopTimer() + + b.ReportMetric(float64(errors)*100/float64(b.N), "%error") + }, + ) + } + }, + ) + } +} + +func BenchmarkJetStreamObjStore(b *testing.B) { + const ( + verbose = false + objStoreName = "B" + keyPrefix = "K_" + seed = 12345 + initKeys = true + + // read/write ratios + ReadOnly = 1.0 + WriteOnly = 0.0 + ) + 
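In the object-store benchmark that begins above, the read/write mix is chosen per operation by comparing one uniform draw against `rwRatio`, so `ReadOnly = 1.0` yields only reads, `WriteOnly = 0.0` only writes, and 0.8 roughly an 80/20 split. A self-contained sketch of that selection logic:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	rng := rand.New(rand.NewSource(12345))
	const rwRatio = 0.8
	reads, writes := 0, 0
	for i := 0; i < 100_000; i++ {
		if rng.Float64() <= rwRatio {
			reads++ // draw fell inside the read share
		} else {
			writes++
		}
	}
	fmt.Printf("reads=%d writes=%d\n", reads, writes) // ~80k / ~20k
}
```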
+ // rwRatio to string + rwRatioToString := func(rwRatio float64) string { + switch rwRatio { + case ReadOnly: + return "readOnly" + case WriteOnly: + return "writeOnly" + default: + return fmt.Sprintf("%0.1f", rwRatio) + } + } + + // benchmark for object store by performing read/write operations with data of random size + RunObjStoreBenchmark := func(b *testing.B, objStore nats.ObjectStore, minObjSz int, maxObjSz int, numKeys int, rwRatio float64) (int, int, int) { + var ( + errors int + reads int + writes int + ) + + rng := rand.New(rand.NewSource(int64(seed))) + + // Each operation is processing a random amount of bytes within a size range which + // will be either read from or written to an object store bucket. However, here we are + // approximating the size of the processed data with a simple average of the range. + b.SetBytes(int64((minObjSz + maxObjSz) / 2)) + + for i := 1; i <= b.N; i++ { + key := fmt.Sprintf("%s_%d", keyPrefix, rng.Intn(numKeys)) + var err error + + rwOp := rng.Float64() + switch { + case rwOp <= rwRatio: + // Read Op + _, err = objStore.GetBytes(key) + reads++ + case rwOp > rwRatio: + // Write Op + // dataSz is a random value between min-max object size and cannot be less than 1 byte + dataSz := rng.Intn(maxObjSz-minObjSz+1) + minObjSz + data := make([]byte, dataSz) + rng.Read(data) + _, err = objStore.PutBytes(key, data) + writes++ + } + if err != nil { + errors++ + } + + if verbose && i%1000 == 0 { + b.Logf("Completed: %d reads, %d writes, %d errors. %d/%d total operations have been completed.", reads, writes, errors, i, b.N) + } + } + return errors, reads, writes + } + + // benchmark cases table + benchmarkCases := []struct { + storage nats.StorageType + numKeys int + minObjSz int + maxObjSz int + }{ + // TODO remove duplicates and fix comments + {nats.MemoryStorage, 100, 1024, 102400}, // mem storage, 100 objects sized (1KB-100KB) + {nats.MemoryStorage, 100, 102400, 1048576}, // mem storage, 100 objects sized (100KB-1MB) + {nats.MemoryStorage, 1000, 10240, 102400}, // mem storage, 1k objects of various size (10KB - 100KB) + {nats.FileStorage, 100, 1024, 102400}, // file storage, 100 objects sized (1KB-100KB) + {nats.FileStorage, 1000, 10240, 1048576}, // file storage, 1k objects of various size (10KB - 1MB) + {nats.FileStorage, 100, 102400, 1048576}, // file storage, 100 objects sized (100KB-1MB) + {nats.FileStorage, 100, 1048576, 10485760}, // file storage, 100 objects sized (1MB-10MB) + {nats.FileStorage, 10, 10485760, 104857600}, // file storage, 10 objects sized (10MB-100MB) + + } + + var ( + clusterSizeCases = []int{1, 3} + rwRatioCases = []float64{ReadOnly, WriteOnly, 0.8} + ) + + // Test with either single node or 3 node cluster + for _, clusterSize := range clusterSizeCases { + replicas := clusterSize + cName := fmt.Sprintf("N=%d,R=%d", clusterSize, replicas) + b.Run( + cName, + func(b *testing.B) { + for _, rwRatio := range rwRatioCases { + rName := fmt.Sprintf("workload=%s", rwRatioToString(rwRatio)) + b.Run( + rName, + func(b *testing.B) { + // Test all tabled benchmark cases + for _, bc := range benchmarkCases { + bName := fmt.Sprintf("K=%d,storage=%s,minObjSz=%db,maxObjSz=%db", bc.numKeys, bc.storage, bc.minObjSz, bc.maxObjSz) + b.Run( + bName, + func(b *testing.B) { + + // Test setup + rng := rand.New(rand.NewSource(int64(seed))) + + if verbose { + b.Logf("Setting up %d nodes", replicas) + } + var ( + connectURL string + cl *cluster + ) + if clusterSize == 1 { + s := RunBasicJetStreamServer(b) + defer s.Shutdown() + connectURL = s.ClientURL() + 
} else { + cl = createJetStreamClusterExplicit(b, "BENCH_OBJ_STORE", clusterSize) + defer cl.shutdown() + cl.waitOnClusterReadyWithNumPeers(replicas) + cl.waitOnLeader() + // connect to leader and not replicas + connectURL = cl.leader().ClientURL() + } + nc, js := jsClientConnectURL(b, connectURL) + defer nc.Close() + + // Initialize object store + if verbose { + b.Logf("Creating ObjectStore %s with R=%d", objStoreName, replicas) + } + objStoreConfig := &nats.ObjectStoreConfig{ + Bucket: objStoreName, + Replicas: replicas, + Storage: bc.storage, + } + objStore, err := js.CreateObjectStore(objStoreConfig) + if err != nil { + b.Fatalf("Error creating ObjectStore: %v", err) + } + + // if cluster_size > 1, connect to stream leader + if cl != nil { + nc.Close() + connectURL = cl.streamLeader("$G", fmt.Sprintf("OBJ_%s", objStoreName)).ClientURL() + nc, js := jsClientConnectURL(b, connectURL) + defer nc.Close() + objStore, err = js.ObjectStore(objStoreName) + if err != nil { + b.Fatalf("Error binding to ObjectStore: %v", err) + } + } + + // Initialize keys + if initKeys { + for n := 0; n < bc.numKeys; n++ { + key := fmt.Sprintf("%s_%d", keyPrefix, n) + dataSz := rng.Intn(bc.maxObjSz-bc.minObjSz+1) + bc.minObjSz + value := make([]byte, dataSz) + rng.Read(value) + _, err := objStore.PutBytes(key, value) + if err != nil { + b.Fatalf("Failed to initialize %s/%s: %v", objStoreName, key, err) + } + } + } + + b.ResetTimer() + + // Run benchmark + errors, reads, writes := RunObjStoreBenchmark(b, objStore, bc.minObjSz, bc.maxObjSz, bc.numKeys, rwRatio) + + // Report metrics + b.ReportMetric(float64(errors)*100/float64(b.N), "%error") + b.ReportMetric(float64(reads), "reads") + b.ReportMetric(float64(writes), "writes") + + }, + ) + } + }, + ) + + } + }, + ) + } +} diff --git a/server/jetstream_chaos_cluster_test.go b/server/jetstream_chaos_cluster_test.go deleted file mode 100644 index 6d409d290..000000000 --- a/server/jetstream_chaos_cluster_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js_chaos_tests -// +build js_chaos_tests - -package server - -import ( - "testing" - "time" -) - -// Bounces the entire set of nodes, then brings them back up. -// Fail if some nodes don't come back online. -func TestJetStreamChaosClusterBounce(t *testing.T) { - - const duration = 60 * time.Second - const clusterSize = 3 - - c := createJetStreamClusterExplicit(t, "R3", clusterSize) - defer c.shutdown() - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, - maxDownServers: clusterSize, - pause: 3 * time.Second, - }, - ) - chaos.start() - defer chaos.stop() - - <-time.After(duration) -} - -// Bounces a subset of the nodes, then brings them back up. -// Fails if some nodes don't come back online. 
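The chaos tests deleted here (and re-homed in `jetstream_chaos_test.go` by this patch) parameterize a `clusterBouncerChaosMonkey` by how many servers it may take down at once: `minDownServers == maxDownServers == clusterSize` forces whole-cluster outages, while a lower minimum allows partial ones. A hedged sketch of the bounce loop those parameters imply; every name below is a stand-in, not one of the server's helpers:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// bounce downs a random subset of servers between minDown and maxDown,
// holds the outage for up to maxDowntime, restarts them, then pauses.
func bounce(stop <-chan struct{}, clusterSize, minDown, maxDown int, maxDowntime, pause time.Duration) {
	for {
		select {
		case <-stop:
			return
		default:
		}
		down := minDown + rand.Intn(maxDown-minDown+1)
		fmt.Printf("downing %d/%d servers\n", down, clusterSize)
		time.Sleep(time.Duration(rand.Int63n(int64(maxDowntime) + 1))) // outage window
		fmt.Println("restarting servers")
		time.Sleep(pause)
	}
}

func main() {
	stop := make(chan struct{})
	go bounce(stop, 3, 3, 3, 2*time.Second, 3*time.Second) // whole-cluster bounce
	time.Sleep(10 * time.Second)
	close(stop)
}
```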
-func TestJetStreamChaosClusterBounceSubset(t *testing.T) { - - const duration = 60 * time.Second - const clusterSize = 3 - - c := createJetStreamClusterExplicit(t, "R3", clusterSize) - defer c.shutdown() - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: 1, - maxDownServers: clusterSize, - pause: 3 * time.Second, - }, - ) - chaos.start() - defer chaos.stop() - - <-time.After(duration) -} diff --git a/server/jetstream_chaos_consumer_test.go b/server/jetstream_chaos_consumer_test.go deleted file mode 100644 index bd21104bd..000000000 --- a/server/jetstream_chaos_consumer_test.go +++ /dev/null @@ -1,615 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js_chaos_tests -// +build js_chaos_tests - -package server - -import ( - "bytes" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/nats-io/nats.go" -) - -const ( - chaosConsumerTestsClusterName = "CONSUMERS_CHAOS_TEST" - chaosConsumerTestsStreamName = "CONSUMER_CHAOS_TEST_STREAM" - chaosConsumerTestsSubject = "foo" - chaosConsumerTestsDebug = false -) - -// Creates stream and fills it with the given number of messages. -// Each message is the string representation of the stream sequence number, -// e.g. the first message (seqno: 1) contains data "1". 
-// This allows consumers to verify the content of each message without tracking additional state -func createStreamForConsumerChaosTest(t *testing.T, c *cluster, replicas, numMessages int) { - t.Helper() - - const publishBatchSize = 1_000 - - pubNc, pubJs := jsClientConnectCluster(t, c) - defer pubNc.Close() - - _, err := pubJs.AddStream(&nats.StreamConfig{ - Name: chaosConsumerTestsStreamName, - Subjects: []string{chaosConsumerTestsSubject}, - Replicas: replicas, - }) - if err != nil { - t.Fatalf("Error creating stream: %v", err) - } - - ackFutures := make([]nats.PubAckFuture, 0, publishBatchSize) - - for i := 1; i <= numMessages; i++ { - message := []byte(fmt.Sprintf("%d", i)) - pubAckFuture, err := pubJs.PublishAsync(chaosConsumerTestsSubject, message, nats.ExpectLastSequence(uint64(i-1))) - if err != nil { - t.Fatalf("Publish error: %s", err) - } - ackFutures = append(ackFutures, pubAckFuture) - - if (i > 0 && i%publishBatchSize == 0) || i == numMessages { - select { - case <-pubJs.PublishAsyncComplete(): - for _, pubAckFuture := range ackFutures { - select { - case <-pubAckFuture.Ok(): - // Noop - case pubAckErr := <-pubAckFuture.Err(): - t.Fatalf("Error publishing: %s", pubAckErr) - case <-time.After(30 * time.Second): - t.Fatalf("Timeout verifying pubAck for message: %s", pubAckFuture.Msg().Data) - } - } - ackFutures = make([]nats.PubAckFuture, 0, publishBatchSize) - t.Logf("Published %d/%d messages", i, numMessages) - - case <-time.After(30 * time.Second): - t.Fatalf("Publish timed out") - } - } - } -} - -// Verify ordered delivery despite cluster-wide outages -func TestJetStreamChaosConsumerOrdered(t *testing.T) { - - const numMessages = 30_000 - const maxRetries = 100 - const retryDelay = 500 * time.Millisecond - const fetchTimeout = 250 * time.Millisecond - const clusterSize = 3 - const replicas = 3 - - c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize) - defer c.shutdown() - - createStreamForConsumerChaosTest(t, c, replicas, numMessages) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, // Whole cluster outage - maxDownServers: clusterSize, - pause: 1 * time.Second, - }, - ) - - subNc, subJs := jsClientConnectCluster(t, c) - defer subNc.Close() - - sub, err := subJs.SubscribeSync( - chaosConsumerTestsSubject, - nats.OrderedConsumer(), - ) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer sub.Unsubscribe() - - if chaosConsumerTestsDebug { - t.Logf("Initial subscription: %s", toIndentedJsonString(sub)) - } - - chaos.start() - defer chaos.stop() - - for i := 1; i <= numMessages; i++ { - var msg *nats.Msg - var nextMsgErr error - var expectedMsgData = []byte(fmt.Sprintf("%d", i)) - - nextMsgRetryLoop: - for r := 0; r <= maxRetries; r++ { - msg, nextMsgErr = sub.NextMsg(fetchTimeout) - if nextMsgErr == nil { - break nextMsgRetryLoop - } else if r == maxRetries { - t.Fatalf("Exceeded max retries for NextMsg") - } else if nextMsgErr == nats.ErrBadSubscription { - t.Fatalf("Subscription is invalid: %s", toIndentedJsonString(sub)) - } else { - time.Sleep(retryDelay) - } - } - - metadata, err := msg.Metadata() - if err != nil { - t.Fatalf("Failed to get message metadata: %v", err) - } - - if metadata.Sequence.Stream != uint64(i) { - t.Fatalf("Expecting stream sequence %d, got %d instead", i, metadata.Sequence.Stream) - } - - if !bytes.Equal(msg.Data, expectedMsgData) { - t.Fatalf("Expecting message %s, 
got %s instead", expectedMsgData, msg.Data) - } - - // Simulate application processing (and gives the monkey some time to brew chaos) - time.Sleep(10 * time.Millisecond) - - if i%1000 == 0 { - t.Logf("Consumed %d/%d", i, numMessages) - } - } -} - -// Verify ordered delivery despite cluster-wide outages -func TestJetStreamChaosConsumerAsync(t *testing.T) { - - const numMessages = 30_000 - const timeout = 30 * time.Second // No (new) messages for 30s => terminate - const maxRetries = 25 - const retryDelay = 500 * time.Millisecond - const clusterSize = 3 - const replicas = 3 - - c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize) - defer c.shutdown() - - createStreamForConsumerChaosTest(t, c, replicas, numMessages) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, - maxDownServers: clusterSize, - pause: 2 * time.Second, - }, - ) - - subNc, subJs := jsClientConnectCluster(t, c) - defer subNc.Close() - - timeoutTimer := time.NewTimer(timeout) - deliveryCount := uint64(0) - received := NewBitset(numMessages) - - handleMsg := func(msg *nats.Msg) { - deliveryCount += 1 - - metadata, err := msg.Metadata() - if err != nil { - t.Fatalf("Failed to get message metadata: %v", err) - } - seq := metadata.Sequence.Stream - - var expectedMsgData = []byte(fmt.Sprintf("%d", seq)) - if !bytes.Equal(msg.Data, expectedMsgData) { - t.Fatalf("Expecting message content '%s', got '%s' instead", expectedMsgData, msg.Data) - } - - isDupe := received.get(seq - 1) - - if isDupe { - if chaosConsumerTestsDebug { - t.Logf("Duplicate message delivery, seq: %d", seq) - } - return - } - - // Mark this sequence as received - received.set(seq-1, true) - if received.count() < numMessages { - // Reset timeout - timeoutTimer.Reset(timeout) - } else { - // All received, speed up the shutdown - timeoutTimer.Reset(1 * time.Second) - } - - if received.count()%1000 == 0 { - t.Logf("Consumed %d/%d", received.count(), numMessages) - } - - // Simulate application processing (and gives the monkey some time to brew chaos) - time.Sleep(10 * time.Millisecond) - - ackRetryLoop: - for i := 0; i <= maxRetries; i++ { - ackErr := msg.Ack() - if ackErr == nil { - break ackRetryLoop - } else if i == maxRetries { - t.Fatalf("Failed to ACK message %d (retried %d times)", seq, maxRetries) - } else { - time.Sleep(retryDelay) - } - } - } - - subOpts := []nats.SubOpt{} - sub, err := subJs.Subscribe(chaosConsumerTestsSubject, handleMsg, subOpts...) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer sub.Unsubscribe() - - chaos.start() - defer chaos.stop() - - // Wait for long enough silence. 
- // Either a stall, or all messages received - <-timeoutTimer.C - - // Shut down consumer - sub.Unsubscribe() - - uniqueDeliveredCount := received.count() - - t.Logf( - "Delivered %d/%d messages %d duplicate deliveries", - uniqueDeliveredCount, - numMessages, - deliveryCount-uniqueDeliveredCount, - ) - - if uniqueDeliveredCount != numMessages { - t.Fatalf("No new message delivered in the last %s, %d/%d messages never delivered", timeout, numMessages-uniqueDeliveredCount, numMessages) - } -} - -// Verify durable consumer retains state despite cluster-wide outages -// The consumer connection is also periodically closed, and the consumer 'resumes' on a different one -func TestJetStreamChaosConsumerDurable(t *testing.T) { - - const numMessages = 30_000 - const timeout = 30 * time.Second // No (new) messages for 60s => terminate - const clusterSize = 3 - const replicas = 3 - const maxRetries = 25 - const retryDelay = 500 * time.Millisecond - const durableConsumerName = "durable" - - c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize) - defer c.shutdown() - - createStreamForConsumerChaosTest(t, c, replicas, numMessages) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: 1, - maxDownServers: clusterSize, - pause: 3 * time.Second, - }, - ) - - var nc *nats.Conn - var sub *nats.Subscription - var subLock sync.Mutex - - var handleMsgFun func(msg *nats.Msg) - var natsURL string - - { - var sb strings.Builder - for _, s := range c.servers { - sb.WriteString(s.ClientURL()) - sb.WriteString(",") - } - natsURL = sb.String() - } - - resetDurableConsumer := func() { - subLock.Lock() - defer subLock.Unlock() - - if nc != nil { - nc.Close() - } - - var newNc *nats.Conn - connectRetryLoop: - for r := 0; r <= maxRetries; r++ { - var connErr error - newNc, connErr = nats.Connect(natsURL) - if connErr == nil { - break connectRetryLoop - } else if r == maxRetries { - t.Fatalf("Failed to connect, exceeded max retries, last error: %s", connErr) - } else { - time.Sleep(retryDelay) - } - } - - var newJs nats.JetStreamContext - jsRetryLoop: - for r := 0; r <= maxRetries; r++ { - var jsErr error - newJs, jsErr = newNc.JetStream(nats.MaxWait(10 * time.Second)) - if jsErr == nil { - break jsRetryLoop - } else if r == maxRetries { - t.Fatalf("Failed to get JS, exceeded max retries, last error: %s", jsErr) - } else { - time.Sleep(retryDelay) - } - } - - subOpts := []nats.SubOpt{ - nats.Durable(durableConsumerName), - } - - var newSub *nats.Subscription - subscribeRetryLoop: - for i := 0; i <= maxRetries; i++ { - var subErr error - newSub, subErr = newJs.Subscribe(chaosConsumerTestsSubject, handleMsgFun, subOpts...) 
- if subErr == nil { - ci, err := newJs.ConsumerInfo(chaosConsumerTestsStreamName, durableConsumerName) - if err == nil { - if chaosConsumerTestsDebug { - t.Logf("Consumer info:\n %s", toIndentedJsonString(ci)) - } - } else { - t.Logf("Failed to retrieve consumer info: %s", err) - } - - break subscribeRetryLoop - } else if i == maxRetries { - t.Fatalf("Exceeded max retries creating subscription: %v", subErr) - } else { - time.Sleep(retryDelay) - } - } - - nc, sub = newNc, newSub - } - - timeoutTimer := time.NewTimer(timeout) - deliveryCount := uint64(0) - received := NewBitset(numMessages) - - handleMsgFun = func(msg *nats.Msg) { - - subLock.Lock() - if msg.Sub != sub { - // Message from a previous instance of durable consumer, drop - defer subLock.Unlock() - return - } - subLock.Unlock() - - deliveryCount += 1 - - metadata, err := msg.Metadata() - if err != nil { - t.Fatalf("Failed to get message metadata: %v", err) - } - seq := metadata.Sequence.Stream - - var expectedMsgData = []byte(fmt.Sprintf("%d", seq)) - if !bytes.Equal(msg.Data, expectedMsgData) { - t.Fatalf("Expecting message content '%s', got '%s' instead", expectedMsgData, msg.Data) - } - - isDupe := received.get(seq - 1) - - if isDupe { - if chaosConsumerTestsDebug { - t.Logf("Duplicate message delivery, seq: %d", seq) - } - return - } - - // Mark this sequence as received - received.set(seq-1, true) - if received.count() < numMessages { - // Reset timeout - timeoutTimer.Reset(timeout) - } else { - // All received, speed up the shutdown - timeoutTimer.Reset(1 * time.Second) - } - - // Simulate application processing (and gives the monkey some time to brew chaos) - time.Sleep(10 * time.Millisecond) - - ackRetryLoop: - for i := 0; i <= maxRetries; i++ { - ackErr := msg.Ack() - if ackErr == nil { - break ackRetryLoop - } else if i == maxRetries { - t.Fatalf("Failed to ACK message %d (retried %d times)", seq, maxRetries) - } else { - time.Sleep(retryDelay) - } - } - - if received.count()%1000 == 0 { - t.Logf("Consumed %d/%d, duplicate deliveries: %d", received.count(), numMessages, deliveryCount-received.count()) - // Close connection and resume consuming on a different one - resetDurableConsumer() - } - } - - resetDurableConsumer() - - chaos.start() - defer chaos.stop() - - // Wait for long enough silence. 
- // Either a stall, or all messages received - <-timeoutTimer.C - - // Shut down consumer - if sub != nil { - sub.Unsubscribe() - } - - uniqueDeliveredCount := received.count() - - t.Logf( - "Delivered %d/%d messages %d duplicate deliveries", - uniqueDeliveredCount, - numMessages, - deliveryCount-uniqueDeliveredCount, - ) - - if uniqueDeliveredCount != numMessages { - t.Fatalf("No new message delivered in the last %s, %d/%d messages never delivered", timeout, numMessages-uniqueDeliveredCount, numMessages) - } -} - -func TestJetStreamChaosConsumerPull(t *testing.T) { - - const numMessages = 10_000 - const maxRetries = 100 - const retryDelay = 500 * time.Millisecond - const fetchTimeout = 250 * time.Millisecond - const fetchBatchSize = 100 - const clusterSize = 3 - const replicas = 3 - const durableConsumerName = "durable" - - c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize) - defer c.shutdown() - - createStreamForConsumerChaosTest(t, c, replicas, numMessages) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, // Whole cluster outage - maxDownServers: clusterSize, - pause: 1 * time.Second, - }, - ) - - subNc, subJs := jsClientConnectCluster(t, c) - defer subNc.Close() - - sub, err := subJs.PullSubscribe( - chaosConsumerTestsSubject, - durableConsumerName, - ) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - defer sub.Unsubscribe() - - if chaosConsumerTestsDebug { - t.Logf("Initial subscription: %s", toIndentedJsonString(sub)) - } - - chaos.start() - defer chaos.stop() - - fetchMaxWait := nats.MaxWait(fetchTimeout) - received := NewBitset(numMessages) - deliveredCount := uint64(0) - - for received.count() < numMessages { - - var msgs []*nats.Msg - var fetchErr error - - fetchRetryLoop: - for r := 0; r <= maxRetries; r++ { - msgs, fetchErr = sub.Fetch(fetchBatchSize, fetchMaxWait) - if fetchErr == nil { - break fetchRetryLoop - } else if r == maxRetries { - t.Fatalf("Exceeded max retries for Fetch, last error: %s", fetchErr) - } else if fetchErr == nats.ErrBadSubscription { - t.Fatalf("Subscription is invalid: %s", toIndentedJsonString(sub)) - } else { - // t.Logf("Fetch error: %v", fetchErr) - time.Sleep(retryDelay) - } - } - - for _, msg := range msgs { - - deliveredCount += 1 - - metadata, err := msg.Metadata() - if err != nil { - t.Fatalf("Failed to get message metadata: %v", err) - } - - streamSeq := metadata.Sequence.Stream - - expectedMsgData := []byte(fmt.Sprintf("%d", streamSeq)) - - if !bytes.Equal(msg.Data, expectedMsgData) { - t.Fatalf("Expecting message %s, got %s instead", expectedMsgData, msg.Data) - } - - isDupe := received.get(streamSeq - 1) - - received.set(streamSeq-1, true) - - // Simulate application processing (and gives the monkey some time to brew chaos) - time.Sleep(10 * time.Millisecond) - - ackRetryLoop: - for r := 0; r <= maxRetries; r++ { - ackErr := msg.Ack() - if ackErr == nil { - break ackRetryLoop - } else if r == maxRetries { - t.Fatalf("Failed to ACK message %d, last error: %s", streamSeq, ackErr) - } else { - time.Sleep(retryDelay) - } - } - - if !isDupe && received.count()%1000 == 0 { - t.Logf("Consumed %d/%d (duplicates: %d)", received.count(), numMessages, deliveredCount-received.count()) - } - } - } -} diff --git a/server/jetstream_chaos_helpers_test.go b/server/jetstream_chaos_helpers_test.go deleted file mode 100644 index 5b3f9001c..000000000 --- 
a/server/jetstream_chaos_helpers_test.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js_chaos_tests -// +build js_chaos_tests - -package server - -import ( - "fmt" - "math/rand" - "sync" - "testing" - "time" -) - -// Additional cluster helpers - -func (c *cluster) waitOnClusterHealthz() { - c.t.Helper() - for _, cs := range c.servers { - c.waitOnServerHealthz(cs) - } -} - -func (c *cluster) stopSubset(toStop []*Server) { - c.t.Helper() - for _, s := range toStop { - s.Shutdown() - } -} - -func (c *cluster) selectRandomServers(numServers int) []*Server { - c.t.Helper() - if numServers > len(c.servers) { - panic(fmt.Sprintf("Can't select %d servers in a cluster of %d", numServers, len(c.servers))) - } - var selectedServers []*Server - selectedServers = append(selectedServers, c.servers...) - rand.Shuffle(len(selectedServers), func(x, y int) { - selectedServers[x], selectedServers[y] = selectedServers[y], selectedServers[x] - }) - return selectedServers[0:numServers] -} - -// Support functions for "chaos" testing (random injected failures) - -type ChaosMonkeyController interface { - // Launch the monkey as background routine and return - start() - // Stop a monkey that was previously started - stop() - // Run the monkey synchronously, until it is manually stopped via stopCh - run() -} - -type ClusterChaosMonkey interface { - // Set defaults and validates the monkey parameters - validate(t *testing.T, c *cluster) - // Run the monkey synchronously, until it is manually stopped via stopCh - run(t *testing.T, c *cluster, stopCh <-chan bool) -} - -// Chaos Monkey Controller that acts on a cluster -type clusterChaosMonkeyController struct { - t *testing.T - cluster *cluster - wg sync.WaitGroup - stopCh chan bool - ccm ClusterChaosMonkey -} - -func createClusterChaosMonkeyController(t *testing.T, c *cluster, ccm ClusterChaosMonkey) ChaosMonkeyController { - ccm.validate(t, c) - return &clusterChaosMonkeyController{ - t: t, - cluster: c, - stopCh: make(chan bool, 3), - ccm: ccm, - } -} - -func (m *clusterChaosMonkeyController) start() { - m.t.Logf("🐵 Starting monkey") - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.run() - }() -} - -func (m *clusterChaosMonkeyController) stop() { - m.t.Logf("🐵 Stopping monkey") - m.stopCh <- true - m.wg.Wait() - m.t.Logf("🐵 Monkey stopped") -} - -func (m *clusterChaosMonkeyController) run() { - m.ccm.run(m.t, m.cluster, m.stopCh) -} - -// Cluster Chaos Monkey that selects a random subset of the nodes in a cluster (according to min/max provided), -// shuts them down for a given duration (according to min/max provided), then brings them back up. -// Then sleeps for a given time, and does it again until stopped. 
-type clusterBouncerChaosMonkey struct { - minDowntime time.Duration - maxDowntime time.Duration - minDownServers int - maxDownServers int - pause time.Duration -} - -func (m *clusterBouncerChaosMonkey) validate(t *testing.T, c *cluster) { - if m.minDowntime > m.maxDowntime { - t.Fatalf("Min downtime %v cannot be larger than max downtime %v", m.minDowntime, m.maxDowntime) - } - - if m.minDownServers > m.maxDownServers { - t.Fatalf("Min down servers %v cannot be larger than max down servers %v", m.minDownServers, m.maxDownServers) - } -} - -func (m *clusterBouncerChaosMonkey) run(t *testing.T, c *cluster, stopCh <-chan bool) { - for { - // Pause between actions - select { - case <-stopCh: - return - case <-time.After(m.pause): - } - - // Pick a random subset of servers - numServersDown := rand.Intn(1+m.maxDownServers-m.minDownServers) + m.minDownServers - servers := c.selectRandomServers(numServersDown) - serverNames := []string{} - for _, s := range servers { - serverNames = append(serverNames, s.info.Name) - } - - // Pick a random outage interval - minOutageNanos := m.minDowntime.Nanoseconds() - maxOutageNanos := m.maxDowntime.Nanoseconds() - outageDurationNanos := rand.Int63n(1+maxOutageNanos-minOutageNanos) + minOutageNanos - outageDuration := time.Duration(outageDurationNanos) - - // Take down selected servers - t.Logf("🐵 Taking down %d/%d servers for %v (%v)", numServersDown, len(c.servers), outageDuration, serverNames) - c.stopSubset(servers) - - // Wait for the "outage" duration - select { - case <-stopCh: - return - case <-time.After(outageDuration): - } - - // Restart servers and wait for cluster to be healthy - t.Logf("🐵 Restoring cluster") - c.restartAllSamePorts() - c.waitOnClusterHealthz() - - c.waitOnClusterReady() - c.waitOnAllCurrent() - c.waitOnLeader() - } -} diff --git a/server/jetstream_chaos_kv_test.go b/server/jetstream_chaos_kv_test.go deleted file mode 100644 index 6c177a9ad..000000000 --- a/server/jetstream_chaos_kv_test.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2022 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js_chaos_tests -// +build js_chaos_tests - -package server - -import ( - "fmt" - "math/rand" - "strings" - "sync" - "testing" - "time" - - "github.com/nats-io/nats.go" -) - -const ( - chaosKvTestsClusterName = "KV_CHAOS_TEST" - chaosKvTestsBucketName = "KV_CHAOS_TEST_BUCKET" - chaosKvTestsSubject = "foo" - chaosKvTestsDebug = false -) - -// Creates KV store (a.k.a. bucket). 
-func createBucketForKvChaosTest(t *testing.T, c *cluster, replicas int) { - t.Helper() - - pubNc, pubJs := jsClientConnectCluster(t, c) - defer pubNc.Close() - - config := nats.KeyValueConfig{ - Bucket: chaosKvTestsBucketName, - Replicas: replicas, - Description: "Test bucket", - } - - kvs, err := pubJs.CreateKeyValue(&config) - if err != nil { - t.Fatalf("Error creating bucket: %v", err) - } - - status, err := kvs.Status() - if err != nil { - t.Fatalf("Error retrieving bucket status: %v", err) - } - t.Logf("Bucket created: %s", status.Bucket()) -} - -// Single client performs a set of PUT on a single key. -// If PUT is successful, perform a GET on the same key. -// If GET is successful, ensure key revision and value match the most recent successful write. -func TestJetStreamChaosKvPutGet(t *testing.T) { - - const numOps = 100_000 - const clusterSize = 3 - const replicas = 3 - const key = "key" - const staleReadsOk = true // Set to false to check for violations of 'read committed' consistency - - c := createJetStreamClusterExplicit(t, chaosKvTestsClusterName, clusterSize) - defer c.shutdown() - - createBucketForKvChaosTest(t, c, replicas) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, // Whole cluster outage - maxDownServers: clusterSize, - pause: 1 * time.Second, - }, - ) - - nc, js := jsClientConnectCluster(t, c) - defer nc.Close() - - // Create KV bucket - kv, err := js.KeyValue(chaosKvTestsBucketName) - if err != nil { - t.Fatalf("Failed to get KV store: %v", err) - } - - // Initialize the only key - firstRevision, err := kv.Create(key, []byte("INITIAL VALUE")) - if err != nil { - t.Fatalf("Failed to create key: %v", err) - } else if firstRevision != 1 { - t.Fatalf("Unexpected revision: %d", firstRevision) - } - - // Start chaos - chaos.start() - defer chaos.stop() - - staleReadsCount := uint64(0) - successCount := uint64(0) - - previousRevision := firstRevision - -putGetLoop: - for i := 1; i <= numOps; i++ { - - if i%1000 == 0 { - t.Logf("Completed %d/%d PUT+GET operations", i, numOps) - } - - // PUT a value - putValue := fmt.Sprintf("value-%d", i) - putRevision, err := kv.Put(key, []byte(putValue)) - if err != nil { - t.Logf("PUT error: %v", err) - continue putGetLoop - } - - // Check revision is monotonically increasing - if putRevision <= previousRevision { - t.Fatalf("PUT produced revision %d which is not greater than the previous successful PUT revision: %d", putRevision, previousRevision) - } - - previousRevision = putRevision - - // If PUT was successful, GET the same - kve, err := kv.Get(key) - if err == nats.ErrKeyNotFound { - t.Fatalf("GET key not found, but key does exists (last PUT revision: %d)", putRevision) - } else if err != nil { - t.Logf("GET error: %v", err) - continue putGetLoop - } - - getValue := string(kve.Value()) - getRevision := kve.Revision() - - if putRevision > getRevision { - // Stale read, violates 'read committed' consistency criteria - if !staleReadsOk { - t.Fatalf("PUT value %s (rev: %d) then read value %s (rev: %d)", putValue, putRevision, getValue, getRevision) - } else { - staleReadsCount += 1 - } - } else if putRevision < getRevision { - // Returned revision is higher than any ever written, this should never happen - t.Fatalf("GET returned revision %d, but most recent expected revision is %d", getRevision, putRevision) - } else if putValue != getValue { - // Returned revision matches latest, but values do not, 
this should never happen - t.Fatalf("GET returned revision %d with value %s, but value %s was just committed for that same revision", getRevision, getValue, putValue) - } else { - // Get returned the latest revision/value - successCount += 1 - if chaosKvTestsDebug { - t.Logf("PUT+GET %s=%s (rev: %d)", key, putValue, putRevision) - } - } - } - - t.Logf("Completed %d PUT+GET cycles of which %d successful, %d GETs returned a stale value", numOps, successCount, staleReadsCount) -} - -// A variant TestJetStreamChaosKvPutGet where PUT is retried until successful, and GET is retried until it returns the latest known key revision. -// This validates than a confirmed PUT value is never lost, and becomes eventually visible. -func TestJetStreamChaosKvPutGetWithRetries(t *testing.T) { - - const numOps = 10_000 - const maxRetries = 20 - const retryDelay = 100 * time.Millisecond - const clusterSize = 3 - const replicas = 3 - const key = "key" - - c := createJetStreamClusterExplicit(t, chaosKvTestsClusterName, clusterSize) - defer c.shutdown() - - createBucketForKvChaosTest(t, c, replicas) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, // Whole cluster outage - maxDownServers: clusterSize, - pause: 1 * time.Second, - }, - ) - - nc, js := jsClientConnectCluster(t, c) - defer nc.Close() - - kv, err := js.KeyValue(chaosKvTestsBucketName) - if err != nil { - t.Fatalf("Failed to get KV store: %v", err) - } - - // Initialize key value - firstRevision, err := kv.Create(key, []byte("INITIAL VALUE")) - if err != nil { - t.Fatalf("Failed to create key: %v", err) - } else if firstRevision != 1 { - t.Fatalf("Unexpected revision: %d", firstRevision) - } - - // Start chaos - chaos.start() - defer chaos.stop() - - staleReadCount := 0 - previousRevision := firstRevision - -putGetLoop: - for i := 1; i <= numOps; i++ { - - if i%1000 == 0 { - t.Logf("Completed %d/%d PUT+GET operations", i, numOps) - } - - putValue := fmt.Sprintf("value-%d", i) - putRevision := uint64(0) - - // Put new value for key, retry until successful or out of retries - putRetryLoop: - for r := 0; r <= maxRetries; r++ { - var putErr error - putRevision, putErr = kv.Put(key, []byte(putValue)) - if putErr == nil { - break putRetryLoop - } else if r == maxRetries { - t.Fatalf("Failed to PUT (retried %d times): %v", maxRetries, putErr) - } else { - if chaosKvTestsDebug { - t.Logf("PUT error: %v", putErr) - } - time.Sleep(retryDelay) - } - } - - // Ensure key version is monotonically increasing - if putRevision <= previousRevision { - t.Fatalf("Latest PUT created revision %d which is not greater than the previous revision: %d", putRevision, previousRevision) - } - previousRevision = putRevision - - // Read value for key, retry until successful, and validate corresponding version and value - getRetryLoop: - for r := 0; r <= maxRetries; r++ { - var getErr error - kve, getErr := kv.Get(key) - if getErr != nil && r == maxRetries { - t.Fatalf("Failed to GET (retried %d times): %v", maxRetries, getErr) - } else if getErr != nil { - if chaosKvTestsDebug { - t.Logf("GET error: %v", getErr) - } - time.Sleep(retryDelay) - continue getRetryLoop - } - - // GET successful, check revision and value - getValue := string(kve.Value()) - getRevision := kve.Revision() - - if putRevision == getRevision { - if putValue != getValue { - t.Fatalf("Unexpected value %s for revision %d, expected: %s", getValue, getRevision, putValue) - } - if 
chaosKvTestsDebug { - t.Logf("PUT+GET %s=%s (rev: %d) (retry: %d)", key, putValue, putRevision, r) - } - continue putGetLoop - } else if getRevision > putRevision { - t.Fatalf("GET returned version that should not exist yet: %d, last created: %d", getRevision, putRevision) - } else { // get revision < put revision - staleReadCount += 1 - if chaosKvTestsDebug { - t.Logf("GET got stale value: %v (rev: %d, latest: %d)", getValue, getRevision, putRevision) - } - time.Sleep(retryDelay) - continue getRetryLoop - } - } - } - - t.Logf("Client completed %d PUT+GET cycles, %d GET returned a stale value", numOps, staleReadCount) -} - -// Multiple clients updating a finite set of keys with CAS semantics. -// TODO check that revision is never lower than last one seen -// TODO check that KeyNotFound is never returned, as keys are initialized beforehand -func TestJetStreamChaosKvCAS(t *testing.T) { - const numOps = 10_000 - const maxRetries = 50 - const retryDelay = 300 * time.Millisecond - const clusterSize = 3 - const replicas = 3 - const numKeys = 15 - const numClients = 5 - - c := createJetStreamClusterExplicit(t, chaosKvTestsClusterName, clusterSize) - defer c.shutdown() - - createBucketForKvChaosTest(t, c, replicas) - - chaos := createClusterChaosMonkeyController( - t, - c, - &clusterBouncerChaosMonkey{ - minDowntime: 0 * time.Second, - maxDowntime: 2 * time.Second, - minDownServers: clusterSize, // Whole cluster outage - maxDownServers: clusterSize, - pause: 1 * time.Second, - }, - ) - - nc, js := jsClientConnectCluster(t, c) - defer nc.Close() - - // Create bucket - kv, err := js.KeyValue(chaosKvTestsBucketName) - if err != nil { - t.Fatalf("Failed to get KV store: %v", err) - } - - // Create set of keys and initialize them with dummy value - keys := make([]string, numKeys) - for k := 0; k < numKeys; k++ { - key := fmt.Sprintf("key-%d", k) - keys[k] = key - - _, err := kv.Create(key, []byte("Initial value")) - if err != nil { - t.Fatalf("Failed to create key: %v", err) - } - } - - wgStart := sync.WaitGroup{} - wgComplete := sync.WaitGroup{} - - // Client routine - client := func(clientId int, kv nats.KeyValue) { - defer wgComplete.Done() - - rng := rand.New(rand.NewSource(int64(clientId))) - successfulUpdates := 0 - casRejectUpdates := 0 - otherUpdateErrors := 0 - - // Map to track last known revision for each of the keys - knownRevisions := map[string]uint64{} - for _, key := range keys { - knownRevisions[key] = 0 - } - - // Wait for all clients to reach this point before proceeding - wgStart.Done() - wgStart.Wait() - - for i := 1; i <= numOps; i++ { - - if i%1000 == 0 { - t.Logf("Client %d completed %d/%d updates", clientId, i, numOps) - } - - // Pick random key from the set - key := keys[rng.Intn(numKeys)] - - // Prepare unique value to be written - value := fmt.Sprintf("client: %d operation %d", clientId, i) - - // Try to update a key with CAS - newRevision, updateErr := kv.Update(key, []byte(value), knownRevisions[key]) - if updateErr == nil { - // Update successful - knownRevisions[key] = newRevision - successfulUpdates += 1 - if chaosKvTestsDebug { - t.Logf("Client %d updated key %s, new revision: %d", clientId, key, newRevision) - } - } else if updateErr != nil && strings.Contains(fmt.Sprint(updateErr), "wrong last sequence") { - // CAS rejected update, learn current revision for this key - casRejectUpdates += 1 - - for r := 0; r <= maxRetries; r++ { - kve, getErr := kv.Get(key) - if getErr == nil { - currentRevision := kve.Revision() - if currentRevision < knownRevisions[key] { - // 
Revision number moved backward, this should never happen - t.Fatalf("Current revision for key %s is %d, which is lower than the last known revision %d", key, currentRevision, knownRevisions[key]) - - } - - knownRevisions[key] = currentRevision - if chaosKvTestsDebug { - t.Logf("Client %d learn key %s revision: %d", clientId, key, currentRevision) - } - break - } else if r == maxRetries { - t.Fatalf("Failed to GET (retried %d times): %v", maxRetries, getErr) - } else { - time.Sleep(retryDelay) - } - } - } else { - // Other update error - otherUpdateErrors += 1 - if chaosKvTestsDebug { - t.Logf("Client %d update error for key %s: %v", clientId, key, updateErr) - } - time.Sleep(retryDelay) - } - } - t.Logf("Client %d done, %d kv updates, %d CAS rejected, %d other errors", clientId, successfulUpdates, casRejectUpdates, otherUpdateErrors) - } - - // Launch all clients - for i := 1; i <= numClients; i++ { - cNc, cJs := jsClientConnectCluster(t, c) - defer cNc.Close() - - cKv, err := cJs.KeyValue(chaosKvTestsBucketName) - if err != nil { - t.Fatalf("Failed to get KV store: %v", err) - } - - wgStart.Add(1) - wgComplete.Add(1) - go client(i, cKv) - } - - // Wait for clients to be connected and ready - wgStart.Wait() - - // Start failures - chaos.start() - defer chaos.stop() - - // Wait for all clients to be done - wgComplete.Wait() -} diff --git a/server/jetstream_chaos_test.go b/server/jetstream_chaos_test.go new file mode 100644 index 000000000..b08733828 --- /dev/null +++ b/server/jetstream_chaos_test.go @@ -0,0 +1,1285 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
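+
+// NOTE: This file consolidates the chaos helpers and tests that previously
+// lived in separate files (cluster, consumer, helpers, and KV).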
+ +//go:build js_chaos_tests +// +build js_chaos_tests + +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "math/rand" + "strings" + "sync" + "testing" + "time" + + "github.com/nats-io/nats.go" +) + +// Support functions for "chaos" testing (random injected failures) + +type ChaosMonkeyController interface { + // Launch the monkey as background routine and return + start() + // Stop a monkey that was previously started + stop() + // Run the monkey synchronously, until it is manually stopped via stopCh + run() +} + +type ClusterChaosMonkey interface { + // Set defaults and validates the monkey parameters + validate(t *testing.T, c *cluster) + // Run the monkey synchronously, until it is manually stopped via stopCh + run(t *testing.T, c *cluster, stopCh <-chan bool) +} + +// Chaos Monkey Controller that acts on a cluster +type clusterChaosMonkeyController struct { + t *testing.T + cluster *cluster + wg sync.WaitGroup + stopCh chan bool + ccm ClusterChaosMonkey +} + +func createClusterChaosMonkeyController(t *testing.T, c *cluster, ccm ClusterChaosMonkey) ChaosMonkeyController { + ccm.validate(t, c) + return &clusterChaosMonkeyController{ + t: t, + cluster: c, + stopCh: make(chan bool, 3), + ccm: ccm, + } +} + +func (m *clusterChaosMonkeyController) start() { + m.t.Logf("🐵 Starting monkey") + m.wg.Add(1) + go func() { + defer m.wg.Done() + m.run() + }() +} + +func (m *clusterChaosMonkeyController) stop() { + m.t.Logf("🐵 Stopping monkey") + m.stopCh <- true + m.wg.Wait() + m.t.Logf("🐵 Monkey stopped") +} + +func (m *clusterChaosMonkeyController) run() { + m.ccm.run(m.t, m.cluster, m.stopCh) +} + +// Cluster Chaos Monkey that selects a random subset of the nodes in a cluster (according to min/max provided), +// shuts them down for a given duration (according to min/max provided), then brings them back up. +// Then sleeps for a given time, and does it again until stopped. 
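+// Both ranges are inclusive: each round takes down between minDownServers and
+// maxDownServers servers for somewhere between minDowntime and maxDowntime.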
+type clusterBouncerChaosMonkey struct { + minDowntime time.Duration + maxDowntime time.Duration + minDownServers int + maxDownServers int + pause time.Duration +} + +func (m *clusterBouncerChaosMonkey) validate(t *testing.T, c *cluster) { + if m.minDowntime > m.maxDowntime { + t.Fatalf("Min downtime %v cannot be larger than max downtime %v", m.minDowntime, m.maxDowntime) + } + + if m.minDownServers > m.maxDownServers { + t.Fatalf("Min down servers %v cannot be larger than max down servers %v", m.minDownServers, m.maxDownServers) + } +} + +func (m *clusterBouncerChaosMonkey) run(t *testing.T, c *cluster, stopCh <-chan bool) { + for { + // Pause between actions + select { + case <-stopCh: + return + case <-time.After(m.pause): + } + + // Pick a random subset of servers + numServersDown := rand.Intn(1+m.maxDownServers-m.minDownServers) + m.minDownServers + servers := c.selectRandomServers(numServersDown) + serverNames := []string{} + for _, s := range servers { + serverNames = append(serverNames, s.info.Name) + } + + // Pick a random outage interval + minOutageNanos := m.minDowntime.Nanoseconds() + maxOutageNanos := m.maxDowntime.Nanoseconds() + outageDurationNanos := rand.Int63n(1+maxOutageNanos-minOutageNanos) + minOutageNanos + outageDuration := time.Duration(outageDurationNanos) + + // Take down selected servers + t.Logf("🐵 Taking down %d/%d servers for %v (%v)", numServersDown, len(c.servers), outageDuration, serverNames) + c.stopSubset(servers) + + // Wait for the "outage" duration + select { + case <-stopCh: + return + case <-time.After(outageDuration): + } + + // Restart servers and wait for cluster to be healthy + t.Logf("🐵 Restoring cluster") + c.restartAllSamePorts() + c.waitOnClusterHealthz() + + c.waitOnClusterReady() + c.waitOnAllCurrent() + c.waitOnLeader() + } +} + +// Additional cluster methods for chaos testing + +func (c *cluster) waitOnClusterHealthz() { + c.t.Helper() + for _, cs := range c.servers { + c.waitOnServerHealthz(cs) + } +} + +func (c *cluster) stopSubset(toStop []*Server) { + c.t.Helper() + for _, s := range toStop { + s.Shutdown() + } +} + +func (c *cluster) selectRandomServers(numServers int) []*Server { + c.t.Helper() + if numServers > len(c.servers) { + panic(fmt.Sprintf("Can't select %d servers in a cluster of %d", numServers, len(c.servers))) + } + var selectedServers []*Server + selectedServers = append(selectedServers, c.servers...) + rand.Shuffle(len(selectedServers), func(x, y int) { + selectedServers[x], selectedServers[y] = selectedServers[y], selectedServers[x] + }) + return selectedServers[0:numServers] +} + +// Other helpers + +func jsClientConnectCluster(t testing.TB, c *cluster) (*nats.Conn, nats.JetStreamContext) { + serverConnectURLs := make([]string, len(c.servers)) + for i, server := range c.servers { + serverConnectURLs[i] = server.ClientURL() + } + connectURL := strings.Join(serverConnectURLs, ",") + + nc, err := nats.Connect(connectURL) + if err != nil { + t.Fatalf("Failed to connect: %s", err) + } + + js, err := nc.JetStream() + if err != nil { + t.Fatalf("Failed to init JetStream context: %s", err) + } + + return nc, js +} + +func toIndentedJsonString(v any) string { + s, err := json.MarshalIndent(v, "", " ") + if err != nil { + panic(err) + } + return string(s) +} + +// Bounces the entire set of nodes, then brings them back up. +// Fail if some nodes don't come back online. 
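+// With minDownServers == maxDownServers == clusterSize, each bounce takes the
+// whole cluster down at once.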
+func TestJetStreamChaosClusterBounce(t *testing.T) { + + const duration = 60 * time.Second + const clusterSize = 3 + + c := createJetStreamClusterExplicit(t, "R3", clusterSize) + defer c.shutdown() + + chaos := createClusterChaosMonkeyController( + t, + c, + &clusterBouncerChaosMonkey{ + minDowntime: 0 * time.Second, + maxDowntime: 2 * time.Second, + minDownServers: clusterSize, + maxDownServers: clusterSize, + pause: 3 * time.Second, + }, + ) + chaos.start() + defer chaos.stop() + + <-time.After(duration) +} + +// Bounces a subset of the nodes, then brings them back up. +// Fails if some nodes don't come back online. +func TestJetStreamChaosClusterBounceSubset(t *testing.T) { + + const duration = 60 * time.Second + const clusterSize = 3 + + c := createJetStreamClusterExplicit(t, "R3", clusterSize) + defer c.shutdown() + + chaos := createClusterChaosMonkeyController( + t, + c, + &clusterBouncerChaosMonkey{ + minDowntime: 0 * time.Second, + maxDowntime: 2 * time.Second, + minDownServers: 1, + maxDownServers: clusterSize, + pause: 3 * time.Second, + }, + ) + chaos.start() + defer chaos.stop() + + <-time.After(duration) +} + +const ( + chaosConsumerTestsClusterName = "CONSUMERS_CHAOS_TEST" + chaosConsumerTestsStreamName = "CONSUMER_CHAOS_TEST_STREAM" + chaosConsumerTestsSubject = "foo" + chaosConsumerTestsDebug = false +) + +// Creates stream and fills it with the given number of messages. +// Each message is the string representation of the stream sequence number, +// e.g. the first message (seqno: 1) contains data "1". +// This allows consumers to verify the content of each message without tracking additional state +func createStreamForConsumerChaosTest(t *testing.T, c *cluster, replicas, numMessages int) { + t.Helper() + + const publishBatchSize = 1_000 + + pubNc, pubJs := jsClientConnectCluster(t, c) + defer pubNc.Close() + + _, err := pubJs.AddStream(&nats.StreamConfig{ + Name: chaosConsumerTestsStreamName, + Subjects: []string{chaosConsumerTestsSubject}, + Replicas: replicas, + }) + if err != nil { + t.Fatalf("Error creating stream: %v", err) + } + + ackFutures := make([]nats.PubAckFuture, 0, publishBatchSize) + + for i := 1; i <= numMessages; i++ { + message := []byte(fmt.Sprintf("%d", i)) + pubAckFuture, err := pubJs.PublishAsync(chaosConsumerTestsSubject, message, nats.ExpectLastSequence(uint64(i-1))) + if err != nil { + t.Fatalf("Publish error: %s", err) + } + ackFutures = append(ackFutures, pubAckFuture) + + if (i > 0 && i%publishBatchSize == 0) || i == numMessages { + select { + case <-pubJs.PublishAsyncComplete(): + for _, pubAckFuture := range ackFutures { + select { + case <-pubAckFuture.Ok(): + // Noop + case pubAckErr := <-pubAckFuture.Err(): + t.Fatalf("Error publishing: %s", pubAckErr) + case <-time.After(30 * time.Second): + t.Fatalf("Timeout verifying pubAck for message: %s", pubAckFuture.Msg().Data) + } + } + ackFutures = make([]nats.PubAckFuture, 0, publishBatchSize) + t.Logf("Published %d/%d messages", i, numMessages) + + case <-time.After(30 * time.Second): + t.Fatalf("Publish timed out") + } + } + } +} + +// Verify ordered delivery despite cluster-wide outages +func TestJetStreamChaosConsumerOrdered(t *testing.T) { + + const numMessages = 5_000 + const numBatch = 500 + const maxRetries = 100 + const retryDelay = 500 * time.Millisecond + const fetchTimeout = 250 * time.Millisecond + const clusterSize = 3 + const replicas = 3 + + c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize) + defer c.shutdown() + + 
createStreamForConsumerChaosTest(t, c, replicas, numMessages)
+
+	chaos := createClusterChaosMonkeyController(
+		t,
+		c,
+		&clusterBouncerChaosMonkey{
+			minDowntime:    0 * time.Second,
+			maxDowntime:    2 * time.Second,
+			minDownServers: clusterSize, // Whole cluster outage
+			maxDownServers: clusterSize,
+			pause:          1 * time.Second,
+		},
+	)
+
+	subNc, subJs := jsClientConnectCluster(t, c)
+	defer subNc.Close()
+
+	sub, err := subJs.SubscribeSync(
+		chaosConsumerTestsSubject,
+		nats.OrderedConsumer(),
+	)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer sub.Unsubscribe()
+
+	if chaosConsumerTestsDebug {
+		t.Logf("Initial subscription: %s", toIndentedJsonString(sub))
+	}
+
+	chaos.start()
+	defer chaos.stop()
+
+	for i := 1; i <= numMessages; i++ {
+		var msg *nats.Msg
+		var nextMsgErr error
+		var expectedMsgData = []byte(fmt.Sprintf("%d", i))
+
+	nextMsgRetryLoop:
+		for r := 0; r <= maxRetries; r++ {
+			msg, nextMsgErr = sub.NextMsg(fetchTimeout)
+			if nextMsgErr == nil {
+				break nextMsgRetryLoop
+			} else if r == maxRetries {
+				t.Fatalf("Exceeded max retries for NextMsg")
+			} else if nextMsgErr == nats.ErrBadSubscription {
+				t.Fatalf("Subscription is invalid: %s", toIndentedJsonString(sub))
+			} else {
+				time.Sleep(retryDelay)
+			}
+		}
+
+		metadata, err := msg.Metadata()
+		if err != nil {
+			t.Fatalf("Failed to get message metadata: %v", err)
+		}
+
+		if metadata.Sequence.Stream != uint64(i) {
+			t.Fatalf("Expecting stream sequence %d, got %d instead", i, metadata.Sequence.Stream)
+		}
+
+		if !bytes.Equal(msg.Data, expectedMsgData) {
+			t.Fatalf("Expecting message %s, got %s instead", expectedMsgData, msg.Data)
+		}
+
+		// Simulate application processing (and gives the monkey some time to brew chaos)
+		time.Sleep(10 * time.Millisecond)
+
+		if i%numBatch == 0 {
+			t.Logf("Consumed %d/%d", i, numMessages)
+		}
+	}
+}
+
+// Verify that every message is delivered at least once despite cluster-wide outages
+func TestJetStreamChaosConsumerAsync(t *testing.T) {
+
+	const numMessages = 5_000
+	const numBatch = 500
+	const timeout = 30 * time.Second // No (new) messages for 30s => terminate
+	const maxRetries = 25
+	const retryDelay = 500 * time.Millisecond
+	const clusterSize = 3
+	const replicas = 3
+
+	c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize)
+	defer c.shutdown()
+
+	createStreamForConsumerChaosTest(t, c, replicas, numMessages)
+
+	chaos := createClusterChaosMonkeyController(
+		t,
+		c,
+		&clusterBouncerChaosMonkey{
+			minDowntime:    0 * time.Second,
+			maxDowntime:    2 * time.Second,
+			minDownServers: clusterSize,
+			maxDownServers: clusterSize,
+			pause:          2 * time.Second,
+		},
+	)
+
+	subNc, subJs := jsClientConnectCluster(t, c)
+	defer subNc.Close()
+
+	timeoutTimer := time.NewTimer(timeout)
+	deliveryCount := uint64(0)
+	received := NewBitset(numMessages)
+
+	handleMsg := func(msg *nats.Msg) {
+		deliveryCount += 1
+
+		metadata, err := msg.Metadata()
+		if err != nil {
+			t.Fatalf("Failed to get message metadata: %v", err)
+		}
+		seq := metadata.Sequence.Stream
+
+		var expectedMsgData = []byte(fmt.Sprintf("%d", seq))
+		if !bytes.Equal(msg.Data, expectedMsgData) {
+			t.Fatalf("Expecting message content '%s', got '%s' instead", expectedMsgData, msg.Data)
+		}
+
+		isDupe := received.get(seq - 1)
+
+		if isDupe {
+			if chaosConsumerTestsDebug {
+				t.Logf("Duplicate message delivery, seq: %d", seq)
+			}
+			return
+		}
+
+		// Mark this sequence as received
+		received.set(seq-1, true)
+		if received.count() < numMessages {
+			// Reset timeout
+			timeoutTimer.Reset(timeout)
+		} else {
+			// All received, speed up the shutdown
+			timeoutTimer.Reset(1 * time.Second)
+		}
+
+		if received.count()%numBatch == 0 {
+			t.Logf("Consumed %d/%d", received.count(), numMessages)
+		}
+
+		// Simulate application processing (and gives the monkey some time to brew chaos)
+		time.Sleep(10 * time.Millisecond)
+
+	ackRetryLoop:
+		for i := 0; i <= maxRetries; i++ {
+			ackErr := msg.Ack()
+			if ackErr == nil {
+				break ackRetryLoop
+			} else if i == maxRetries {
+				t.Fatalf("Failed to ACK message %d (retried %d times)", seq, maxRetries)
+			} else {
+				time.Sleep(retryDelay)
+			}
+		}
+	}
+
+	subOpts := []nats.SubOpt{}
+	sub, err := subJs.Subscribe(chaosConsumerTestsSubject, handleMsg, subOpts...)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer sub.Unsubscribe()
+
+	chaos.start()
+	defer chaos.stop()
+
+	// Wait for long enough silence.
+	// Either a stall, or all messages received
+	<-timeoutTimer.C
+
+	// Shut down consumer
+	sub.Unsubscribe()
+
+	uniqueDeliveredCount := received.count()
+
+	t.Logf(
+		"Delivered %d/%d messages %d duplicate deliveries",
+		uniqueDeliveredCount,
+		numMessages,
+		deliveryCount-uniqueDeliveredCount,
+	)
+
+	if uniqueDeliveredCount != numMessages {
+		t.Fatalf("No new message delivered in the last %s, %d/%d messages never delivered", timeout, numMessages-uniqueDeliveredCount, numMessages)
+	}
+}
+
+// Verify durable consumer retains state despite cluster-wide outages
+// The consumer connection is also periodically closed, and the consumer 'resumes' on a different one
+func TestJetStreamChaosConsumerDurable(t *testing.T) {
+
+	const numMessages = 5_000
+	const numBatch = 500
+	const timeout = 30 * time.Second // No (new) messages for 30s => terminate
+	const clusterSize = 3
+	const replicas = 3
+	const maxRetries = 25
+	const retryDelay = 500 * time.Millisecond
+	const durableConsumerName = "durable"
+
+	c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize)
+	defer c.shutdown()
+
+	createStreamForConsumerChaosTest(t, c, replicas, numMessages)
+
+	chaos := createClusterChaosMonkeyController(
+		t,
+		c,
+		&clusterBouncerChaosMonkey{
+			minDowntime:    0 * time.Second,
+			maxDowntime:    2 * time.Second,
+			minDownServers: 1,
+			maxDownServers: clusterSize,
+			pause:          3 * time.Second,
+		},
+	)
+
+	var nc *nats.Conn
+	var sub *nats.Subscription
+	var subLock sync.Mutex
+
+	var handleMsgFun func(msg *nats.Msg)
+	var natsURL string
+
+	{
+		var sb strings.Builder
+		for _, s := range c.servers {
+			sb.WriteString(s.ClientURL())
+			sb.WriteString(",")
+		}
+		natsURL = sb.String()
+	}
+
+	resetDurableConsumer := func() {
+		subLock.Lock()
+		defer subLock.Unlock()
+
+		if nc != nil {
+			nc.Close()
+		}
+
+		var newNc *nats.Conn
+	connectRetryLoop:
+		for r := 0; r <= maxRetries; r++ {
+			var connErr error
+			newNc, connErr = nats.Connect(natsURL)
+			if connErr == nil {
+				break connectRetryLoop
+			} else if r == maxRetries {
+				t.Fatalf("Failed to connect, exceeded max retries, last error: %s", connErr)
+			} else {
+				time.Sleep(retryDelay)
+			}
+		}
+
+		var newJs nats.JetStreamContext
+	jsRetryLoop:
+		for r := 0; r <= maxRetries; r++ {
+			var jsErr error
+			newJs, jsErr = newNc.JetStream(nats.MaxWait(10 * time.Second))
+			if jsErr == nil {
+				break jsRetryLoop
+			} else if r == maxRetries {
+				t.Fatalf("Failed to get JS, exceeded max retries, last error: %s", jsErr)
+			} else {
+				time.Sleep(retryDelay)
+			}
+		}
+
+		subOpts := []nats.SubOpt{
+			nats.Durable(durableConsumerName),
+		}
+
+		var newSub *nats.Subscription
+	subscribeRetryLoop:
+		for i := 0; i <= maxRetries; i++ {
+			var subErr error
+			newSub, 
subErr = newJs.Subscribe(chaosConsumerTestsSubject, handleMsgFun, subOpts...) + if subErr == nil { + ci, err := newJs.ConsumerInfo(chaosConsumerTestsStreamName, durableConsumerName) + if err == nil { + if chaosConsumerTestsDebug { + t.Logf("Consumer info:\n %s", toIndentedJsonString(ci)) + } + } else { + t.Logf("Failed to retrieve consumer info: %s", err) + } + + break subscribeRetryLoop + } else if i == maxRetries { + t.Fatalf("Exceeded max retries creating subscription: %v", subErr) + } else { + time.Sleep(retryDelay) + } + } + + nc, sub = newNc, newSub + } + + timeoutTimer := time.NewTimer(timeout) + deliveryCount := uint64(0) + received := NewBitset(numMessages) + + handleMsgFun = func(msg *nats.Msg) { + + subLock.Lock() + if msg.Sub != sub { + // Message from a previous instance of durable consumer, drop + defer subLock.Unlock() + return + } + subLock.Unlock() + + deliveryCount += 1 + + metadata, err := msg.Metadata() + if err != nil { + t.Fatalf("Failed to get message metadata: %v", err) + } + seq := metadata.Sequence.Stream + + var expectedMsgData = []byte(fmt.Sprintf("%d", seq)) + if !bytes.Equal(msg.Data, expectedMsgData) { + t.Fatalf("Expecting message content '%s', got '%s' instead", expectedMsgData, msg.Data) + } + + isDupe := received.get(seq - 1) + + if isDupe { + if chaosConsumerTestsDebug { + t.Logf("Duplicate message delivery, seq: %d", seq) + } + return + } + + // Mark this sequence as received + received.set(seq-1, true) + if received.count() < numMessages { + // Reset timeout + timeoutTimer.Reset(timeout) + } else { + // All received, speed up the shutdown + timeoutTimer.Reset(1 * time.Second) + } + + // Simulate application processing (and gives the monkey some time to brew chaos) + time.Sleep(10 * time.Millisecond) + + ackRetryLoop: + for i := 0; i <= maxRetries; i++ { + ackErr := msg.Ack() + if ackErr == nil { + break ackRetryLoop + } else if i == maxRetries { + t.Fatalf("Failed to ACK message %d (retried %d times)", seq, maxRetries) + } else { + time.Sleep(retryDelay) + } + } + + if received.count()%numBatch == 0 { + t.Logf("Consumed %d/%d, duplicate deliveries: %d", received.count(), numMessages, deliveryCount-received.count()) + // Close connection and resume consuming on a different one + resetDurableConsumer() + } + } + + resetDurableConsumer() + + chaos.start() + defer chaos.stop() + + // Wait for long enough silence. 
+ // Either a stall, or all messages received + <-timeoutTimer.C + + // Shut down consumer + if sub != nil { + sub.Unsubscribe() + } + + uniqueDeliveredCount := received.count() + + t.Logf( + "Delivered %d/%d messages %d duplicate deliveries", + uniqueDeliveredCount, + numMessages, + deliveryCount-uniqueDeliveredCount, + ) + + if uniqueDeliveredCount != numMessages { + t.Fatalf("No new message delivered in the last %s, %d/%d messages never delivered", timeout, numMessages-uniqueDeliveredCount, numMessages) + } +} + +func TestJetStreamChaosConsumerPull(t *testing.T) { + + const numMessages = 5_000 + const numBatch = 500 + const maxRetries = 100 + const retryDelay = 500 * time.Millisecond + const fetchTimeout = 250 * time.Millisecond + const fetchBatchSize = 100 + const clusterSize = 3 + const replicas = 3 + const durableConsumerName = "durable" + + c := createJetStreamClusterExplicit(t, chaosConsumerTestsClusterName, clusterSize) + defer c.shutdown() + + createStreamForConsumerChaosTest(t, c, replicas, numMessages) + + chaos := createClusterChaosMonkeyController( + t, + c, + &clusterBouncerChaosMonkey{ + minDowntime: 0 * time.Second, + maxDowntime: 2 * time.Second, + minDownServers: clusterSize, // Whole cluster outage + maxDownServers: clusterSize, + pause: 1 * time.Second, + }, + ) + + subNc, subJs := jsClientConnectCluster(t, c) + defer subNc.Close() + + sub, err := subJs.PullSubscribe( + chaosConsumerTestsSubject, + durableConsumerName, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer sub.Unsubscribe() + + if chaosConsumerTestsDebug { + t.Logf("Initial subscription: %s", toIndentedJsonString(sub)) + } + + chaos.start() + defer chaos.stop() + + fetchMaxWait := nats.MaxWait(fetchTimeout) + received := NewBitset(numMessages) + deliveredCount := uint64(0) + + for received.count() < numMessages { + + var msgs []*nats.Msg + var fetchErr error + + fetchRetryLoop: + for r := 0; r <= maxRetries; r++ { + msgs, fetchErr = sub.Fetch(fetchBatchSize, fetchMaxWait) + if fetchErr == nil { + break fetchRetryLoop + } else if r == maxRetries { + t.Fatalf("Exceeded max retries for Fetch, last error: %s", fetchErr) + } else if fetchErr == nats.ErrBadSubscription { + t.Fatalf("Subscription is invalid: %s", toIndentedJsonString(sub)) + } else { + // t.Logf("Fetch error: %v", fetchErr) + time.Sleep(retryDelay) + } + } + + for _, msg := range msgs { + + deliveredCount += 1 + + metadata, err := msg.Metadata() + if err != nil { + t.Fatalf("Failed to get message metadata: %v", err) + } + + streamSeq := metadata.Sequence.Stream + + expectedMsgData := []byte(fmt.Sprintf("%d", streamSeq)) + + if !bytes.Equal(msg.Data, expectedMsgData) { + t.Fatalf("Expecting message %s, got %s instead", expectedMsgData, msg.Data) + } + + isDupe := received.get(streamSeq - 1) + + received.set(streamSeq-1, true) + + // Simulate application processing (and gives the monkey some time to brew chaos) + time.Sleep(10 * time.Millisecond) + + ackRetryLoop: + for r := 0; r <= maxRetries; r++ { + ackErr := msg.Ack() + if ackErr == nil { + break ackRetryLoop + } else if r == maxRetries { + t.Fatalf("Failed to ACK message %d, last error: %s", streamSeq, ackErr) + } else { + time.Sleep(retryDelay) + } + } + + if !isDupe && received.count()%numBatch == 0 { + t.Logf("Consumed %d/%d (duplicates: %d)", received.count(), numMessages, deliveredCount-received.count()) + } + } + } +} + +const ( + chaosKvTestsClusterName = "KV_CHAOS_TEST" + chaosKvTestsBucketName = "KV_CHAOS_TEST_BUCKET" + chaosKvTestsSubject = "foo" + 
+	chaosKvTestsDebug       = false
+)
+
+// Creates KV store (a.k.a. bucket).
+func createBucketForKvChaosTest(t *testing.T, c *cluster, replicas int) {
+	t.Helper()
+
+	pubNc, pubJs := jsClientConnectCluster(t, c)
+	defer pubNc.Close()
+
+	config := nats.KeyValueConfig{
+		Bucket:      chaosKvTestsBucketName,
+		Replicas:    replicas,
+		Description: "Test bucket",
+	}
+
+	kvs, err := pubJs.CreateKeyValue(&config)
+	if err != nil {
+		t.Fatalf("Error creating bucket: %v", err)
+	}
+
+	status, err := kvs.Status()
+	if err != nil {
+		t.Fatalf("Error retrieving bucket status: %v", err)
+	}
+	t.Logf("Bucket created: %s", status.Bucket())
+}
+
+// Single client performs a set of PUT on a single key.
+// If PUT is successful, perform a GET on the same key.
+// If GET is successful, ensure key revision and value match the most recent successful write.
+func TestJetStreamChaosKvPutGet(t *testing.T) {
+
+	const numOps = 100_000
+	const clusterSize = 3
+	const replicas = 3
+	const key = "key"
+	const staleReadsOk = true // Set to false to check for violations of 'read committed' consistency
+
+	c := createJetStreamClusterExplicit(t, chaosKvTestsClusterName, clusterSize)
+	defer c.shutdown()
+
+	createBucketForKvChaosTest(t, c, replicas)
+
+	chaos := createClusterChaosMonkeyController(
+		t,
+		c,
+		&clusterBouncerChaosMonkey{
+			minDowntime:    0 * time.Second,
+			maxDowntime:    2 * time.Second,
+			minDownServers: clusterSize, // Whole cluster outage
+			maxDownServers: clusterSize,
+			pause:          1 * time.Second,
+		},
+	)
+
+	nc, js := jsClientConnectCluster(t, c)
+	defer nc.Close()
+
+	// Create KV bucket
+	kv, err := js.KeyValue(chaosKvTestsBucketName)
+	if err != nil {
+		t.Fatalf("Failed to get KV store: %v", err)
+	}
+
+	// Initialize the only key
+	firstRevision, err := kv.Create(key, []byte("INITIAL VALUE"))
+	if err != nil {
+		t.Fatalf("Failed to create key: %v", err)
+	} else if firstRevision != 1 {
+		t.Fatalf("Unexpected revision: %d", firstRevision)
+	}
+
+	// Start chaos
+	chaos.start()
+	defer chaos.stop()
+
+	staleReadsCount := uint64(0)
+	successCount := uint64(0)
+
+	previousRevision := firstRevision
+
+putGetLoop:
+	for i := 1; i <= numOps; i++ {
+
+		if i%1000 == 0 {
+			t.Logf("Completed %d/%d PUT+GET operations", i, numOps)
+		}
+
+		// PUT a value
+		putValue := fmt.Sprintf("value-%d", i)
+		putRevision, err := kv.Put(key, []byte(putValue))
+		if err != nil {
+			t.Logf("PUT error: %v", err)
+			continue putGetLoop
+		}
+
+		// Check revision is monotonically increasing
+		if putRevision <= previousRevision {
+			t.Fatalf("PUT produced revision %d which is not greater than the previous successful PUT revision: %d", putRevision, previousRevision)
+		}
+
+		previousRevision = putRevision
+
+		// If PUT was successful, GET the same key
+		kve, err := kv.Get(key)
+		if err == nats.ErrKeyNotFound {
+			t.Fatalf("GET key not found, but key does exist (last PUT revision: %d)", putRevision)
+		} else if err != nil {
+			t.Logf("GET error: %v", err)
+			continue putGetLoop
+		}
+
+		getValue := string(kve.Value())
+		getRevision := kve.Revision()
+
+		if putRevision > getRevision {
+			// Stale read, violates 'read committed' consistency criteria
+			if !staleReadsOk {
+				t.Fatalf("PUT value %s (rev: %d) then read value %s (rev: %d)", putValue, putRevision, getValue, getRevision)
+			} else {
+				staleReadsCount += 1
+			}
+		} else if putRevision < getRevision {
+			// Returned revision is higher than any ever written, this should never happen
+			t.Fatalf("GET returned revision %d, but most recent expected revision is %d", getRevision, putRevision)
+		} else if putValue != getValue {
+			// Returned revision matches latest, but values do not, this should never happen
+			t.Fatalf("GET returned revision %d with value %s, but value %s was just committed for that same revision", getRevision, getValue, putValue)
+		} else {
+			// Get returned the latest revision/value
+			successCount += 1
+			if chaosKvTestsDebug {
+				t.Logf("PUT+GET %s=%s (rev: %d)", key, putValue, putRevision)
+			}
+		}
+	}
+
+	t.Logf("Completed %d PUT+GET cycles of which %d successful, %d GETs returned a stale value", numOps, successCount, staleReadsCount)
+}
+
+// A variant of TestJetStreamChaosKvPutGet where PUT is retried until successful, and GET is retried until it returns the latest known key revision.
+// This validates that a confirmed PUT value is never lost and eventually becomes visible.
+func TestJetStreamChaosKvPutGetWithRetries(t *testing.T) {
+
+	const numOps = 10_000
+	const maxRetries = 20
+	const retryDelay = 100 * time.Millisecond
+	const clusterSize = 3
+	const replicas = 3
+	const key = "key"
+
+	c := createJetStreamClusterExplicit(t, chaosKvTestsClusterName, clusterSize)
+	defer c.shutdown()
+
+	createBucketForKvChaosTest(t, c, replicas)
+
+	chaos := createClusterChaosMonkeyController(
+		t,
+		c,
+		&clusterBouncerChaosMonkey{
+			minDowntime:    0 * time.Second,
+			maxDowntime:    2 * time.Second,
+			minDownServers: clusterSize, // Whole cluster outage
+			maxDownServers: clusterSize,
+			pause:          1 * time.Second,
+		},
+	)
+
+	nc, js := jsClientConnectCluster(t, c)
+	defer nc.Close()
+
+	kv, err := js.KeyValue(chaosKvTestsBucketName)
+	if err != nil {
+		t.Fatalf("Failed to get KV store: %v", err)
+	}
+
+	// Initialize key value
+	firstRevision, err := kv.Create(key, []byte("INITIAL VALUE"))
+	if err != nil {
+		t.Fatalf("Failed to create key: %v", err)
+	} else if firstRevision != 1 {
+		t.Fatalf("Unexpected revision: %d", firstRevision)
+	}
+
+	// Start chaos
+	chaos.start()
+	defer chaos.stop()
+
+	staleReadCount := 0
+	previousRevision := firstRevision
+
+putGetLoop:
+	for i := 1; i <= numOps; i++ {
+
+		if i%1000 == 0 {
+			t.Logf("Completed %d/%d PUT+GET operations", i, numOps)
+		}
+
+		putValue := fmt.Sprintf("value-%d", i)
+		putRevision := uint64(0)
+
+		// Put new value for key, retry until successful or out of retries
+	putRetryLoop:
+		for r := 0; r <= maxRetries; r++ {
+			var putErr error
+			putRevision, putErr = kv.Put(key, []byte(putValue))
+			if putErr == nil {
+				break putRetryLoop
+			} else if r == maxRetries {
+				t.Fatalf("Failed to PUT (retried %d times): %v", maxRetries, putErr)
+			} else {
+				if chaosKvTestsDebug {
+					t.Logf("PUT error: %v", putErr)
+				}
+				time.Sleep(retryDelay)
+			}
+		}
+
+		// Ensure key version is monotonically increasing
+		if putRevision <= previousRevision {
+			t.Fatalf("Latest PUT created revision %d which is not greater than the previous revision: %d", putRevision, previousRevision)
+		}
+		previousRevision = putRevision
+
+		// Read value for key, retry until successful, and validate corresponding version and value
+	getRetryLoop:
+		for r := 0; r <= maxRetries; r++ {
+			var getErr error
+			kve, getErr := kv.Get(key)
+			if getErr != nil && r == maxRetries {
+				t.Fatalf("Failed to GET (retried %d times): %v", maxRetries, getErr)
+			} else if getErr != nil {
+				if chaosKvTestsDebug {
+					t.Logf("GET error: %v", getErr)
+				}
+				time.Sleep(retryDelay)
+				continue getRetryLoop
+			}
+
+			// GET successful, check revision and value
+			getValue := string(kve.Value())
+			getRevision := kve.Revision()
+
+			if putRevision == getRevision {
+				if putValue != getValue {
+					t.Fatalf("Unexpected value %s for revision %d, expected: %s", getValue, getRevision, putValue)
+				}
+				if chaosKvTestsDebug {
+					t.Logf("PUT+GET %s=%s (rev: %d) (retry: %d)", key, putValue, putRevision, r)
+				}
+				continue putGetLoop
+			} else if getRevision > putRevision {
+				t.Fatalf("GET returned version that should not exist yet: %d, last created: %d", getRevision, putRevision)
+			} else { // get revision < put revision
+				staleReadCount += 1
+				if chaosKvTestsDebug {
+					t.Logf("GET got stale value: %v (rev: %d, latest: %d)", getValue, getRevision, putRevision)
+				}
+				time.Sleep(retryDelay)
+				continue getRetryLoop
+			}
+		}
+	}
+
+	t.Logf("Client completed %d PUT+GET cycles, %d GETs returned a stale value", numOps, staleReadCount)
+}
+
+// Multiple clients updating a finite set of keys with CAS semantics.
+// TODO check that revision is never lower than last one seen
+// TODO check that KeyNotFound is never returned, as keys are initialized beforehand
+func TestJetStreamChaosKvCAS(t *testing.T) {
+	const numOps = 10_000
+	const maxRetries = 50
+	const retryDelay = 300 * time.Millisecond
+	const clusterSize = 3
+	const replicas = 3
+	const numKeys = 15
+	const numClients = 5
+
+	c := createJetStreamClusterExplicit(t, chaosKvTestsClusterName, clusterSize)
+	defer c.shutdown()
+
+	createBucketForKvChaosTest(t, c, replicas)
+
+	chaos := createClusterChaosMonkeyController(
+		t,
+		c,
+		&clusterBouncerChaosMonkey{
+			minDowntime:    0 * time.Second,
+			maxDowntime:    2 * time.Second,
+			minDownServers: clusterSize, // Whole cluster outage
+			maxDownServers: clusterSize,
+			pause:          1 * time.Second,
+		},
+	)
+
+	nc, js := jsClientConnectCluster(t, c)
+	defer nc.Close()
+
+	// Create bucket
+	kv, err := js.KeyValue(chaosKvTestsBucketName)
+	if err != nil {
+		t.Fatalf("Failed to get KV store: %v", err)
+	}
+
+	// Create set of keys and initialize them with a dummy value
+	keys := make([]string, numKeys)
+	for k := 0; k < numKeys; k++ {
+		key := fmt.Sprintf("key-%d", k)
+		keys[k] = key
+
+		_, err := kv.Create(key, []byte("Initial value"))
+		if err != nil {
+			t.Fatalf("Failed to create key: %v", err)
+		}
+	}
+
+	wgStart := sync.WaitGroup{}
+	wgComplete := sync.WaitGroup{}
+
+	// Client routine
+	client := func(clientId int, kv nats.KeyValue) {
+		defer wgComplete.Done()
+
+		rng := rand.New(rand.NewSource(int64(clientId)))
+		successfulUpdates := 0
+		casRejectUpdates := 0
+		otherUpdateErrors := 0
+
+		// Map to track last known revision for each of the keys
+		knownRevisions := map[string]uint64{}
+		for _, key := range keys {
+			knownRevisions[key] = 0
+		}
+
+		// Wait for all clients to reach this point before proceeding
+		wgStart.Done()
+		wgStart.Wait()
+
+		for i := 1; i <= numOps; i++ {
+
+			if i%1000 == 0 {
+				t.Logf("Client %d completed %d/%d updates", clientId, i, numOps)
+			}
+
+			// Pick random key from the set
+			key := keys[rng.Intn(numKeys)]
+
+			// Prepare unique value to be written
+			value := fmt.Sprintf("client: %d operation %d", clientId, i)
+
+			// Try to update a key with CAS
+			newRevision, updateErr := kv.Update(key, []byte(value), knownRevisions[key])
+			if updateErr == nil {
+				// Update successful
+				knownRevisions[key] = newRevision
+				successfulUpdates += 1
+				if chaosKvTestsDebug {
+					t.Logf("Client %d updated key %s, new revision: %d", clientId, key, newRevision)
+				}
+			} else if updateErr != nil && strings.Contains(fmt.Sprint(updateErr), "wrong last sequence") {
+				// CAS rejected update, learn current revision for this key
+				casRejectUpdates += 1
+
+				for r := 0; r <= maxRetries; r++ {
+					kve, getErr := kv.Get(key)
+					if getErr == nil {
+						currentRevision := kve.Revision()
+						if currentRevision < knownRevisions[key] {
+							// Revision number moved backward, this should never happen
+							t.Fatalf("Current revision for key %s is %d, which is lower than the last known revision %d", key, currentRevision, knownRevisions[key])
+
+						}
+
+						knownRevisions[key] = currentRevision
+						if chaosKvTestsDebug {
+							t.Logf("Client %d learned key %s revision: %d", clientId, key, currentRevision)
+						}
+						break
+					} else if r == maxRetries {
+						t.Fatalf("Failed to GET (retried %d times): %v", maxRetries, getErr)
+					} else {
+						time.Sleep(retryDelay)
+					}
+				}
+			} else {
+				// Other update error
+				otherUpdateErrors += 1
+				if chaosKvTestsDebug {
+					t.Logf("Client %d update error for key %s: %v", clientId, key, updateErr)
+				}
+				time.Sleep(retryDelay)
+			}
+		}
+		t.Logf("Client %d done, %d kv updates, %d CAS rejected, %d other errors", clientId, successfulUpdates, casRejectUpdates, otherUpdateErrors)
+	}
+
+	// Launch all clients
+	for i := 1; i <= numClients; i++ {
+		cNc, cJs := jsClientConnectCluster(t, c)
+		defer cNc.Close()
+
+		cKv, err := cJs.KeyValue(chaosKvTestsBucketName)
+		if err != nil {
+			t.Fatalf("Failed to get KV store: %v", err)
+		}
+
+		wgStart.Add(1)
+		wgComplete.Add(1)
+		go client(i, cKv)
+	}
+
+	// Wait for clients to be connected and ready
+	wgStart.Wait()
+
+	// Start failures
+	chaos.start()
+	defer chaos.stop()
+
+	// Wait for all clients to be done
+	wgComplete.Wait()
+}
diff --git a/server/jetstream_cluster.go b/server/jetstream_cluster.go
index 2d9f39679..e3c552069 100644
--- a/server/jetstream_cluster.go
+++ b/server/jetstream_cluster.go
@@ -281,6 +281,37 @@ func (s *Server) JetStreamStepdownStream(account, stream string) error {
 	return nil
 }
 
+func (s *Server) JetStreamStepdownConsumer(account, stream, consumer string) error {
+	js, cc := s.getJetStreamCluster()
+	if js == nil {
+		return NewJSNotEnabledError()
+	}
+	if cc == nil {
+		return NewJSClusterNotActiveError()
+	}
+	// Grab account
+	acc, err := s.LookupAccount(account)
+	if err != nil {
+		return err
+	}
+	// Grab stream
+	mset, err := acc.lookupStream(stream)
+	if err != nil {
+		return err
+	}
+
+	o := mset.lookupConsumer(consumer)
+	if o == nil {
+		return NewJSConsumerNotFoundError()
+	}
+
+	if node := o.raftNode(); node != nil && node.Leader() {
+		node.StepDown()
+	}
+
+	return nil
+}
+
 func (s *Server) JetStreamSnapshotStream(account, stream string) error {
 	js, cc := s.getJetStreamCluster()
 	if js == nil {
@@ -320,7 +351,7 @@ func (s *Server) JetStreamClusterPeers() []string {
 	defer js.mu.RUnlock()
 
 	cc := js.cluster
-	if !cc.isLeader() {
+	if !cc.isLeader() || cc.meta == nil {
 		return nil
 	}
 	peers := cc.meta.Peers()
@@ -403,42 +434,106 @@ func (cc *jetStreamCluster) isStreamCurrent(account, stream string) bool {
 	return false
 }
 
+// Restart the stream in question.
+// Should only be called when the stream is known to be in a bad state.
+func (js *jetStream) restartStream(acc *Account, csa *streamAssignment) {
+	js.mu.Lock()
+	cc := js.cluster
+	if cc == nil {
+		js.mu.Unlock()
+		return
+	}
+	// Need to look up the one directly from the meta layer; what we get handed is a copy if coming from isStreamHealthy.
+	asa := cc.streams[acc.Name]
+	if asa == nil {
+		js.mu.Unlock()
+		return
+	}
+	sa := asa[csa.Config.Name]
+	if sa == nil {
+		js.mu.Unlock()
+		return
+	}
+	// Make sure to clear out the raft node if still present in the meta layer.
+	if rg := sa.Group; rg != nil && rg.node != nil {
+		if rg.node.State() != Closed {
+			rg.node.Stop()
+		}
+		rg.node = nil
+	}
+	js.mu.Unlock()
+
+	// Process stream assignment to recreate.
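+	// This reruns the same path used when the assignment first arrives from
+	// the meta layer, rebuilding the raft group we just cleared above.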
+	js.processStreamAssignment(sa)
+
+	// If we had consumers assigned to this server they will be present in the copy, csa.
+	// They also need to be processed. The csa consumers is a copy of only our consumers,
+	// those assigned to us, but the consumer assignments there are direct from the meta
+	// layer to make this part much easier and avoid excessive lookups.
+	for _, cca := range csa.consumers {
+		if cca.deleted {
+			continue
+		}
+		// Need to look up original as well here to make sure node is nil.
+		js.mu.Lock()
+		ca := sa.consumers[cca.Name]
+		if ca != nil && ca.Group != nil {
+			// Make sure the node is stopped if still running.
+			if node := ca.Group.node; node != nil && node.State() != Closed {
+				node.Stop()
+			}
+			// Make sure node is wiped.
+			ca.Group.node = nil
+		}
+		js.mu.Unlock()
+		if ca != nil {
+			js.processConsumerAssignment(ca)
+		}
+	}
+}
+
 // isStreamHealthy will determine if the stream is up to date or very close.
 // For R1 it will make sure the stream is present on this server.
-// Read lock should be held.
-func (cc *jetStreamCluster) isStreamHealthy(account, stream string) bool {
+func (js *jetStream) isStreamHealthy(acc *Account, sa *streamAssignment) bool {
+	js.mu.Lock()
+	s, cc := js.srv, js.cluster
 	if cc == nil {
 		// Non-clustered mode
+		js.mu.Unlock()
 		return true
 	}
-	as := cc.streams[account]
-	if as == nil {
-		return false
-	}
-	sa := as[stream]
-	if sa == nil {
-		return false
-	}
+
+	// Pull the group out.
 	rg := sa.Group
 	if rg == nil {
+		js.mu.Unlock()
+		return false
+	}
+
+	streamName := sa.Config.Name
+	node := rg.node
+	js.mu.Unlock()
+
+	// First look up the stream and make sure it's there.
+	mset, err := acc.lookupStream(streamName)
+	if err != nil {
+		js.restartStream(acc, sa)
 		return false
 	}
 
-	if rg.node == nil || rg.node.Healthy() {
+	if node == nil || node.Healthy() {
 		// Check if we are processing a snapshot and are catching up.
-		acc, err := cc.s.LookupAccount(account)
-		if err != nil {
-			return false
-		}
-		mset, err := acc.lookupStream(stream)
-		if err != nil {
-			return false
+		if !mset.isCatchingUp() {
+			return true
 		}
-		if mset.isCatchingUp() {
-			return false
+	} else if node != nil {
+		if node != mset.raftNode() {
+			s.Warnf("Detected stream cluster node skew '%s > %s'", acc.GetName(), streamName)
+			node.Delete()
+			mset.resetClusteredState(nil)
+		} else if node.State() == Closed {
+			js.restartStream(acc, sa)
 		}
-		// Success.
-		return true
 	}
 
 	return false
@@ -446,28 +541,71 @@ func (cc *jetStreamCluster) isStreamHealthy(account, stream string) bool {
 
 // isConsumerCurrent will determine if the consumer is up to date.
 // For R1 it will make sure the consumer is present on this server.
-// Read lock should be held.
-func (cc *jetStreamCluster) isConsumerCurrent(account, stream, consumer string) bool {
+func (js *jetStream) isConsumerHealthy(mset *stream, consumer string, ca *consumerAssignment) bool {
+	if mset == nil {
+		return false
+	}
+
+	js.mu.RLock()
+	cc := js.cluster
 	if cc == nil {
 		// Non-clustered mode
+		js.mu.RUnlock()
 		return true
 	}
-	acc, err := cc.s.LookupAccount(account)
-	if err != nil {
+	// These are required.
+	if ca == nil || ca.Group == nil {
+		js.mu.RUnlock()
 		return false
 	}
-	mset, err := acc.lookupStream(stream)
-	if err != nil {
-		return false
+	s := js.srv
+	js.mu.RUnlock()
+
+	// Capture RAFT node from assignment.
+	node := ca.Group.node
+
+	// When we try to restart we nil out the node if applicable
+	// and reprocess the consumer assignment.
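+	// Note: the restart below is skipped if the assignment has been deleted in the meantime.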
+	restartConsumer := func() {
+		js.mu.Lock()
+		// Make sure the node is stopped if still running.
+		if node != nil && node.State() != Closed {
+			node.Stop()
+		}
+		ca.Group.node = nil
+		deleted := ca.deleted
+		js.mu.Unlock()
+		if !deleted {
+			js.processConsumerAssignment(ca)
+		}
+	}
+
+	// Check if not running at all.
 	o := mset.lookupConsumer(consumer)
 	if o == nil {
+		restartConsumer()
 		return false
 	}
-	if n := o.raftNode(); n != nil && !n.Current() {
-		return false
+
+	// Check RAFT node state.
+	if node == nil || node.Healthy() {
+		return true
+	} else if node != nil {
+		if node != o.raftNode() {
+			mset.mu.RLock()
+			accName, streamName := mset.acc.GetName(), mset.cfg.Name
+			mset.mu.RUnlock()
+			s.Warnf("Detected consumer cluster node skew '%s > %s > %s'", accName, streamName, consumer)
+			node.Delete()
+			o.deleteWithoutAdvisory()
+			restartConsumer()
+		} else if node.State() == Closed {
+			// We have a consumer, and it should have a running node but it is closed.
+			o.stop()
+			restartConsumer()
+		}
 	}
-	return true
+	return false
 }
 
 // subjectsOverlap checks all existing stream assignments for the account cross-cluster for subject overlap
@@ -622,11 +760,14 @@ func (js *jetStream) setupMetaGroup() error {
 		return err
 	}
 
+	// Register our server.
+	fs.registerServer(s)
+
 	cfg := &RaftConfig{Name: defaultMetaGroupName, Store: storeDir, Log: fs}
 
 	// If we are soliciting leafnode connections and we are sharing a system account and do not disable it with a hint,
 	// we want to move to observer mode so that we extend the solicited cluster or supercluster but do not form our own.
-	cfg.Observer = s.canExtendOtherDomain() && s.opts.JetStreamExtHint != jsNoExtend
+	cfg.Observer = s.canExtendOtherDomain() && s.getOpts().JetStreamExtHint != jsNoExtend
 
 	var bootstrap bool
 	if ps, err := readPeerState(storeDir); err != nil {
@@ -754,7 +895,7 @@ func (js *jetStream) isGroupLeaderless(rg *raftGroup) bool {
 	// If we don't have a leader.
 	if rg.node.GroupLeader() == _EMPTY_ {
 		// Threshold for jetstream startup.
-		const startupThreshold = 5 * time.Second
+		const startupThreshold = 10 * time.Second
 
 		if rg.node.HadPreviousLeader() {
 			// Make sure we have been running long enough to intelligently determine this.
@@ -960,36 +1101,62 @@ type recoveryUpdates struct {
 // Streams and consumers are recovered from disk, and the meta layer's mappings
 // should clean them up, but under crash scenarios there could be orphans.
 func (js *jetStream) checkForOrphans() {
-	js.mu.Lock()
-	defer js.mu.Unlock()
-
 	consumerName := func(o *consumer) string {
 		o.mu.RLock()
 		defer o.mu.RUnlock()
 		return o.name
 	}
 
+	// Cannot hold the jetstream lock while trying to delete streams or consumers.
+	js.mu.Lock()
 	s, cc := js.srv, js.cluster
 	s.Debugf("JetStream cluster checking for orphans")
 
+	var streams []*stream
+	var consumers []*consumer
+
 	for accName, jsa := range js.accounts {
 		asa := cc.streams[accName]
 		for stream, mset := range jsa.streams {
 			if sa := asa[stream]; sa == nil {
-				s.Warnf("Detected orphaned stream '%s > %s', will cleanup", accName, stream)
-				mset.delete()
+				streams = append(streams, mset)
 			} else {
 				// This one is good, check consumers now.
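+				// Orphans found here are only collected; actual deletion happens after the lock is released.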
 				for _, o := range mset.getConsumers() {
 					consumer := consumerName(o)
 					if sa.consumers[consumer] == nil {
-						s.Warnf("Detected orphaned consumer '%s > %s > %s', will cleanup", accName, stream, consumer)
-						o.delete()
+						consumers = append(consumers, o)
 					}
 				}
 			}
 		}
 	}
+	js.mu.Unlock()
+
+	for _, mset := range streams {
+		mset.mu.RLock()
+		accName, stream := mset.acc.Name, mset.cfg.Name
+		mset.mu.RUnlock()
+		s.Warnf("Detected orphaned stream '%s > %s', will cleanup", accName, stream)
+		if err := mset.delete(); err != nil {
+			s.Warnf("Deleting stream encountered an error: %v", err)
+		}
+	}
+	for _, o := range consumers {
+		o.mu.RLock()
+		accName, mset, consumer := o.acc.Name, o.mset, o.name
+		o.mu.RUnlock()
+		stream := "N/A"
+		if mset != nil {
+			mset.mu.RLock()
+			stream = mset.cfg.Name
+			mset.mu.RUnlock()
+		}
+		s.Warnf("Detected orphaned consumer '%s > %s > %s', will cleanup", accName, stream, consumer)
+		if err := o.delete(); err != nil {
+			s.Warnf("Deleting consumer encountered an error: %v", err)
+		}
+	}
 }
 
 func (js *jetStream) monitorCluster() {
@@ -1004,7 +1171,7 @@ func (js *jetStream) monitorCluster() {
 	// Make sure to stop the raft group on exit to prevent accidental memory bloat.
 	defer n.Stop()
 
-	const compactInterval = 2 * time.Minute
+	const compactInterval = time.Minute
 	t := time.NewTicker(compactInterval)
 	defer t.Stop()
 
@@ -1013,11 +1180,22 @@ func (js *jetStream) monitorCluster() {
 	lt := time.NewTicker(leaderCheckInterval)
 	defer lt.Stop()
 
+	const healthCheckInterval = 2 * time.Minute
+	ht := time.NewTicker(healthCheckInterval)
+	defer ht.Stop()
+
+	// Utility to check health.
+	checkHealth := func() {
+		if hs := s.healthz(nil); hs.Error != _EMPTY_ {
+			s.Warnf("%v", hs.Error)
+		}
+	}
+
 	var (
-		isLeader     bool
-		lastSnap     []byte
-		lastSnapTime time.Time
-		minSnapDelta = 10 * time.Second
+		isLeader       bool
+		lastSnapTime   time.Time
+		compactSizeMin = uint64(8 * 1024 * 1024) // 8MB
+		minSnapDelta   = 10 * time.Second
 	)
 
 	// Highwayhash key for generating hashes.
@@ -1033,10 +1211,10 @@ func (js *jetStream) monitorCluster() {
 		if js.isMetaRecovering() {
 			return
 		}
-		snap := js.metaSnapshot()
-		if hash := highwayhash.Sum(snap, key); !bytes.Equal(hash[:], lastSnap) {
-			if err := n.InstallSnapshot(snap); err == nil {
-				lastSnap, lastSnapTime = hash[:], time.Now()
+		// For the meta layer we want to snapshot when we need one or when we have any entries that we can compact.
+		if ne, _ := n.Size(); ne > 0 || n.NeedSnapshot() {
+			if err := n.InstallSnapshot(js.metaSnapshot()); err == nil {
+				lastSnapTime = time.Now()
 			} else if err != errNoSnapAvailable && err != errNodeClosed {
 				s.Warnf("Error snapshotting JetStream cluster state: %v", err)
 			}
@@ -1087,26 +1265,32 @@ func (js *jetStream) monitorCluster() {
 				ru = nil
 				s.Debugf("Recovered JetStream cluster metadata")
 				js.checkForOrphans()
+				// Do a health check here as well.
+				go checkHealth()
 				continue
 			}
 			// FIXME(dlc) - Deal with errors.
 			if didSnap, didStreamRemoval, didConsumerRemoval, err := js.applyMetaEntries(ce.Entries, ru); err == nil {
 				_, nb := n.Applied(ce.Index)
-				if js.hasPeerEntries(ce.Entries) || didSnap || didStreamRemoval {
-					// Since we received one make sure we have our own since we do not store
-					// our meta state outside of raft.
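+					// Snapshot if we saw peer entries or a stream removal, or if we,
+					// as a follower, just applied a snapshot entry ourselves.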
+ if js.hasPeerEntries(ce.Entries) || didStreamRemoval || (didSnap && !isLeader) { doSnapshot() } else if didConsumerRemoval && time.Since(lastSnapTime) > minSnapDelta/2 { doSnapshot() - } else if lls := len(lastSnap); nb > uint64(lls*8) && lls > 0 && time.Since(lastSnapTime) > minSnapDelta { + } else if nb > compactSizeMin && time.Since(lastSnapTime) > minSnapDelta { doSnapshot() } + ce.ReturnToPool() } } aq.recycle(&ces) + case isLeader = <-lch: + // For meta layer synchronize everyone to our state on becoming leader. + if isLeader { + n.SendSnapshot(js.metaSnapshot()) + } + // Process the change. js.processLeaderChange(isLeader) - if isLeader { s.sendInternalMsgLocked(serverStatsPingReqSubj, _EMPTY_, nil, nil) // Install a snapshot as we become leader. @@ -1120,6 +1304,10 @@ func (js *jetStream) monitorCluster() { if n.Leader() { js.checkClusterSize() } + case <-ht.C: + // Do this in a separate go routine. + go checkHealth() + case <-lt.C: s.Debugf("Checking JetStream cluster state") // If we have a current leader or had one in the past we can cancel this here since the metaleader @@ -1427,6 +1615,9 @@ func (js *jetStream) processAddPeer(peer string) { defer js.mu.Unlock() s, cc := js.srv, js.cluster + if cc == nil || cc.meta == nil { + return + } isLeader := cc.isLeader() // Now check if we are meta-leader. We will check for any re-assignments. @@ -1468,7 +1659,7 @@ func (js *jetStream) processAddPeer(peer string) { func (js *jetStream) processRemovePeer(peer string) { js.mu.Lock() s, cc := js.srv, js.cluster - if cc.meta == nil { + if cc == nil || cc.meta == nil { js.mu.Unlock() return } @@ -1532,6 +1723,9 @@ func (js *jetStream) removePeerFromStreamLocked(sa *streamAssignment, peer strin } s, cc, csa := js.srv, js.cluster, sa.copyGroup() + if cc == nil || cc.meta == nil { + return false + } replaced := cc.remapStreamAssignment(csa, peer) if !replaced { s.Warnf("JetStream cluster could not replace peer for stream '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name) @@ -1685,7 +1879,7 @@ func (js *jetStream) applyMetaEntries(entries []*Entry, ru *recoveryUpdates) (bo js.processUpdateStreamAssignment(sa) } default: - panic("JetStream Cluster Unknown meta entry op type") + panic(fmt.Sprintf("JetStream Cluster Unknown meta entry op type: %v", entryOp(buf[0]))) } } } @@ -1773,6 +1967,8 @@ func (js *jetStream) createRaftGroup(accName string, rg *raftGroup, storage Stor s.Errorf("Error creating filestore WAL: %v", err) return err } + // Register our server. + fs.registerServer(s) store = fs } else { ms, err := newMemStore(&StreamConfig{Name: rg.Name, Storage: MemoryStorage}) @@ -1869,14 +2065,24 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps return } + // Make sure only one is running. + if mset != nil { + if mset.checkInMonitor() { + return + } + defer mset.clearMonitorRunning() + } + + // Make sure to stop the raft group on exit to prevent accidental memory bloat. + // This should be below the checkInMonitor call though to avoid stopping it out + // from underneath the one that is running since it will be the same raft node. + defer n.Stop() + qch, lch, aq, uch, ourPeerId := n.QuitC(), n.LeadChangeC(), n.ApplyQ(), mset.updateC(), meta.ID() s.Debugf("Starting stream monitor for '%s > %s' [%s]", sa.Client.serviceAccount(), sa.Config.Name, n.Group()) defer s.Debugf("Exiting stream monitor for '%s > %s' [%s]", sa.Client.serviceAccount(), sa.Config.Name, n.Group()) - // Make sure to stop the raft group on exit to prevent accidental memory bloat. 
-	defer n.Stop()
-
 	// Make sure we do not leave the apply channel to fill up and block the raft layer.
 	defer func() {
 		if n.State() == Closed {
@@ -1913,33 +2119,37 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 	}
 	accName := acc.GetName()
 
-	// Hash of the last snapshot (fixed size in memory).
-	var lastSnap []byte
+	// Used to represent how we can detect a changed state quickly and without representing
+	// a complete and detailed state which could be costly in terms of memory, cpu and GC.
+	// This only entails how many messages, and the first and last sequence of the stream.
+	// This is all that is needed to detect a change, and we can get this from FilteredState()
+	// with an empty filter.
+	var lastState SimpleState
 	var lastSnapTime time.Time
 
-	// Highwayhash key for generating hashes.
-	key := make([]byte, 32)
-	rand.Read(key)
-
 	// Should only be called from leader.
 	doSnapshot := func() {
 		if mset == nil || isRestore || time.Since(lastSnapTime) < minSnapDelta {
 			return
 		}
 
-		snap := mset.stateSnapshot()
-		ne, nb := n.Size()
-		hash := highwayhash.Sum(snap, key)
+		// Before we actually calculate the detailed state and encode it, let's check the
+		// simple state to detect any changes.
+		curState := mset.store.FilteredState(0, _EMPTY_)
+
 		// If the state hasn't changed but the log has gone way over
 		// the compaction size then we will want to compact anyway.
 		// This shouldn't happen for streams like it can for pull
 		// consumers on idle streams but better to be safe than sorry!
-		if !bytes.Equal(hash[:], lastSnap) || ne >= compactNumMin || nb >= compactSizeMin {
-			if err := n.InstallSnapshot(snap); err == nil {
-				lastSnap, lastSnapTime = hash[:], time.Now()
-			} else if err != errNoSnapAvailable && err != errNodeClosed {
-				s.Warnf("Failed to install snapshot for '%s > %s' [%s]: %v", mset.acc.Name, mset.name(), n.Group(), err)
-			}
+		ne, nb := n.Size()
+		if curState == lastState && ne < compactNumMin && nb < compactSizeMin {
+			return
+		}
+
+		if err := n.InstallSnapshot(mset.stateSnapshot()); err == nil {
+			lastState, lastSnapTime = curState, time.Now()
+		} else if err != errNoSnapAvailable && err != errNodeClosed {
+			s.Warnf("Failed to install snapshot for '%s > %s' [%s]: %v", mset.acc.Name, mset.name(), n.Group(), err)
 		}
 	}
 
@@ -1987,6 +2197,31 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 	}
 	defer stopDirectMonitoring()
 
+	// Check if we are interest-based and, if so and we have an active stream, wait until we
+	// have the consumers attached. This can become important when a server has lots of assets
+	// since we process streams first then consumers as an asset class.
+	if mset != nil && mset.isInterestRetention() {
+		js.mu.RLock()
+		numExpectedConsumers := len(sa.consumers)
+		js.mu.RUnlock()
+		if mset.numConsumers() < numExpectedConsumers {
+			s.Debugf("Waiting for consumers for interest based stream '%s > %s'", accName, mset.name())
+			// Wait up to 10s
+			const maxWaitTime = 10 * time.Second
+			const sleepTime = 250 * time.Millisecond
+			timeout := time.Now().Add(maxWaitTime)
+			for time.Now().Before(timeout) {
+				if mset.numConsumers() >= numExpectedConsumers {
+					break
+				}
+				time.Sleep(sleepTime)
+			}
+			if actual := mset.numConsumers(); actual < numExpectedConsumers {
+				s.Warnf("Not all consumers online for '%s > %s': expected %d but only have %d", accName, mset.name(), numExpectedConsumers, actual)
+			}
+		}
+	}
+
+	// This is triggered during a scale up from R1 to clustered mode. We need the new followers to catch up,
+	// similar to how we trigger the catchup mechanism post a backup/restore.
+	// We can arrive here NOT being the leader, so we send the snapshot only if we are, and in this case
@@ -1996,6 +2231,7 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 		n.SendSnapshot(mset.stateSnapshot())
 		sendSnapshot = false
 	}
+
 	for {
 		select {
 		case <-s.quitCh:
@@ -2019,6 +2255,7 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 				if err := js.applyStreamEntries(mset, ce, isRecovering); err == nil {
 					// Update our applied.
 					ne, nb = n.Applied(ce.Index)
+					ce.ReturnToPool()
 				} else {
 					s.Warnf("Error applying entries to '%s > %s': %v", accName, sa.Config.Name, err)
 					if isClusterResetErr(err) {
@@ -2047,9 +2284,13 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 
 		case isLeader = <-lch:
 			if isLeader {
-				if sendSnapshot && mset != nil && n != nil {
-					n.SendSnapshot(mset.stateSnapshot())
-					sendSnapshot = false
+				if mset != nil && n != nil {
+					// Send a snapshot if being asked or if we are tracking
+					// a failed state so that followers sync.
+					if clfs := mset.clearCLFS(); clfs > 0 || sendSnapshot {
+						n.SendSnapshot(mset.stateSnapshot())
+						sendSnapshot = false
+					}
 				}
 				if isRestore {
 					acc, _ := s.LookupAccount(sa.Client.serviceAccount())
@@ -2082,27 +2323,19 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 			// Here we are checking if we are not the leader but we have been asked to allow
 			// direct access. We now allow non-leaders to participate in the queue group.
 			if !isLeader && mset != nil {
-				mset.mu.Lock()
-				// Check direct gets first.
-				if mset.cfg.AllowDirect {
-					if mset.directSub == nil && mset.isCurrent() {
-						mset.subscribeToDirect()
-					} else {
-						startDirectAccessMonitoring()
-					}
-				}
-				// Now check for mirror directs as well.
-				if mset.cfg.MirrorDirect {
-					if mset.mirror != nil && mset.mirror.dsub == nil && mset.isCurrent() {
-						mset.subscribeToMirrorDirect()
-					} else {
-						startDirectAccessMonitoring()
-					}
-				}
-				mset.mu.Unlock()
+				startDirectAccessMonitoring()
 			}
 
 		case <-datc:
+			if mset == nil || isRecovering {
+				return
+			}
+			// If we are leader we can stop, we know this is setup now.
+			if isLeader {
+				stopDirectMonitoring()
+				return
+			}
+
 			mset.mu.Lock()
 			ad, md, current := mset.cfg.AllowDirect, mset.cfg.MirrorDirect, mset.isCurrent()
 			if !current {
@@ -2126,11 +2359,12 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 				mset.subscribeToMirrorDirect()
 			}
 			mset.mu.Unlock()
-			// Stop monitoring.
+			// Stop direct monitoring.
 			stopDirectMonitoring()
 
 		case <-t.C:
 			doSnapshot()
+
 		case <-uch:
 			// keep stream assignment current
 			sa = mset.streamAssignment()
@@ -2155,8 +2389,6 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 
 			// Check to see where we are.
 			rg := mset.raftGroup()
-			ci := js.clusterInfo(rg)
-			mset.checkClusterInfo(ci)
 
 			// Track the new peers and check the ones that are current.
 			mset.mu.RLock()
@@ -2168,6 +2400,10 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 				continue
 			}
 
+			// Make sure we have correct cluster information on the other peers.
+			ci := js.clusterInfo(rg)
+			mset.checkClusterInfo(ci)
+
 			newPeers, oldPeers, newPeerSet, oldPeerSet := genPeerInfo(rg.Peers, len(rg.Peers)-replicas)
 
 			// If we are part of the new peerset and we have been passed the baton.
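[Editor's note: the scale-down hunk below transfers stream leadership only once a majority of the new peer set reports current. For readers unfamiliar with that quorum arithmetic, here is a minimal, self-contained Go sketch of the replicas/2+1 rule; the names (replica, readyToTransfer) are illustrative only and are not part of the server code.]

	package main

	import "fmt"

	// replica mirrors the two fields the hunk below consults: whether a peer
	// reports current, and (via the set) whether it belongs to the new peer set.
	type replica struct {
		peer    string
		current bool
	}

	// readyToTransfer applies the replicas/2+1 majority rule: leadership is
	// transferred only once a majority of the new peer set is caught up.
	func readyToTransfer(replicas int, rs []replica, newPeerSet map[string]bool) bool {
		neededCurrent, current := replicas/2+1, 0
		for _, r := range rs {
			if r.current && newPeerSet[r.peer] {
				current++
			}
		}
		return current >= neededCurrent
	}

	func main() {
		newPeerSet := map[string]bool{"p1": true, "p2": true, "p3": true}
		rs := []replica{{"p1", true}, {"p2", true}, {"p3", false}}
		// R=3 needs 2 current peers from the new set; here we have exactly 2.
		fmt.Println(readyToTransfer(3, rs, newPeerSet)) // true
	}
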
@@ -2205,12 +2441,12 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 				csa.Group.Cluster = s.cachedClusterName()
 				cc.meta.ForwardProposal(encodeUpdateStreamAssignment(csa))
 				s.Noticef("Scaling down '%s > %s' to %+v", accName, sa.Config.Name, s.peerSetToNames(newPeers))
-
 			} else {
 				// We are the old leader here, from the original peer set.
 				// We are simply waiting on the new peerset to be caught up so we can transfer leadership.
 				var newLeaderPeer, newLeader string
 				neededCurrent, current := replicas/2+1, 0
+
 				for _, r := range ci.Replicas {
 					if r.Current && newPeerSet[r.Peer] {
 						current++
@@ -2222,6 +2458,7 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment, sendSnaps
 				// Check if we have a quorum.
 				if current >= neededCurrent {
 					s.Noticef("Transfer of stream leader for '%s > %s' to '%s'", accName, sa.Config.Name, newLeader)
+					n.UpdateKnownPeers(newPeers)
 					n.StepDown(newLeaderPeer)
 				}
 			}
@@ -2364,6 +2601,12 @@ func (mset *stream) resetClusteredState(err error) bool {
 		node.StepDown()
 	}
 
+	// If we detect we are shutting down just return.
+	if js != nil && js.isShuttingDown() {
+		s.Debugf("Will not reset stream, jetstream shutting down")
+		return false
+	}
+
 	// Server
 	if js.limitsExceeded(stype) {
 		s.Debugf("Will not reset stream, server resources exceeded")
@@ -2383,41 +2626,42 @@ func (mset *stream) resetClusteredState(err error) bool {
 	// Preserve our current state and messages unless we have a first sequence mismatch.
 	shouldDelete := err == errFirstSequenceMismatch
-	mset.stop(shouldDelete, false)
 
-	if sa != nil {
-		s.Warnf("Resetting stream cluster state for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
-		js.mu.Lock()
-		sa.Group.node = nil
-		js.mu.Unlock()
-		go js.restartClustered(acc, sa)
-	}
-	return true
-}
+	// Need to do the rest in a separate Go routine.
+	go func() {
+		mset.monitorWg.Wait()
+		mset.resetAndWaitOnConsumers()
+		// Stop our stream.
+		mset.stop(shouldDelete, false)
 
-// This will reset the stream and consumers.
-// Should be done in separate go routine.
-func (js *jetStream) restartClustered(acc *Account, sa *streamAssignment) {
-	// Check and collect consumers first.
-	js.mu.RLock()
-	var consumers []*consumerAssignment
-	if cc := js.cluster; cc != nil && cc.meta != nil {
-		ourID := cc.meta.ID()
-		for _, ca := range sa.consumers {
-			if rg := ca.Group; rg != nil && rg.isMember(ourID) {
-				rg.node = nil // Erase group raft/node state.
-				consumers = append(consumers, ca)
+		if sa != nil {
+			js.mu.Lock()
+			s.Warnf("Resetting stream cluster state for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
+			// Now wipe groups from assignments.
+			sa.Group.node = nil
+			var consumers []*consumerAssignment
+			if cc := js.cluster; cc != nil && cc.meta != nil {
+				ourID := cc.meta.ID()
+				for _, ca := range sa.consumers {
+					if rg := ca.Group; rg != nil && rg.isMember(ourID) {
+						rg.node = nil // Erase group raft/node state.
+						consumers = append(consumers, ca)
+					}
+				}
+			}
+			js.mu.Unlock()
+
+			// This will reset the stream and consumers.
+			// Reset stream.
+			js.processClusterCreateStream(acc, sa)
+			// Reset consumers.
+			for _, ca := range consumers {
+				js.processClusterCreateConsumer(ca, nil, false)
			}
 		}
-	}
-	js.mu.RUnlock()
+	}()
 
-	// Reset stream.
-	js.processClusterCreateStream(acc, sa)
-	// Reset consumers.
-	for _, ca := range consumers {
-		js.processClusterCreateConsumer(ca, nil, false)
-	}
+	return true
 }
 
 func isControlHdr(hdr []byte) bool {
@@ -2464,11 +2708,15 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco
 				// Grab last sequence and CLFS.
 				last, clfs := mset.lastSeqAndCLFS()
-
 				// We can skip if we know this is less than what we already have.
 				if lseq-clfs < last {
 					s.Debugf("Apply stream entries for '%s > %s' skipping message with sequence %d with last of %d",
 						mset.account(), mset.name(), lseq+1-clfs, last)
+
+					mset.mu.Lock()
+					// Check for any preAcks in case we are interest based.
+					mset.clearAllPreAcks(lseq + 1 - mset.clfs)
+					mset.mu.Unlock()
 					continue
 				}
 
@@ -2482,7 +2730,9 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco
 				// Messages to be skipped have no subject or timestamp or msg or hdr.
 				if subject == _EMPTY_ && ts == 0 && len(msg) == 0 && len(hdr) == 0 {
 					// Skip and update our lseq.
-					mset.setLastSeq(mset.store.SkipMsg())
+					last := mset.store.SkipMsg()
+					mset.setLastSeq(last)
+					mset.clearAllPreAcks(last)
 					continue
 				}
 
@@ -2551,13 +2801,16 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco
 					}
 					panic(err.Error())
 				}
-				// Ignore if we are recovering and we have already processed.
-				if isRecovering {
-					if mset.state().FirstSeq <= sp.LastSeq {
-						// Make sure all messages from the purge are gone.
-						mset.store.Compact(sp.LastSeq + 1)
+				// If no explicit request, fill in with leader-stamped last sequence to protect ourselves on replay during server start.
+				if sp.Request == nil || sp.Request.Sequence == 0 {
+					purgeSeq := sp.LastSeq + 1
+					if sp.Request == nil {
+						sp.Request = &JSApiStreamPurgeRequest{Sequence: purgeSeq}
+					} else if sp.Request.Keep == 0 {
+						sp.Request.Sequence = purgeSeq
+					} else if isRecovering {
+						continue
 					}
-					continue
 				}
 
 				s := js.server()
@@ -2582,7 +2835,7 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco
 					}
 				}
 			default:
-				panic("JetStream Cluster Unknown group entry op type!")
+				panic(fmt.Sprintf("JetStream Cluster Unknown group entry op type: %v", op))
 			}
 		} else if e.Type == EntrySnapshot {
 			if !isRecovering && mset != nil {
@@ -2857,7 +3110,9 @@ func (js *jetStream) processStreamAssignment(sa *streamAssignment) bool {
 		accStreams = make(map[string]*streamAssignment)
 	} else if osa := accStreams[stream]; osa != nil && osa != sa {
 		// Copy over private existing state from former SA.
-		sa.Group.node = osa.Group.node
+		if sa.Group != nil {
+			sa.Group.node = osa.Group.node
+		}
 		sa.consumers = osa.consumers
 		sa.responded = osa.responded
 		sa.err = osa.err
@@ -2948,7 +3203,9 @@ func (js *jetStream) processUpdateStreamAssignment(sa *streamAssignment) {
 	}
 
 	// Copy over private existing state from former SA.
-	sa.Group.node = osa.Group.node
+	if sa.Group != nil {
+		sa.Group.node = osa.Group.node
+	}
 	sa.consumers = osa.consumers
 	sa.err = osa.err
 
@@ -2966,7 +3223,9 @@ func (js *jetStream) processUpdateStreamAssignment(sa *streamAssignment) {
 		sa.responded = false
 	} else {
 		// Make sure to clean up any old node in case this stream moves back here.
-		sa.Group.node = nil
+		if sa.Group != nil {
+			sa.Group.node = nil
+		}
 	}
 	js.mu.Unlock()
 
@@ -2998,19 +3257,23 @@ func (s *Server) removeStream(ourID string, mset *stream, nsa *streamAssignment)
 			node.StepDown(nsa.Group.Preferred)
 		}
 		node.ProposeRemovePeer(ourID)
-		// shut down monitor by shutting down raft
+		// Shut down the monitor by shutting down raft.
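+		// Delete, unlike Stop, also removes the raft group's stored state.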
 		node.Delete()
 	}
 
+	var isShuttingDown bool
 	// Make sure this node is no longer attached to our stream assignment.
 	if js, _ := s.getJetStreamCluster(); js != nil {
 		js.mu.Lock()
 		nsa.Group.node = nil
+		isShuttingDown = js.shuttingDown
 		js.mu.Unlock()
 	}
 
-	// wait for monitor to be shut down
-	mset.monitorWg.Wait()
+	if !isShuttingDown {
+		// Wait for the monitor to be shut down.
+		mset.monitorWg.Wait()
+	}
 	mset.stop(true, false)
 }
 
@@ -3146,6 +3409,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme
 	s, rg := js.srv, sa.Group
 	alreadyRunning := rg.node != nil
 	storage := sa.Config.Storage
+	restore := sa.Restore
 	js.mu.RUnlock()
 
 	// Process the raft group and make sure it's running if needed.
@@ -3154,11 +3418,13 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme
 	// If we are restoring, create the stream if we are R>1 and not the preferred who handles the
 	// receipt of the snapshot itself.
 	shouldCreate := true
-	if sa.Restore != nil {
+	if restore != nil {
 		if len(rg.Peers) == 1 || rg.node != nil && rg.node.ID() == rg.Preferred {
 			shouldCreate = false
 		} else {
+			js.mu.Lock()
 			sa.Restore = nil
+			js.mu.Unlock()
 		}
 	}
 
@@ -3428,7 +3694,12 @@ func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember,
 			mset.monitorWg.Wait()
 			err = mset.stop(true, wasLeader)
 			stopped = true
+		} else if isMember {
+			s.Warnf("JetStream failed to look up running stream while removing stream '%s > %s' from this server",
+				sa.Client.serviceAccount(), sa.Config.Name)
 		}
+	} else if isMember {
+		s.Warnf("JetStream failed to look up account while removing stream '%s > %s' from this server", sa.Client.serviceAccount(), sa.Config.Name)
 	}
 
 	// Always delete the node if present.
@@ -3441,11 +3712,16 @@ func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember,
 	// 2) node was nil (and couldn't be deleted)
 	if !stopped || node == nil {
 		if sacc := s.SystemAccount(); sacc != nil {
-			os.RemoveAll(filepath.Join(js.config.StoreDir, sacc.GetName(), defaultStoreDirName, sa.Group.Name))
+			saccName := sacc.GetName()
+			os.RemoveAll(filepath.Join(js.config.StoreDir, saccName, defaultStoreDirName, sa.Group.Name))
 			// cleanup dependent consumer groups
 			if !stopped {
 				for _, ca := range sa.consumers {
-					os.RemoveAll(filepath.Join(js.config.StoreDir, sacc.GetName(), defaultStoreDirName, ca.Group.Name))
+					// Make sure we clean up any possible running nodes for the consumers.
+					if isMember && ca.Group != nil && ca.Group.node != nil {
+						ca.Group.node.Delete()
+					}
+					os.RemoveAll(filepath.Join(js.config.StoreDir, saccName, defaultStoreDirName, ca.Group.Name))
 				}
 			}
 		}
@@ -3520,7 +3796,9 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) {
 	} else if oca := sa.consumers[ca.Name]; oca != nil {
 		wasExisting = true
 		// Copy over private existing state from former SA.
-		ca.Group.node = oca.Group.node
+		if ca.Group != nil {
+			ca.Group.node = oca.Group.node
+		}
 		ca.responded = oca.responded
 		ca.err = oca.err
 	}
@@ -3635,8 +3913,12 @@ func (js *jetStream) processConsumerRemoval(ca *consumerAssignment) {
 	var needDelete bool
 	if accStreams := cc.streams[ca.Client.serviceAccount()]; accStreams != nil {
 		if sa := accStreams[ca.Stream]; sa != nil && sa.consumers != nil && sa.consumers[ca.Name] != nil {
-			needDelete = true
-			delete(sa.consumers, ca.Name)
+			oca := sa.consumers[ca.Name]
+			// Make sure this removal is for what we have, otherwise ignore.
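+			// Group names differ when the consumer has since been re-created under a new assignment.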
+			if ca.Group != nil && oca.Group != nil && ca.Group.Name == oca.Group.Name {
+				needDelete = true
+				delete(sa.consumers, ca.Name)
+			}
 		}
 	}
 	js.mu.Unlock()
@@ -3700,6 +3982,7 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state
 		if ca.Config.MemoryStorage {
 			storage = MemoryStorage
 		}
+		// No-op if R1.
 		js.createRaftGroup(accName, rg, storage)
 	} else {
 		// If we are clustered update the known peers.
@@ -3714,7 +3997,7 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state
 	var didCreate, isConfigUpdate, needsLocalResponse bool
 	if o == nil {
 		// Add in the consumer if needed.
-		if o, err = mset.addConsumerWithAssignment(ca.Config, ca.Name, ca, false); err == nil {
+		if o, err = mset.addConsumerWithAssignment(ca.Config, ca.Name, ca, wasExisting); err == nil {
 			didCreate = true
 		}
 	} else {
@@ -3803,6 +4086,13 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state
 
 		if rg.node != nil {
 			rg.node.Delete()
+			// Clear the node here.
+			rg.node = nil
+		}
+
+		// If we did create a consumer, make sure to stop it.
+		if o != nil {
+			o.stop()
 		}
 
 		var result *consumerAssignmentResult
@@ -3859,6 +4149,7 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state
 		// Clustered consumer.
 		// Start our monitoring routine if needed.
 		if !alreadyRunning && !o.isMonitorRunning() {
+			o.monitorWg.Add(1)
 			s.startGoRoutine(func() { js.monitorConsumer(o, ca) })
 		}
 		// For existing consumer, only send response if not recovering.
@@ -4057,6 +4348,8 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
 	s, n, cc := js.server(), o.raftNode(), js.cluster
 	defer s.grWG.Done()
 
+	defer o.monitorWg.Done()
+
 	if n == nil {
 		s.Warnf("No RAFT group for '%s > %s > %s'", o.acc.Name, ca.Stream, ca.Name)
 		return
@@ -4068,14 +4361,16 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
 	}
 	defer o.clearMonitorRunning()
 
+	// Make sure to stop the raft group on exit to prevent accidental memory bloat.
+	// This should be below the checkInMonitor call though to avoid stopping it out
+	// from underneath the one that is running since it will be the same raft node.
+	defer n.Stop()
+
 	qch, lch, aq, uch, ourPeerId := n.QuitC(), n.LeadChangeC(), n.ApplyQ(), o.updateC(), cc.meta.ID()
 
 	s.Debugf("Starting consumer monitor for '%s > %s > %s' [%s]", o.acc.Name, ca.Stream, ca.Name, n.Group())
 	defer s.Debugf("Exiting consumer monitor for '%s > %s > %s' [%s]", o.acc.Name, ca.Stream, ca.Name, n.Group())
 
-	// Make sure to stop the raft group on exit to prevent accidental memory bloat.
-	defer n.Stop()
-
 	const (
 		compactInterval = 2 * time.Minute
 		compactSizeMin  = 64 * 1024 // What is stored here is always small for consumers.
@@ -4096,9 +4391,9 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
 	var lastSnap []byte
 	var lastSnapTime time.Time
 
-	doSnapshot := func() {
+	doSnapshot := func(force bool) {
 		// Bail if trying too fast and not in a forced situation.
-		if time.Since(lastSnapTime) < minSnapDelta {
+		if !force && time.Since(lastSnapTime) < minSnapDelta {
 			return
 		}
 
@@ -4106,7 +4401,7 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
 		ne, nb := n.Size()
 		if !n.NeedSnapshot() {
 			// Check if we should compact etc. based on size of log.
- if ne < compactNumMin && nb < compactSizeMin { + if !force && ne < compactNumMin && nb < compactSizeMin { return } } @@ -4164,15 +4459,16 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { if ce == nil { recovering = false if n.NeedSnapshot() { - doSnapshot() + doSnapshot(true) } - continue - } - if err := js.applyConsumerEntries(o, ce, isLeader); err == nil { + // Check our state if we are under an interest based stream. + o.checkStateForInterestStream() + } else if err := js.applyConsumerEntries(o, ce, isLeader); err == nil { ne, nb := n.Applied(ce.Index) + ce.ReturnToPool() // If we have at least min entries to compact, go ahead and snapshot/compact. if nb > 0 && ne >= compactNumMin || nb > compactSizeMin { - doSnapshot() + doSnapshot(false) } } else { s.Warnf("Error applying consumer entries to '%s > %s'", ca.Client.serviceAccount(), ca.Name) @@ -4183,8 +4479,20 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { if recovering && !isLeader { js.setConsumerAssignmentRecovering(ca) } + + // Synchronize everyone to our state. + if isLeader && n != nil { + // Only send out if we have state. + if _, _, applied := n.Progress(); applied > 0 { + if snap, err := o.store.EncodedState(); err == nil { + n.SendSnapshot(snap) + } + } + } + + // Process the change. if err := js.processConsumerLeaderChange(o, isLeader); err == nil && isLeader { - doSnapshot() + doSnapshot(true) } // We may receive a leader change after the consumer assignment which would cancel us @@ -4273,7 +4581,7 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { } case <-t.C: - doSnapshot() + doSnapshot(false) } } } @@ -4281,24 +4589,29 @@ func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) { func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLeader bool) error { for _, e := range ce.Entries { if e.Type == EntrySnapshot { - // No-op needed? - state, err := decodeConsumerState(e.Data) - if err != nil { - if mset, node := o.streamAndNode(); mset != nil && node != nil { - s := js.srv - s.Errorf("JetStream cluster could not decode consumer snapshot for '%s > %s > %s' [%s]", - mset.account(), mset.name(), o, node.Group()) + if !isLeader { + // No-op needed? + state, err := decodeConsumerState(e.Data) + if err != nil { + if mset, node := o.streamAndNode(); mset != nil && node != nil { + s := js.srv + s.Errorf("JetStream cluster could not decode consumer snapshot for '%s > %s > %s' [%s]", + mset.account(), mset.name(), o, node.Group()) + } + panic(err.Error()) } - panic(err.Error()) - } - if err = o.store.Update(state); err != nil { - o.mu.RLock() - s, acc, mset, name := o.srv, o.acc, o.mset, o.name - o.mu.RUnlock() - if s != nil && mset != nil { - s.Warnf("Consumer '%s > %s > %s' error on store update from snapshot entry: %v", acc, mset.name(), name, err) + if err = o.store.Update(state); err != nil { + o.mu.RLock() + s, acc, mset, name := o.srv, o.acc, o.mset, o.name + o.mu.RUnlock() + if s != nil && mset != nil { + s.Warnf("Consumer '%s > %s > %s' error on store update from snapshot entry: %v", acc, mset.name(), name, err) + } + } else { + o.checkStateForInterestStream() } } + } else if e.Type == EntryRemovePeer { js.mu.RLock() var ourID string @@ -4384,7 +4697,7 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea } o.mu.Unlock() default: - panic(fmt.Sprintf("JetStream Cluster Unknown group entry op type! 
%v", entryOp(buf[0]))) + panic(fmt.Sprintf("JetStream Cluster Unknown group entry op type: %v", entryOp(buf[0]))) } } } @@ -4393,13 +4706,20 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea func (o *consumer) processReplicatedAck(dseq, sseq uint64) { o.mu.Lock() + + mset := o.mset + if o.closed || mset == nil { + o.mu.Unlock() + return + } + // Update activity. o.lat = time.Now() + // Do actual ack update to store. o.store.UpdateAcks(dseq, sseq) - mset := o.mset - if mset == nil || o.retention == LimitsPolicy { + if o.retention == LimitsPolicy { o.mu.Unlock() return } @@ -4473,29 +4793,34 @@ func (js *jetStream) processConsumerLeaderChange(o *consumer, isLeader bool) err return errors.New("failed to update consumer leader status") } + if o == nil || o.isClosed() { + return stepDownIfLeader() + } + ca := o.consumerAssignment() if ca == nil { return stepDownIfLeader() } js.mu.Lock() s, account, err := js.srv, ca.Client.serviceAccount(), ca.err - client, subject, reply := ca.Client, ca.Subject, ca.Reply + client, subject, reply, streamName, consumerName := ca.Client, ca.Subject, ca.Reply, ca.Stream, ca.Name hasResponded := ca.responded ca.responded = true js.mu.Unlock() - streamName, consumerName := o.streamName(), o.String() acc, _ := s.LookupAccount(account) if acc == nil { return stepDownIfLeader() } if isLeader { + // ** added by Memphis logFunc := s.Noticef if strings.Contains(streamName, "$memphis") || strings.Contains(consumerName, "$memphis") { logFunc = s.Debugf } + // ** added by Memphis logFunc("JetStream cluster new consumer leader for '%s > %s > %s'", ca.Client.serviceAccount(), streamName, consumerName) s.sendConsumerLeaderElectAdvisory(o) // Check for peer removal and process here if needed. @@ -4643,6 +4968,9 @@ func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client defer js.mu.Unlock() s, cc := js.srv, js.cluster + if cc == nil || cc.meta == nil { + return + } // This should have been done already in processStreamAssignment, but in // case we have a code path that gets here with no processStreamAssignment, @@ -4716,6 +5044,9 @@ func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *clie defer js.mu.Unlock() s, cc := js.srv, js.cluster + if cc == nil || cc.meta == nil { + return + } if sa := js.streamAssignment(result.Account, result.Stream); sa != nil && sa.consumers != nil { if ca := sa.consumers[result.Consumer]; ca != nil && !ca.responded { @@ -4869,7 +5200,12 @@ func (cc *jetStreamCluster) remapStreamAssignment(sa *streamAssignment, removePe return true } - // If we are here let's remove the peer at least. + // If R1 just return to avoid bricking the stream. + if sa.Group.node == nil || len(sa.Group.Peers) == 1 { + return false + } + + // If we are here let's remove the peer at least, as long as we are R>1 for i, peer := range sa.Group.Peers { if peer == removePeer { sa.Group.Peers[i] = sa.Group.Peers[len(sa.Group.Peers)-1] @@ -5432,19 +5768,20 @@ var ( // blocking utility call to perform requests on the system account // returns (synchronized) v or error -func (s *Server) sysRequest(v interface{}, subjFormat string, args ...interface{}) (interface{}, error) { +func sysRequest[T any](s *Server, subjFormat string, args ...interface{}) (*T, error) { isubj := fmt.Sprintf(subjFormat, args...) + s.mu.Lock() inbox := s.newRespInbox() - results := make(chan interface{}, 1) - // Store our handler. 
-	s.sys.replies[inbox] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) {
-		if err := json.Unmarshal(msg, v); err != nil {
+	results := make(chan *T, 1)
+	s.sys.replies[inbox] = func(_ *subscription, _ *client, _ *Account, _, _ string, msg []byte) {
+		var v T
+		if err := json.Unmarshal(msg, &v); err != nil {
 			s.Warnf("Error unmarshalling response for request '%s':%v", isubj, err)
 			return
 		}
 		select {
-		case results <- v:
+		case results <- &v:
 		default:
 			s.Warnf("Failed placing request response on internal channel")
 		}
@@ -5453,27 +5790,22 @@ func (s *Server) sysRequest(v interface{}, subjFormat string, args ...interface{
 
 	s.sendInternalMsgLocked(isubj, inbox, nil, nil)
 
-	const timeout = 2 * time.Second
-	notActive := time.NewTimer(timeout)
-	defer notActive.Stop()
-
-	var err error
-	var data interface{}
+	defer func() {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+		if s.sys != nil && s.sys.replies != nil {
+			delete(s.sys.replies, inbox)
+		}
+	}()
 
 	select {
 	case <-s.quitCh:
-		err = errReqSrvExit
-	case <-notActive.C:
-		err = errReqTimeout
-	case data = <-results:
+		return nil, errReqSrvExit
+	case <-time.After(2 * time.Second):
+		return nil, errReqTimeout
+	case data := <-results:
+		return data, nil
 	}
-	// Clean up here.
-	s.mu.Lock()
-	if s.sys != nil && s.sys.replies != nil {
-		delete(s.sys.replies, inbox)
-	}
-	s.mu.Unlock()
-	return data, err
 }
 
 func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, cfg *StreamConfig, peerSet []string) {
@@ -5485,6 +5817,10 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su
 	// Now process the request and proposal.
 	js.mu.Lock()
 	defer js.mu.Unlock()
+	meta := cc.meta
+	if meta == nil {
+		return
+	}
 
 	var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
 
@@ -5563,9 +5899,9 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su
 		} else {
 			// Need to release js lock.
 			js.mu.Unlock()
-			if si, err := s.sysRequest(&StreamInfo{}, clusterStreamInfoT, ci.serviceAccount(), cfg.Name); err != nil {
+			if si, err := sysRequest[StreamInfo](s, clusterStreamInfoT, ci.serviceAccount(), cfg.Name); err != nil {
 				msg = fmt.Sprintf("error retrieving info: %s", err.Error())
-			} else if si := si.(*StreamInfo); si != nil {
+			} else if si != nil {
 				currentCount := 0
 				if si.Cluster.Leader != _EMPTY_ {
 					currentCount++
@@ -5598,6 +5934,18 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su
 	if isReplicaChange {
 		// We are adding new peers here.
 		if newCfg.Replicas > len(rg.Peers) {
+			// Check if we do not have a cluster assigned, and if we do not, make sure we
+			// try to pick one. This could happen with older streams that were assigned by
+			// previous servers.
+			if rg.Cluster == _EMPTY_ {
+				// Prefer placement directives if we have them.
+				if newCfg.Placement != nil && newCfg.Placement.Cluster != _EMPTY_ {
+					rg.Cluster = newCfg.Placement.Cluster
+				} else {
+					// Fall back to the cluster assignment from the client.
+					rg.Cluster = ci.Cluster
+				}
+			}
 			peers, err := cc.selectPeerGroup(newCfg.Replicas, rg.Cluster, newCfg, rg.Peers, 0, nil)
 			if err != nil {
 				resp.Error = NewJSClusterNoPeersError(err)
@@ -5620,10 +5968,12 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su
 		if !s.allPeersOffline(rg) {
 			// Need to release js lock.
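+			// sysRequest blocks for up to two seconds, so js.mu must not be held across it.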
js.mu.Unlock() - if si, err := s.sysRequest(&StreamInfo{}, clusterStreamInfoT, ci.serviceAccount(), cfg.Name); err != nil { + if si, err := sysRequest[StreamInfo](s, clusterStreamInfoT, ci.serviceAccount(), cfg.Name); err != nil { s.Warnf("Did not receive stream info results for '%s > %s' due to: %s", acc, cfg.Name, err) - } else if cl := si.(*StreamInfo).Cluster; cl != nil && cl.Leader != _EMPTY_ { - curLeader = getHash(cl.Leader) + } else if si != nil { + if cl := si.Cluster; cl != nil && cl.Leader != _EMPTY_ { + curLeader = getHash(cl.Leader) + } } // Re-acquire here. js.mu.Lock() @@ -5653,9 +6003,9 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su // Need to remap any consumers. for _, ca := range osa.consumers { - // Ephemerals are R=1, so only auto-remap durables, or R>1. + // Ephemerals are R=1, so only auto-remap durables, or R>1, unless stream is interest or workqueue policy. numPeers := len(ca.Group.Peers) - if ca.Config.Durable != _EMPTY_ || numPeers > 1 { + if ca.Config.Durable != _EMPTY_ || numPeers > 1 || cfg.Retention != LimitsPolicy { cca := ca.copyGroup() // Adjust preferred as needed. if numPeers == 1 && len(rg.Peers) > 1 { @@ -5748,13 +6098,12 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su } sa := &streamAssignment{Group: rg, Sync: osa.Sync, Created: osa.Created, Config: newCfg, Subject: subject, Reply: reply, Client: ci} - cc.meta.Propose(encodeUpdateStreamAssignment(sa)) + meta.Propose(encodeUpdateStreamAssignment(sa)) // Process any staged consumers. for _, ca := range consumers { - cc.meta.Propose(encodeAddConsumerAssignment(ca)) + meta.Propose(encodeAddConsumerAssignment(ca)) } - } func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, stream, subject, reply string, rmsg []byte) { @@ -5766,6 +6115,10 @@ func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, st js.mu.Lock() defer js.mu.Unlock() + if cc.meta == nil { + return + } + osa := js.streamAssignment(acc.Name, stream) if osa == nil { var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}} @@ -5839,6 +6192,10 @@ func (s *Server) jsClusteredStreamRestoreRequest( js.mu.Lock() defer js.mu.Unlock() + if cc.meta == nil { + return + } + cfg := &req.Config resp := JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}} @@ -6181,7 +6538,7 @@ LOOP: }) } - resp.Total = len(resp.Consumers) + resp.Total = ocnt resp.Limit = JSApiListLimit resp.Offset = offset resp.Missing = missingNames @@ -6210,6 +6567,10 @@ func (s *Server) jsClusteredConsumerDeleteRequest(ci *ClientInfo, acc *Account, js.mu.Lock() defer js.mu.Unlock() + if cc.meta == nil { + return + } + var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}} sa := js.streamAssignment(acc.Name, stream) @@ -6401,6 +6762,10 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec js.mu.Lock() defer js.mu.Unlock() + if cc.meta == nil { + return + } + // Lookup the stream assignment. sa := js.streamAssignment(acc.Name, stream) if sa == nil { @@ -6552,10 +6917,12 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec if !s.allPeersOffline(ca.Group) { // Need to release js lock. 
js.mu.Unlock() - if ci, err := s.sysRequest(&ConsumerInfo{}, clusterConsumerInfoT, ci.serviceAccount(), sa.Config.Name, cfg.Durable); err != nil { + if ci, err := sysRequest[ConsumerInfo](s, clusterConsumerInfoT, ci.serviceAccount(), sa.Config.Name, cfg.Durable); err != nil { s.Warnf("Did not receive consumer info results for '%s > %s > %s' due to: %s", acc, sa.Config.Name, cfg.Durable, err) - } else if cl := ci.(*ConsumerInfo).Cluster; cl != nil { - curLeader = getHash(cl.Leader) + } else if ci != nil { + if cl := ci.Cluster; cl != nil { + curLeader = getHash(cl.Leader) + } } // Re-acquire here. js.mu.Lock() @@ -6740,7 +7107,7 @@ func encodeStreamMsg(subject, reply string, hdr, msg []byte, lseq uint64, ts int // Threshold for compression. // TODO(dlc) - Eventually make configurable. -const compressThreshold = 4 * 1024 +const compressThreshold = 256 // If allowed and contents over the threshold we will compress. func encodeStreamMsgAllowCompress(subject, reply string, hdr, msg []byte, lseq uint64, ts int64, compressOK bool) []byte { @@ -6883,7 +7250,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ // Check here pre-emptively if we have exceeded this server limits. if js.limitsExceeded(stype) { - s.resourcesExeededError() + s.resourcesExceededError() if canRespond { b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: name}, Error: NewJSInsufficientResourcesError()}) outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, nil, b, nil, 0)) @@ -7027,7 +7394,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ // Check to see if we are being overrun. // TODO(dlc) - Make this a limit where we drop messages to protect ourselves, but allow to be configured. if mset.clseq-(lseq+clfs) > streamLagWarnThreshold { - lerr := fmt.Errorf("JetStream stream '%s > %s' has high message lag", jsa.acc().Name, mset.cfg.Name) + lerr := fmt.Errorf("JetStream stream '%s > %s' has high message lag", jsa.acc().Name, name) s.RateLimitWarnf(lerr.Error()) } mset.clMu.Unlock() @@ -7069,15 +7436,18 @@ func (mset *stream) calculateSyncRequest(state *StreamState, snap *streamSnapsho // processSnapshotDeletes will update our current store based on the snapshot // but only processing deletes and new FirstSeq / purges. func (mset *stream) processSnapshotDeletes(snap *streamSnapshot) { + mset.mu.Lock() var state StreamState mset.store.FastState(&state) - // Always adjust if FirstSeq has moved beyond our state. if snap.FirstSeq > state.FirstSeq { mset.store.Compact(snap.FirstSeq) mset.store.FastState(&state) - mset.setLastSeq(state.LastSeq) + mset.lseq = state.LastSeq + mset.clearAllPreAcksBelowFloor(state.FirstSeq) } + mset.mu.Unlock() + // Range the deleted and delete if applicable. for _, dseq := range snap.Deleted { if dseq > state.FirstSeq && dseq <= state.LastSeq { @@ -7262,28 +7632,47 @@ func (mset *stream) processSnapshot(snap *streamSnapshot) (e error) { // we are synched for the next message sequence properly. lastRequested := sreq.LastSeq checkFinalState := func() { - if mset != nil { - mset.mu.Lock() - var state StreamState + // Bail if no stream. + if mset == nil { + return + } + + mset.mu.Lock() + var state StreamState + mset.store.FastState(&state) + var didReset bool + firstExpected := lastRequested + 1 + if state.FirstSeq != firstExpected { + // Reset our notion of first. 
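+			// As a worked example with hypothetical numbers: if the leader sent us
+			// through sequence 110 (lastRequested), firstExpected is 111; a store
+			// still reporting first == 90 is compacted up to 111, and any pre-acks
+			// at or below the new floor are cleared so ack state cannot point at
+			// messages that no longer exist.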
+ mset.store.Compact(firstExpected) mset.store.FastState(&state) - var didReset bool - firstExpected := lastRequested + 1 - if state.FirstSeq != firstExpected { - // Reset our notion of first. - mset.store.Compact(firstExpected) - mset.store.FastState(&state) - // Make sure last is also correct in case this also moved. - mset.lseq = state.LastSeq - didReset = true - } - mset.mu.Unlock() - if didReset { - s.Warnf("Catchup for stream '%s > %s' resetting first sequence: %d on catchup complete", - mset.account(), mset.name(), firstExpected) - } + // Make sure last is also correct in case this also moved. + mset.lseq = state.LastSeq + mset.clearAllPreAcksBelowFloor(state.FirstSeq) + didReset = true + } + mset.mu.Unlock() + + if didReset { + s.Warnf("Catchup for stream '%s > %s' resetting first sequence: %d on catchup complete", + mset.account(), mset.name(), firstExpected) + } + + mset.mu.RLock() + consumers := make([]*consumer, 0, len(mset.consumers)) + for _, o := range mset.consumers { + consumers = append(consumers, o) + } + mset.mu.RUnlock() + for _, o := range consumers { + o.checkStateForInterestStream() } } + // Do not let this go on forever. + const maxRetries = 3 + var numRetries int + RETRY: // On retry, we need to release the semaphore we got. Call will be no-op // if releaseSem boolean has not been set to true on successfully getting @@ -7300,13 +7689,20 @@ RETRY: sub = nil } - // Block here if we have too many requests in flight. - <-s.syncOutSem - releaseSem = true if !s.isRunning() { return ErrServerNotRunning } + numRetries++ + if numRetries >= maxRetries { + // Force a hard reset here. + return errFirstSequenceMismatch + } + + // Block here if we have too many requests in flight. + <-s.syncOutSem + releaseSem = true + // We may have been blocked for a bit, so the reset need to ensure that we // consume the already fired timer. if !notActive.Stop() { @@ -7400,7 +7796,7 @@ RETRY: } else if err == NewJSInsufficientResourcesError() { notifyLeaderStopCatchup(mrec, err) if mset.js.limitsExceeded(mset.cfg.Storage) { - s.resourcesExeededError() + s.resourcesExceededError() } else { s.Warnf("Catchup for stream '%s > %s' errored, account resources exceeded: %v", mset.account(), mset.name(), err) } @@ -7472,11 +7868,17 @@ func (mset *stream) processCatchupMsg(msg []byte) (uint64, error) { return 0, errCatchupBadMsg } - mset.mu.RLock() + mset.mu.Lock() st := mset.cfg.Storage ddloaded := mset.ddloaded tierName := mset.tier - mset.mu.RUnlock() + + if mset.hasAllPreAcks(seq, subj) { + mset.clearAllPreAcks(seq) + // Mark this to be skipped + subj, ts = _EMPTY_, 0 + } + mset.mu.Unlock() if mset.js.limitsExceeded(st) { return 0, NewJSInsufficientResourcesError() @@ -7693,9 +8095,9 @@ func (mset *stream) processClusterStreamInfoRequest(reply string) { return } - // If we are not the leader let someone else possible respond first. + // If we are not the leader let someone else possibly respond first. 
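+	// This is best-effort staggering, not coordination: followers simply delay
+	// so the leader's authoritative response normally wins the inbox race.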
if !isLeader { - time.Sleep(200 * time.Millisecond) + time.Sleep(500 * time.Millisecond) } si := &StreamInfo{ diff --git a/server/jetstream_cluster_1_test.go b/server/jetstream_cluster_1_test.go index b899f0e3d..8df84b379 100644 --- a/server/jetstream_cluster_1_test.go +++ b/server/jetstream_cluster_1_test.go @@ -971,6 +971,7 @@ func TestJetStreamClusterRestoreSingleConsumer(t *testing.T) { c.stopAll() c.restartAll() c.waitOnLeader() + c.waitOnStreamLeader("$G", "foo") s = c.randomServer() nc, js = jsClientConnect(t, s) @@ -3719,6 +3720,7 @@ func TestJetStreamClusterAccountPurge(t *testing.T) { resolver: { type: full dir: '%s/jwt' + timeout: "10ms" }`, ojwt, syspub, storeDir) }) defer c.shutdown() @@ -3891,7 +3893,6 @@ func TestJetStreamClusterAccountPurge(t *testing.T) { } c.restartAll() checkForDirs(t, 6, 4) - c.waitOnClusterReady() // unfortunately, this does not wait until leader is not catching up. purge(t) checkForDirs(t, 0, 0) c.stopAll() @@ -5818,18 +5819,6 @@ func TestJetStreamClusterFailMirrorsAndSources(t *testing.T) { }) } - testPrefix("mirror-bad-deliverprefix", JSStreamExternalDelPrefixOverlapsErrF, StreamConfig{ - Name: "MY_MIRROR_TEST", - Storage: FileStorage, - Mirror: &StreamSource{ - Name: "TEST", - External: &ExternalStream{ - ApiPrefix: "RI.JS.API", - // this will result in test.test.> which test.> would match - DeliverPrefix: "test", - }, - }, - }) testPrefix("mirror-bad-apiprefix", JSStreamExternalApiOverlapErrF, StreamConfig{ Name: "MY_MIRROR_TEST", Storage: FileStorage, @@ -5841,18 +5830,6 @@ func TestJetStreamClusterFailMirrorsAndSources(t *testing.T) { }, }, }) - testPrefix("source-bad-deliverprefix", JSStreamExternalDelPrefixOverlapsErrF, StreamConfig{ - Name: "MY_SOURCE_TEST", - Storage: FileStorage, - Sources: []*StreamSource{{ - Name: "TEST", - External: &ExternalStream{ - ApiPrefix: "RI.JS.API", - DeliverPrefix: "test", - }, - }, - }, - }) testPrefix("source-bad-apiprefix", JSStreamExternalApiOverlapErrF, StreamConfig{ Name: "MY_SOURCE_TEST", Storage: FileStorage, diff --git a/server/jetstream_cluster_2_test.go b/server/jetstream_cluster_2_test.go index bd530ca10..e927742f6 100644 --- a/server/jetstream_cluster_2_test.go +++ b/server/jetstream_cluster_2_test.go @@ -3305,6 +3305,7 @@ func TestJetStreamClusterStreamUpdateSyncBug(t *testing.T) { } // We need to snapshot to force upper layer catchup vs RAFT layer. + c.waitOnAllCurrent() mset, err := c.streamLeader("$G", "TEST").GlobalAccount().lookupStream("TEST") if err != nil { t.Fatalf("Expected to find a stream for %q", "TEST") @@ -3313,6 +3314,7 @@ func TestJetStreamClusterStreamUpdateSyncBug(t *testing.T) { t.Fatalf("Unexpected error: %v", err) } + c.waitOnAllCurrent() nsl = c.restartServer(nsl) c.waitOnStreamCurrent(nsl, "$G", "TEST") @@ -4322,7 +4324,7 @@ func TestJetStreamClusterStreamReplicaUpdates(t *testing.T) { require_NoError(t, err) c.waitOnStreamLeader("$G", "TEST") - checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { + checkFor(t, 10*time.Second, 100*time.Millisecond, func() error { si, err = js.StreamInfo("TEST") require_NoError(t, err) if len(si.Cluster.Replicas) != r-1 { @@ -4936,11 +4938,11 @@ func TestJetStreamClusterDuplicateMsgIdsOnCatchupAndLeaderTakeover(t *testing.T) // Now restart sr = c.restartServer(sr) c.waitOnStreamCurrent(sr, "$G", "TEST") + c.waitOnStreamLeader("$G", "TEST") // Now make them the leader. 
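+	// The stepdown request below deliberately ignores the error: the leader
+	// can move (or the request can time out) mid-loop, and waitOnStreamLeader
+	// re-synchronizes before the condition is checked again.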
for sr != c.streamLeader("$G", "TEST") { - _, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second) - require_NoError(t, err) + nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second) c.waitOnStreamLeader("$G", "TEST") } @@ -5238,12 +5240,14 @@ func TestJetStreamClusterDeleteAndRestoreAndRestart(t *testing.T) { nc, js = jsClientConnect(t, c.randomServer()) defer nc.Close() - si, err := js.StreamInfo("TEST") - require_NoError(t, err) - - if si.State.Msgs != 22 { - t.Fatalf("State is not correct after restart") - } + checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + if si.State.Msgs != 22 { + return fmt.Errorf("State is not correct after restart, expected 22 msgs, got %d", si.State.Msgs) + } + return nil + }) ci, err := js.ConsumerInfo("TEST", "dlc") require_NoError(t, err) @@ -5782,9 +5786,6 @@ func TestJetStreamClusterConsumerDeliverNewMaxRedeliveriesAndServerRestart(t *te t.Fatalf("Expected timeout, got msg=%+v err=%v", msg, err) } - // Give a chance to things to be persisted - time.Sleep(300 * time.Millisecond) - // Check server restart nc.Close() c.stopAll() diff --git a/server/jetstream_cluster_3_test.go b/server/jetstream_cluster_3_test.go index 03e1e52e8..7786f414b 100644 --- a/server/jetstream_cluster_3_test.go +++ b/server/jetstream_cluster_3_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 The NATS Authors +// Copyright 2022-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -27,9 +27,11 @@ import ( "reflect" "strings" "sync" + "sync/atomic" "testing" "time" + "github.com/nats-io/jwt/v2" "github.com/nats-io/nats.go" ) @@ -313,7 +315,7 @@ func TestJetStreamClusterDeleteConsumerWhileServerDown(t *testing.T) { // Restart. s = c.restartServer(s) - checkFor(t, time.Second, 200*time.Millisecond, func() error { + checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { hs := s.healthz(&HealthzOptions{ JSEnabledOnly: false, JSServerOnly: false, @@ -355,7 +357,7 @@ func TestJetStreamClusterDeleteConsumerWhileServerDown(t *testing.T) { // Restart. s = c.restartServer(s) - checkFor(t, time.Second, 200*time.Millisecond, func() error { + checkFor(t, time.Second*2, 200*time.Millisecond, func() error { hs := s.healthz(&HealthzOptions{ JSEnabledOnly: false, JSServerOnly: false, @@ -398,7 +400,7 @@ func TestJetStreamClusterNegativeReplicas(t *testing.T) { }) require_NoError(t, err) - // Check upadte now. + // Check update now. 
_, err = js.UpdateStream(&nats.StreamConfig{ Name: name, Replicas: -11, @@ -1046,6 +1048,7 @@ func TestJetStreamClusterSourceWithOptStartTime(t *testing.T) { sd := s.JetStreamConfig().StoreDir s.Shutdown() s = RunJetStreamServerOnPort(-1, sd) + defer s.Shutdown() } // Wait a bit before checking because sync'ing (even with the defect) @@ -1590,7 +1593,7 @@ func TestJetStreamGhostEphemeralsAfterRestart(t *testing.T) { defer nc.Close() subj := fmt.Sprintf(JSApiConsumerListT, "TEST") - checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { + checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { m, err := nc.Request(subj, nil, time.Second) if err != nil { return err @@ -2241,14 +2244,15 @@ func TestJetStreamClusterMemLeaderRestart(t *testing.T) { // Make sure that we have a META leader (there can always be a re-election) c.waitOnLeader() + c.waitOnStreamLeader(globalAccountName, "foo") // Should still have quorum and a new leader - checkFor(t, time.Second, 200*time.Millisecond, func() error { + checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { osi, err = jsc.StreamInfo("foo") if err != nil { return fmt.Errorf("expected healthy stream asset, got %s", err.Error()) } - if osi.Cluster.Leader == "" { + if osi.Cluster.Leader == _EMPTY_ { return fmt.Errorf("expected healthy stream asset with new leader") } if osi.State.Msgs != uint64(toSend) { @@ -2728,13 +2732,16 @@ func TestJetStreamClusterInterestPolicyEphemeral(t *testing.T) { } const msgs = 5_000 - done, count := make(chan bool), 0 + done, count := make(chan bool, 1), 0 sub, err := js.Subscribe(_EMPTY_, func(msg *nats.Msg) { require_NoError(t, msg.Ack()) count++ if count >= msgs { - done <- true + select { + case done <- true: + default: + } } }, nats.Bind(test.stream, name), nats.ManualAck()) require_NoError(t, err) @@ -2923,3 +2930,2457 @@ func TestJetStreamClusterStreamMaxAgeScaleUp(t *testing.T) { }) } } + +func TestJetStreamClusterWorkQueueConsumerReplicatedAfterScaleUp(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 1, + Subjects: []string{"WQ"}, + Retention: nats.WorkQueuePolicy, + }) + require_NoError(t, err) + + // Create an ephemeral consumer. + sub, err := js.SubscribeSync("WQ") + require_NoError(t, err) + + // Scale up to R3. 
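+	// Note UpdateStream takes the full config, so subjects and retention are
+	// repeated unchanged here; only Replicas moves from 1 to 3.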
+ _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 3, + Subjects: []string{"WQ"}, + Retention: nats.WorkQueuePolicy, + }) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + + ci, err := sub.ConsumerInfo() + require_NoError(t, err) + + require_True(t, ci.Config.Replicas == 0 || ci.Config.Replicas == 3) + + c.waitOnConsumerLeader(globalAccountName, "TEST", ci.Name) + s := c.consumerLeader(globalAccountName, "TEST", ci.Name) + require_NotNil(t, s) + + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + + o := mset.lookupConsumer(ci.Name) + require_NotNil(t, o) + require_NotNil(t, o.raftNode()) +} + +// https://github.com/nats-io/nats-server/issues/3953 +func TestJetStreamClusterWorkQueueAfterScaleUp(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 1, + Subjects: []string{"WQ"}, + Retention: nats.WorkQueuePolicy, + }) + require_NoError(t, err) + + _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ + Durable: "d1", + DeliverSubject: "d1", + AckPolicy: nats.AckExplicitPolicy, + }) + require_NoError(t, err) + + wch := make(chan bool, 1) + _, err = nc.Subscribe("d1", func(msg *nats.Msg) { + msg.AckSync() + wch <- true + }) + require_NoError(t, err) + + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 3, + Subjects: []string{"WQ"}, + Retention: nats.WorkQueuePolicy, + }) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + + sendStreamMsg(t, nc, "WQ", "SOME WORK") + <-wch + + checkFor(t, time.Second, 200*time.Millisecond, func() error { + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + if si.State.Msgs == 0 { + return nil + } + return fmt.Errorf("Still have %d msgs left", si.State.Msgs) + }) +} + +func TestJetStreamClusterInterestBasedStreamAndConsumerSnapshots(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 3, + Subjects: []string{"foo"}, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + + sub, err := js.SubscribeSync("foo", nats.Durable("d22")) + require_NoError(t, err) + + num := 200 + for i := 0; i < num; i++ { + js.PublishAsync("foo", []byte("ok")) + } + select { + case <-js.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + checkSubsPending(t, sub, num) + + // Shutdown one server. + s := c.randomServer() + s.Shutdown() + + c.waitOnStreamLeader(globalAccountName, "TEST") + + nc, js = jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // Now ack all messages while the other server is down. + for i := 0; i < num; i++ { + m, err := sub.NextMsg(time.Second) + require_NoError(t, err) + m.AckSync() + } + + // Wait for all message acks to be processed and all messages to be removed. + checkFor(t, time.Second, 200*time.Millisecond, func() error { + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + if si.State.Msgs == 0 { + return nil + } + return fmt.Errorf("Still have %d msgs left", si.State.Msgs) + }) + + // Force a snapshot on the consumer leader before restarting the downed server. 
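+	// The shape of this, in short: serialize the consumer's store state and
+	// install it into its raft node, so the returning follower catches up from
+	// the snapshot instead of replaying the whole ack log.
+	//
+	//	snap, _ := o.store.EncodedState()
+	//	_ = o.raftNode().InstallSnapshot(snap)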
+ cl := c.consumerLeader(globalAccountName, "TEST", "d22") + require_NotNil(t, cl) + + mset, err := cl.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + + o := mset.lookupConsumer("d22") + require_NotNil(t, o) + + snap, err := o.store.EncodedState() + require_NoError(t, err) + + n := o.raftNode() + require_NotNil(t, n) + require_NoError(t, n.InstallSnapshot(snap)) + + // Now restart the downed server. + s = c.restartServer(s) + + // Make the restarted server the eventual leader. + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnStreamLeader(globalAccountName, "TEST") + if sl := c.streamLeader(globalAccountName, "TEST"); sl != s { + sl.JetStreamStepdownStream(globalAccountName, "TEST") + return fmt.Errorf("Server %s is not leader yet", s) + } + return nil + }) + + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, si.State.Msgs == 0) +} + +func TestJetStreamClusterConsumerFollowerStoreStateAckFloorBug(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 3, + Subjects: []string{"foo"}, + }) + require_NoError(t, err) + + sub, err := js.PullSubscribe(_EMPTY_, "C", nats.BindStream("TEST"), nats.ManualAck()) + require_NoError(t, err) + + num := 100 + for i := 0; i < num; i++ { + sendStreamMsg(t, nc, "foo", "data") + } + + // This one prevents the state for pending from reaching 0 and resetting, which would not show the bug. + sendStreamMsg(t, nc, "foo", "data") + + // Ack all but one and out of order and make sure all consumers have the same stored state. + msgs, err := sub.Fetch(num, nats.MaxWait(time.Second)) + require_NoError(t, err) + require_True(t, len(msgs) == num) + + _, err = sub.Fetch(1, nats.MaxWait(time.Second)) + require_NoError(t, err) + + rand.Shuffle(len(msgs), func(i, j int) { msgs[i], msgs[j] = msgs[j], msgs[i] }) + for _, m := range msgs { + if err := m.AckSync(); err != nil { + t.Fatalf("Ack failed :%+v", err) + } + } + + checkConsumerState := func(delivered, ackFloor nats.SequenceInfo, numAckPending int) error { + expectedDelivered := uint64(num) + 1 + if delivered.Stream != expectedDelivered || delivered.Consumer != expectedDelivered { + return fmt.Errorf("Wrong delivered, expected %d got %+v", expectedDelivered, delivered) + } + expectedAck := uint64(num) + if ackFloor.Stream != expectedAck || ackFloor.Consumer != expectedAck { + return fmt.Errorf("Wrong ackFloor, expected %d got %+v", expectedAck, ackFloor) + } + if numAckPending != 1 { + return errors.New("Expected num ack pending to be 1") + } + return nil + } + + ci, err := js.ConsumerInfo("TEST", "C") + require_NoError(t, err) + require_NoError(t, checkConsumerState(ci.Delivered, ci.AckFloor, ci.NumAckPending)) + + // Check each consumer on each server for it's store state and make sure it matches as well. 
+	checkFor(t, 20*time.Second, 200*time.Millisecond, func() error {
+		for _, s := range c.servers {
+			mset, err := s.GlobalAccount().lookupStream("TEST")
+			if err != nil {
+				return err
+			}
+			if mset == nil {
+				return errors.New("Mset should not be nil")
+			}
+			o := mset.lookupConsumer("C")
+			if o == nil {
+				return errors.New("Consumer should not be nil")
+			}
+
+			state, err := o.store.State()
+			if err != nil {
+				return err
+			}
+			delivered := nats.SequenceInfo{Stream: state.Delivered.Stream, Consumer: state.Delivered.Consumer}
+			ackFloor := nats.SequenceInfo{Stream: state.AckFloor.Stream, Consumer: state.AckFloor.Consumer}
+			if err := checkConsumerState(delivered, ackFloor, len(state.Pending)); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	// Now stepdown the consumer and move its leader and check the state after transition.
+	// Make the restarted server the eventual leader.
+	seen := make(map[*Server]bool)
+	cl := c.consumerLeader(globalAccountName, "TEST", "C")
+	require_NotNil(t, cl)
+	seen[cl] = true
+
+	allSeen := func() bool {
+		for _, s := range c.servers {
+			if !seen[s] {
+				return false
+			}
+		}
+		return true
+	}
+
+	checkAllLeaders := func() {
+		t.Helper()
+		checkFor(t, 20*time.Second, 200*time.Millisecond, func() error {
+			c.waitOnConsumerLeader(globalAccountName, "TEST", "C")
+			if allSeen() {
+				return nil
+			}
+			cl := c.consumerLeader(globalAccountName, "TEST", "C")
+			seen[cl] = true
+			ci, err := js.ConsumerInfo("TEST", "C")
+			if err != nil {
+				return err
+			}
+			if err := checkConsumerState(ci.Delivered, ci.AckFloor, ci.NumAckPending); err != nil {
+				return err
+			}
+			cl.JetStreamStepdownConsumer(globalAccountName, "TEST", "C")
+			return fmt.Errorf("Not all servers have been consumer leader yet")
+		})
+	}
+
+	checkAllLeaders()
+
+	// Now restart all servers and check again.
+	c.stopAll()
+	c.restartAll()
+	c.waitOnLeader()
+
+	nc, js = jsClientConnect(t, c.randomServer())
+	defer nc.Close()
+
+	seen = make(map[*Server]bool)
+	checkAllLeaders()
+}
+
+func TestJetStreamClusterInterestLeakOnDisableJetStream(t *testing.T) {
+	c := createJetStreamClusterExplicit(t, "R3S", 3)
+	defer c.shutdown()
+
+	nc, js := jsClientConnect(t, c.leader())
+	defer nc.Close()
+
+	for i := 1; i <= 5; i++ {
+		_, err := js.AddStream(&nats.StreamConfig{
+			Name:     fmt.Sprintf("test_%d", i),
+			Subjects: []string{fmt.Sprintf("test_%d", i)},
+			Replicas: 3,
+		})
+		require_NoError(t, err)
+	}
+
+	c.waitOnAllCurrent()
+
+	server := c.randomNonLeader()
+	account := server.SystemAccount()
+
+	server.DisableJetStream()
+
+	var sublist []*subscription
+	account.sl.localSubs(&sublist, false)
+
+	var danglingJSC, danglingRaft int
+	for _, sub := range sublist {
+		if strings.HasPrefix(string(sub.subject), "$JSC.") {
+			danglingJSC++
+		} else if strings.HasPrefix(string(sub.subject), "$NRG.") {
+			danglingRaft++
+		}
+	}
+	if danglingJSC > 0 || danglingRaft > 0 {
+		t.Fatalf("unexpected dangling interests for JetStream assets after shutdown (%d $JSC, %d $NRG)", danglingJSC, danglingRaft)
+	}
+}
+
+func TestJetStreamClusterNoLeadersDuringLameDuck(t *testing.T) {
+	c := createJetStreamClusterExplicit(t, "R3S", 3)
+	defer c.shutdown()
+
+	// Grab the first server and set lameduck option directly.
+	s := c.servers[0]
+	s.optsMu.Lock()
+	s.opts.LameDuckDuration = 5 * time.Second
+	s.opts.LameDuckGracePeriod = -5 * time.Second
+	s.optsMu.Unlock()
+
+	// Connect to the third server.
+ nc, js := jsClientConnect(t, c.servers[2]) + defer nc.Close() + + allServersHaveLeaders := func() bool { + haveLeader := make(map[*Server]bool) + for _, s := range c.servers { + s.rnMu.RLock() + for _, n := range s.raftNodes { + if n.Leader() { + haveLeader[s] = true + break + } + } + s.rnMu.RUnlock() + } + return len(haveLeader) == len(c.servers) + } + + // Create streams until we have a leader on all the servers. + var index int + checkFor(t, 10*time.Second, time.Millisecond, func() error { + if allServersHaveLeaders() { + return nil + } + index++ + _, err := js.AddStream(&nats.StreamConfig{ + Name: fmt.Sprintf("TEST_%d", index), + Subjects: []string{fmt.Sprintf("foo.%d", index)}, + Replicas: 3, + }) + require_NoError(t, err) + return fmt.Errorf("All servers do not have at least one leader") + }) + + // Put our server into lameduck mode. + // Need a client. + dummy, _ := jsClientConnect(t, s) + defer dummy.Close() + go s.lameDuckMode() + + // Wait for all leaders to move off. + checkFor(t, 2*time.Second, 50*time.Millisecond, func() error { + s.rnMu.RLock() + defer s.rnMu.RUnlock() + for _, n := range s.raftNodes { + if n.Leader() { + return fmt.Errorf("Server still has a leader") + } + } + return nil + }) + + // All leader evacuated. + + // Create a go routine that will create streams constantly. + qch := make(chan bool) + go func() { + var index int + for { + select { + case <-time.After(time.Millisecond): + index++ + _, err := js.AddStream(&nats.StreamConfig{ + Name: fmt.Sprintf("NEW_TEST_%d", index), + Subjects: []string{fmt.Sprintf("bar.%d", index)}, + Replicas: 3, + }) + if err != nil { + return + } + case <-qch: + return + } + } + }() + defer close(qch) + + // Make sure we do not have any leaders placed on the lameduck server. + for s.isRunning() { + var hasLeader bool + s.rnMu.RLock() + for _, n := range s.raftNodes { + hasLeader = hasLeader || n.Leader() + } + s.rnMu.RUnlock() + if hasLeader { + t.Fatalf("Server had a leader when it should not due to lameduck mode") + } + } +} + +func TestJetStreamClusterNoR1AssetsDuringLameDuck(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + // Grab the first server and set lameduck option directly. + s := c.servers[0] + s.optsMu.Lock() + s.opts.LameDuckDuration = 5 * time.Second + s.opts.LameDuckGracePeriod = -5 * time.Second + s.optsMu.Unlock() + + // Connect to the server to keep it alive when we go into LDM. + dummy, _ := jsClientConnect(t, s) + defer dummy.Close() + + // Connect to the third server. + nc, js := jsClientConnect(t, c.servers[2]) + defer nc.Close() + + // Now put the first server into lame duck mode. + go s.lameDuckMode() + + // Wait for news to arrive that the first server has gone into + // lame duck mode and been marked offline. + checkFor(t, 2*time.Second, 50*time.Millisecond, func() error { + id := s.info.ID + s := c.servers[2] + s.mu.RLock() + defer s.mu.RUnlock() + + var isOffline bool + s.nodeToInfo.Range(func(_, v any) bool { + ni := v.(nodeInfo) + if ni.id == id { + isOffline = ni.offline + return false + } + return true + }) + + if !isOffline { + return fmt.Errorf("first node is still online unexpectedly") + } + return nil + }) + + // Create a go routine that will create streams constantly. 
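+	// The churn keeps the meta layer proposing fresh R3 groups while the
+	// first server drains, so a placement bug that still hands leadership to
+	// a lameduck node would surface in the final isRunning loop below.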
+ qch := make(chan bool) + go func() { + var index int + for { + select { + case <-time.After(time.Millisecond * 25): + index++ + _, err := js.AddStream(&nats.StreamConfig{ + Name: fmt.Sprintf("NEW_TEST_%d", index), + Subjects: []string{fmt.Sprintf("bar.%d", index)}, + Replicas: 1, + }) + if err != nil { + return + } + case <-qch: + return + } + } + }() + defer close(qch) + + // Make sure we do not have any R1 assets placed on the lameduck server. + for s.isRunning() { + s.rnMu.RLock() + if s.js == nil || s.js.srv == nil || s.js.srv.gacc == nil { + s.rnMu.RUnlock() + break + } + hasAsset := len(s.js.srv.gacc.streams()) > 0 + s.rnMu.RUnlock() + if hasAsset { + t.Fatalf("Server had an R1 asset when it should not due to lameduck mode") + } + } +} + +// If a consumer has not been registered (possible in heavily loaded systems with lots of assets) +// it could miss the signal of a message going away. If that message was pending and expires the +// ack floor could fall below the stream first sequence. This test will force that condition and +// make sure the system resolves itself. +func TestJetStreamClusterConsumerAckFloorDrift(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + MaxAge: 200 * time.Millisecond, + MaxMsgs: 10, + }) + require_NoError(t, err) + + sub, err := js.PullSubscribe("foo", "C") + require_NoError(t, err) + + for i := 0; i < 10; i++ { + sendStreamMsg(t, nc, "foo", "HELLO") + } + + // No-op but will surface as delivered. + _, err = sub.Fetch(10) + require_NoError(t, err) + + // We will grab the state with delivered being 10 and ackfloor being 0 directly. + cl := c.consumerLeader(globalAccountName, "TEST", "C") + require_NotNil(t, cl) + + mset, err := cl.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + o := mset.lookupConsumer("C") + require_NotNil(t, o) + o.mu.RLock() + state, err := o.store.State() + o.mu.RUnlock() + require_NoError(t, err) + require_NotNil(t, state) + + // Now let messages expire. + checkFor(t, time.Second, 100*time.Millisecond, func() error { + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + if si.State.Msgs == 0 { + return nil + } + return fmt.Errorf("stream still has msgs") + }) + + // Set state to ackfloor of 5 and no pending. + state.AckFloor.Consumer = 5 + state.AckFloor.Stream = 5 + state.Pending = nil + + // Now put back the state underneath of the consumers. + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + o := mset.lookupConsumer("C") + require_NotNil(t, o) + o.mu.Lock() + err = o.setStoreState(state) + cfs := o.store.(*consumerFileStore) + o.mu.Unlock() + require_NoError(t, err) + // The lower layer will ignore, so set more directly. + cfs.mu.Lock() + cfs.state = *state + cfs.mu.Unlock() + // Also snapshot to remove any raft entries that could affect it. + snap, err := o.store.EncodedState() + require_NoError(t, err) + require_NoError(t, o.raftNode().InstallSnapshot(snap)) + } + + cl.JetStreamStepdownConsumer(globalAccountName, "TEST", "C") + c.waitOnConsumerLeader(globalAccountName, "TEST", "C") + + checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { + ci, err := js.ConsumerInfo("TEST", "C") + require_NoError(t, err) + // Make sure we catch this and adjust. 
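+		// Expected self-heal: the stale floor of 5 points below the stream's
+		// first sequence, so the consumer must snap its ack floor forward to
+		// the stream floor (10 here) rather than reference purged messages.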
+ if ci.AckFloor.Stream == 10 && ci.AckFloor.Consumer == 10 { + return nil + } + return fmt.Errorf("AckFloor not correct, expected 10, got %+v", ci.AckFloor) + }) +} + +func TestJetStreamClusterInterestStreamFilteredConsumersWithNoInterest(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R5S", 5) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Retention: nats.InterestPolicy, + Replicas: 3, + }) + require_NoError(t, err) + + // Create three subscribers. + ackCb := func(m *nats.Msg) { m.Ack() } + + _, err = js.Subscribe("foo", ackCb, nats.BindStream("TEST"), nats.ManualAck()) + require_NoError(t, err) + + _, err = js.Subscribe("bar", ackCb, nats.BindStream("TEST"), nats.ManualAck()) + require_NoError(t, err) + + _, err = js.Subscribe("baz", ackCb, nats.BindStream("TEST"), nats.ManualAck()) + require_NoError(t, err) + + // Now send 100 messages, randomly picking foo or bar, but never baz. + for i := 0; i < 100; i++ { + if rand.Intn(2) > 0 { + sendStreamMsg(t, nc, "foo", "HELLO") + } else { + sendStreamMsg(t, nc, "bar", "WORLD") + } + } + + // Messages are expected to go to 0. + checkFor(t, time.Second, 100*time.Millisecond, func() error { + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + if si.State.Msgs == 0 { + return nil + } + return fmt.Errorf("stream still has msgs") + }) +} + +func TestJetStreamClusterChangeClusterAfterStreamCreate(t *testing.T) { + c := createJetStreamClusterExplicit(t, "NATS", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + }) + require_NoError(t, err) + + for i := 0; i < 1000; i++ { + sendStreamMsg(t, nc, "foo", "HELLO") + } + + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 1, + }) + require_NoError(t, err) + + c.stopAll() + + c.name = "FOO" + for _, o := range c.opts { + buf, err := os.ReadFile(o.ConfigFile) + require_NoError(t, err) + nbuf := bytes.Replace(buf, []byte("name: NATS"), []byte("name: FOO"), 1) + err = os.WriteFile(o.ConfigFile, nbuf, 0640) + require_NoError(t, err) + } + + c.restartAll() + c.waitOnLeader() + c.waitOnStreamLeader(globalAccountName, "TEST") + + nc, js = jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + }) + // This should fail with no suitable peers, since the asset was created under the NATS cluster which has no peers. + require_Error(t, err, errors.New("nats: no suitable peers for placement")) + + // Make sure we can swap the cluster. + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Placement: &nats.Placement{Cluster: "FOO"}, + }) + require_NoError(t, err) +} + +// The consumer info() call does not take into account whether a consumer +// is a leader or not, so results would be very different when asking servers +// that housed consumer followers vs leaders. 
+func TestJetStreamClusterConsumerInfoForJszForFollowers(t *testing.T) {
+	c := createJetStreamClusterExplicit(t, "NATS", 3)
+	defer c.shutdown()
+
+	nc, js := jsClientConnect(t, c.randomServer())
+	defer nc.Close()
+
+	_, err := js.AddStream(&nats.StreamConfig{
+		Name:     "TEST",
+		Subjects: []string{"*"},
+		Replicas: 3,
+	})
+	require_NoError(t, err)
+
+	for i := 0; i < 1000; i++ {
+		sendStreamMsg(t, nc, "foo", "HELLO")
+	}
+
+	sub, err := js.PullSubscribe("foo", "d")
+	require_NoError(t, err)
+
+	fetch, ack := 122, 22
+	msgs, err := sub.Fetch(fetch, nats.MaxWait(10*time.Second))
+	require_NoError(t, err)
+	require_True(t, len(msgs) == fetch)
+	for _, m := range msgs[:ack] {
+		m.AckSync()
+	}
+	// Let acks propagate.
+	time.Sleep(100 * time.Millisecond)
+
+	for _, s := range c.servers {
+		jsz, err := s.Jsz(&JSzOptions{Accounts: true, Consumer: true})
+		require_NoError(t, err)
+		require_True(t, len(jsz.AccountDetails) == 1)
+		require_True(t, len(jsz.AccountDetails[0].Streams) == 1)
+		require_True(t, len(jsz.AccountDetails[0].Streams[0].Consumer) == 1)
+		consumer := jsz.AccountDetails[0].Streams[0].Consumer[0]
+		if consumer.Delivered.Consumer != uint64(fetch) || consumer.Delivered.Stream != uint64(fetch) {
+			t.Fatalf("Incorrect delivered for %v: %+v", s, consumer.Delivered)
+		}
+		if consumer.AckFloor.Consumer != uint64(ack) || consumer.AckFloor.Stream != uint64(ack) {
+			t.Fatalf("Incorrect ackfloor for %v: %+v", s, consumer.AckFloor)
+		}
+	}
+}
+
+// Under certain scenarios we have seen consumers become stopped and cause healthz to fail.
+// The specific scenario is heavy loads, and stream resets on upgrades that could orphan consumers.
+func TestJetStreamClusterHealthzCheckForStoppedAssets(t *testing.T) {
+	c := createJetStreamClusterExplicit(t, "NATS", 3)
+	defer c.shutdown()
+
+	nc, js := jsClientConnect(t, c.randomServer())
+	defer nc.Close()
+
+	_, err := js.AddStream(&nats.StreamConfig{
+		Name:     "TEST",
+		Subjects: []string{"*"},
+		Replicas: 3,
+	})
+	require_NoError(t, err)
+
+	for i := 0; i < 1000; i++ {
+		sendStreamMsg(t, nc, "foo", "HELLO")
+	}
+
+	sub, err := js.PullSubscribe("foo", "d")
+	require_NoError(t, err)
+
+	fetch, ack := 122, 22
+	msgs, err := sub.Fetch(fetch, nats.MaxWait(10*time.Second))
+	require_NoError(t, err)
+	require_True(t, len(msgs) == fetch)
+	for _, m := range msgs[:ack] {
+		m.AckSync()
+	}
+	// Let acks propagate.
+	time.Sleep(100 * time.Millisecond)
+
+	// We will now stop a stream on a given server.
+	s := c.randomServer()
+	mset, err := s.GlobalAccount().lookupStream("TEST")
+	require_NoError(t, err)
+	// Stop the stream
+	mset.stop(false, false)
+
+	// Wait for exit.
+	time.Sleep(100 * time.Millisecond)
+
+	checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
+		hs := s.healthz(nil)
+		if hs.Error != _EMPTY_ {
+			return errors.New(hs.Error)
+		}
+		return nil
+	})
+
+	// Now take out the consumer.
+	mset, err = s.GlobalAccount().lookupStream("TEST")
+	require_NoError(t, err)
+
+	o := mset.lookupConsumer("d")
+	require_NotNil(t, o)
+
+	o.stop()
+	// Wait for exit.
+	time.Sleep(100 * time.Millisecond)
+
+	checkFor(t, 5*time.Second, 500*time.Millisecond, func() error {
+		hs := s.healthz(nil)
+		if hs.Error != _EMPTY_ {
+			return errors.New(hs.Error)
+		}
+		return nil
+	})
+
+	// Now just stop the raft node from underneath the consumer.
+ o = mset.lookupConsumer("d") + require_NotNil(t, o) + node := o.raftNode() + require_NotNil(t, node) + node.Stop() + + checkFor(t, 5*time.Second, 500*time.Millisecond, func() error { + hs := s.healthz(nil) + if hs.Error != _EMPTY_ { + return errors.New(hs.Error) + } + return nil + }) +} + +// Make sure that stopping a stream shutdowns down it's raft node. +func TestJetStreamClusterStreamNodeShutdownBugOnStop(t *testing.T) { + c := createJetStreamClusterExplicit(t, "NATS", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + }) + require_NoError(t, err) + + for i := 0; i < 100; i++ { + sendStreamMsg(t, nc, "foo", "HELLO") + } + + s := c.randomServer() + numNodesStart := s.numRaftNodes() + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + node := mset.raftNode() + require_NotNil(t, node) + node.InstallSnapshot(mset.stateSnapshot()) + // Stop the stream + mset.stop(false, false) + + if numNodes := s.numRaftNodes(); numNodes != numNodesStart-1 { + t.Fatalf("RAFT nodes after stream stop incorrect: %d vs %d", numNodesStart, numNodes) + } +} + +func TestJetStreamClusterStreamAccountingOnStoreError(t *testing.T) { + c := createJetStreamClusterWithTemplate(t, jsClusterMaxBytesAccountLimitTempl, "NATS", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + MaxBytes: 1 * 1024 * 1024 * 1024, + Replicas: 3, + }) + require_NoError(t, err) + + msg := strings.Repeat("Z", 32*1024) + for i := 0; i < 10; i++ { + sendStreamMsg(t, nc, "foo", msg) + } + s := c.randomServer() + acc, err := s.LookupAccount("$U") + require_NoError(t, err) + mset, err := acc.lookupStream("TEST") + require_NoError(t, err) + mset.mu.Lock() + mset.store.Stop() + sjs := mset.js + mset.mu.Unlock() + + // Now delete the stream + js.DeleteStream("TEST") + + // Wait for this to propgate. + // The bug will have us not release reserved resources properly. + checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { + info, err := js.AccountInfo() + require_NoError(t, err) + // Default tier + if info.Store != 0 { + return fmt.Errorf("Expected store to be 0 but got %v", friendlyBytes(info.Store)) + } + return nil + }) + + // Now check js from server directly regarding reserved. 
+ sjs.mu.RLock() + reserved := sjs.storeReserved + sjs.mu.RUnlock() + // Under bug will show 1GB + if reserved != 0 { + t.Fatalf("Expected store reserved to be 0 after stream delete, got %v", friendlyBytes(reserved)) + } +} + +func TestJetStreamClusterStreamAccountingDriftFixups(t *testing.T) { + c := createJetStreamClusterWithTemplate(t, jsClusterMaxBytesAccountLimitTempl, "NATS", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + MaxBytes: 2 * 1024 * 1024, + Replicas: 3, + }) + require_NoError(t, err) + + msg := strings.Repeat("Z", 32*1024) + for i := 0; i < 100; i++ { + sendStreamMsg(t, nc, "foo", msg) + } + + err = js.PurgeStream("TEST") + require_NoError(t, err) + + checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { + info, err := js.AccountInfo() + require_NoError(t, err) + if info.Store != 0 { + return fmt.Errorf("Store usage not 0: %d", info.Store) + } + return nil + }) + + s := c.leader() + jsz, err := s.Jsz(nil) + require_NoError(t, err) + require_True(t, jsz.JetStreamStats.Store == 0) + + acc, err := s.LookupAccount("$U") + require_NoError(t, err) + mset, err := acc.lookupStream("TEST") + require_NoError(t, err) + mset.mu.RLock() + jsa, tier, stype := mset.jsa, mset.tier, mset.stype + mset.mu.RUnlock() + // Drift the usage. + jsa.updateUsage(tier, stype, -100) + + checkFor(t, time.Second, 200*time.Millisecond, func() error { + info, err := js.AccountInfo() + require_NoError(t, err) + if info.Store != 0 { + return fmt.Errorf("Store usage not 0: %d", info.Store) + } + return nil + }) + jsz, err = s.Jsz(nil) + require_NoError(t, err) + require_True(t, jsz.JetStreamStats.Store == 0) +} + +// Some older streams seem to have been created or exist with no explicit cluster setting. +// For server <= 2.9.16 you could not scale the streams up since we could not place them in another cluster. +func TestJetStreamClusterStreamScaleUpNoGroupCluster(t *testing.T) { + c := createJetStreamClusterExplicit(t, "NATS", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + }) + require_NoError(t, err) + + // Manually going to grab stream assignment and update it to be without the group cluster. + s := c.streamLeader(globalAccountName, "TEST") + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + + sa := mset.streamAssignment() + require_NotNil(t, sa) + // Make copy to not change stream's + sa = sa.copyGroup() + // Remove cluster and preferred. + sa.Group.Cluster = _EMPTY_ + sa.Group.Preferred = _EMPTY_ + // Insert into meta layer. + s.mu.RLock() + s.js.cluster.meta.ForwardProposal(encodeUpdateStreamAssignment(sa)) + s.mu.RUnlock() + // Make sure it got propagated.. + checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { + sa := mset.streamAssignment().copyGroup() + require_NotNil(t, sa) + if sa.Group.Cluster != _EMPTY_ { + return fmt.Errorf("Cluster still not cleared") + } + return nil + }) + // Now we know it has been nil'd out. Make sure we can scale up. 
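+	// This relies on the server-side fallback added earlier in this patch:
+	// with rg.Cluster empty, the update path picks the placement directive's
+	// cluster, or the requesting client's, before selecting peers for the
+	// new replicas.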
+ _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + }) + require_NoError(t, err) +} + +// https://github.com/nats-io/nats-server/issues/4162 +func TestJetStreamClusterStaleDirectGetOnRestart(t *testing.T) { + c := createJetStreamClusterExplicit(t, "NATS", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ + Bucket: "TEST", + Replicas: 3, + }) + require_NoError(t, err) + + _, err = kv.PutString("foo", "bar") + require_NoError(t, err) + + // Close client in case we were connected to server below. + // We will recreate. + nc.Close() + + // Shutdown a non-leader. + s := c.randomNonStreamLeader(globalAccountName, "KV_TEST") + s.Shutdown() + + nc, js = jsClientConnect(t, c.randomServer()) + defer nc.Close() + + kv, err = js.KeyValue("TEST") + require_NoError(t, err) + + _, err = kv.PutString("foo", "baz") + require_NoError(t, err) + + errCh := make(chan error, 100) + done := make(chan struct{}) + + go func() { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + kv, err := js.KeyValue("TEST") + if err != nil { + errCh <- err + return + } + + for { + select { + case <-done: + return + default: + entry, err := kv.Get("foo") + if err != nil { + errCh <- err + return + } + if v := string(entry.Value()); v != "baz" { + errCh <- fmt.Errorf("Got wrong value: %q", v) + } + } + } + }() + + // Restart + c.restartServer(s) + // Wait for a bit to make sure as this server participates in direct gets + // it does not server stale reads. + time.Sleep(2 * time.Second) + close(done) + + if len(errCh) > 0 { + t.Fatalf("Expected no errors but got %v", <-errCh) + } +} + +// This test mimics a user's setup where there is a cloud cluster/domain, and one for eu and ap that are leafnoded into the +// cloud cluster, and one for cn that is leafnoded into the ap cluster. +// We broke basic connectivity in 2.9.17 from publishing in eu for delivery in cn on same account which is daisy chained through ap. +// We will also test cross account delivery in this test as well. +func TestJetStreamClusterLeafnodePlusDaisyChainSetup(t *testing.T) { + var cloudTmpl = ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, domain: CLOUD, store_dir: '%s'} + + leaf { listen: 127.0.0.1:-1 } + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { + F { + jetstream: enabled + users = [ { user: "F", pass: "pass" } ] + exports [ { stream: "F.>" } ] + } + T { + jetstream: enabled + users = [ { user: "T", pass: "pass" } ] + imports [ { stream: { account: F, subject: "F.>"} } ] + } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + }` + + // Now create the cloud and make sure we are connected. + // Cloud + c := createJetStreamCluster(t, cloudTmpl, "CLOUD", _EMPTY_, 3, 22020, false) + defer c.shutdown() + + var lnTmpl = ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + {{leaf}} + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { + F { + jetstream: enabled + users = [ { user: "F", pass: "pass" } ] + exports [ { stream: "F.>" } ] + } + T { + jetstream: enabled + users = [ { user: "T", pass: "pass" } ] + imports [ { stream: { account: F, subject: "F.>"} } ] + } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" 
} ] } + }` + + var leafFrag = ` + leaf { + listen: 127.0.0.1:-1 + remotes [ { urls: [ %s ], account: "T" }, { urls: [ %s ], account: "F" } ] + }` + + genLeafTmpl := func(tmpl string, c *cluster) string { + t.Helper() + // Create our leafnode cluster template first. + var lnt, lnf []string + for _, s := range c.servers { + if s.ClusterName() != c.name { + continue + } + ln := s.getOpts().LeafNode + lnt = append(lnt, fmt.Sprintf("nats://T:pass@%s:%d", ln.Host, ln.Port)) + lnf = append(lnf, fmt.Sprintf("nats://F:pass@%s:%d", ln.Host, ln.Port)) + } + lntc := strings.Join(lnt, ", ") + lnfc := strings.Join(lnf, ", ") + return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, lntc, lnfc), 1) + } + + // Cluster EU + // Domain is "EU' + tmpl := strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "EU"), 1) + tmpl = genLeafTmpl(tmpl, c) + lceu := createJetStreamCluster(t, tmpl, "EU", "EU-", 3, 22110, false) + lceu.waitOnClusterReady() + defer lceu.shutdown() + + for _, s := range lceu.servers { + checkLeafNodeConnectedCount(t, s, 2) + } + + // Cluster AP + // Domain is "AP' + tmpl = strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "AP"), 1) + tmpl = genLeafTmpl(tmpl, c) + lcap := createJetStreamCluster(t, tmpl, "AP", "AP-", 3, 22180, false) + lcap.waitOnClusterReady() + defer lcap.shutdown() + + for _, s := range lcap.servers { + checkLeafNodeConnectedCount(t, s, 2) + } + + // Cluster CN + // Domain is "CN' + // This one connects to AP, not the cloud hub. + tmpl = strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "CN"), 1) + tmpl = genLeafTmpl(tmpl, lcap) + lccn := createJetStreamCluster(t, tmpl, "CN", "CN-", 3, 22280, false) + lccn.waitOnClusterReady() + defer lccn.shutdown() + + for _, s := range lccn.servers { + checkLeafNodeConnectedCount(t, s, 2) + } + + // Now connect to CN on account F and subscribe to data. + nc, _ := jsClientConnect(t, lccn.randomServer(), nats.UserInfo("F", "pass")) + defer nc.Close() + fsub, err := nc.SubscribeSync("F.EU.>") + require_NoError(t, err) + + // Same for account T where the import is. + nc, _ = jsClientConnect(t, lccn.randomServer(), nats.UserInfo("T", "pass")) + defer nc.Close() + tsub, err := nc.SubscribeSync("F.EU.>") + require_NoError(t, err) + + // Let sub propagate. + time.Sleep(500 * time.Millisecond) + + // Now connect to EU on account F and generate data. + nc, _ = jsClientConnect(t, lceu.randomServer(), nats.UserInfo("F", "pass")) + defer nc.Close() + + num := 10 + for i := 0; i < num; i++ { + err := nc.Publish("F.EU.DATA", []byte(fmt.Sprintf("MSG-%d", i))) + require_NoError(t, err) + } + + checkSubsPending(t, fsub, num) + // Since we export and import in each cluster, we will receive 4x. + // First hop from EU -> CLOUD is 1F and 1T + // Second hop from CLOUD -> AP is 1F, 1T and another 1T + // Third hop from AP -> CN is 1F, 1T, 1T and 1T + // Each cluster hop that has the export/import mapping will add another T message copy. + checkSubsPending(t, tsub, num*4) + + // Create stream in cloud. + nc, js := jsClientConnect(t, c.randomServer(), nats.UserInfo("F", "pass")) + defer nc.Close() + + _, err = js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"TEST.>"}, + Replicas: 3, + }) + require_NoError(t, err) + + for i := 0; i < 100; i++ { + sendStreamMsg(t, nc, fmt.Sprintf("TEST.%d", i), "OK") + } + + // Now connect to EU. + nc, js = jsClientConnect(t, lceu.randomServer(), nats.UserInfo("F", "pass")) + defer nc.Close() + + // Create a mirror. 
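+	// The mirror crosses JetStream domains: setting Domain on the StreamSource
+	// routes the source traffic through the CLOUD domain's API prefix over the
+	// leafnode hop, instead of looking for TEST in the local EU domain.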
+ _, err = js.AddStream(&nats.StreamConfig{ + Name: "M", + Mirror: &nats.StreamSource{ + Name: "TEST", + Domain: "CLOUD", + }, + }) + require_NoError(t, err) + + checkFor(t, time.Second, 200*time.Millisecond, func() error { + si, err := js.StreamInfo("M") + require_NoError(t, err) + if si.State.Msgs == 100 { + return nil + } + return fmt.Errorf("State not current: %+v", si.State) + }) +} + +// https://github.com/nats-io/nats-server/pull/4197 +func TestJetStreamClusterPurgeExReplayAfterRestart(t *testing.T) { + c := createJetStreamClusterExplicit(t, "P3F", 3) + defer c.shutdown() + + // Client based API + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"TEST.>"}, + Replicas: 3, + }) + require_NoError(t, err) + + sendStreamMsg(t, nc, "TEST.0", "OK") + sendStreamMsg(t, nc, "TEST.1", "OK") + sendStreamMsg(t, nc, "TEST.2", "OK") + + runTest := func(f func(js nats.JetStreamManager)) *nats.StreamInfo { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // install snapshot, then execute interior func, ensuring the purge will be recovered later + fsl := c.streamLeader(globalAccountName, "TEST") + fsl.JetStreamSnapshotStream(globalAccountName, "TEST") + + f(js) + time.Sleep(250 * time.Millisecond) + + fsl.Shutdown() + fsl.WaitForShutdown() + fsl = c.restartServer(fsl) + c.waitOnServerCurrent(fsl) + + nc, js = jsClientConnect(t, c.randomServer()) + defer nc.Close() + + c.waitOnStreamLeader(globalAccountName, "TEST") + sl := c.streamLeader(globalAccountName, "TEST") + + // keep stepping down so the stream leader matches the initial leader + // we need to check if it restored from the snapshot properly + for sl != fsl { + _, err := nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + sl = c.streamLeader(globalAccountName, "TEST") + } + + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + return si + } + si := runTest(func(js nats.JetStreamManager) { + err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Subject: "TEST.0"}) + require_NoError(t, err) + }) + if si.State.Msgs != 2 { + t.Fatalf("Expected 2 msgs after restart, got %d", si.State.Msgs) + } + if si.State.FirstSeq != 2 || si.State.LastSeq != 3 { + t.Fatalf("Expected FirstSeq=2, LastSeq=3 after restart, got FirstSeq=%d, LastSeq=%d", + si.State.FirstSeq, si.State.LastSeq) + } + + si = runTest(func(js nats.JetStreamManager) { + err = js.PurgeStream("TEST") + require_NoError(t, err) + // Send 2 more messages. + sendStreamMsg(t, nc, "TEST.1", "OK") + sendStreamMsg(t, nc, "TEST.2", "OK") + }) + if si.State.Msgs != 2 { + t.Fatalf("Expected 2 msgs after restart, got %d", si.State.Msgs) + } + if si.State.FirstSeq != 4 || si.State.LastSeq != 5 { + t.Fatalf("Expected FirstSeq=4, LastSeq=5 after restart, got FirstSeq=%d, LastSeq=%d", + si.State.FirstSeq, si.State.LastSeq) + } + + // Now test a keep + si = runTest(func(js nats.JetStreamManager) { + err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Keep: 1}) + require_NoError(t, err) + // Send 4 more messages. 
+ sendStreamMsg(t, nc, "TEST.1", "OK") + sendStreamMsg(t, nc, "TEST.2", "OK") + sendStreamMsg(t, nc, "TEST.3", "OK") + sendStreamMsg(t, nc, "TEST.1", "OK") + }) + if si.State.Msgs != 5 { + t.Fatalf("Expected 5 msgs after restart, got %d", si.State.Msgs) + } + if si.State.FirstSeq != 5 || si.State.LastSeq != 9 { + t.Fatalf("Expected FirstSeq=5, LastSeq=9 after restart, got FirstSeq=%d, LastSeq=%d", + si.State.FirstSeq, si.State.LastSeq) + } + + // Now test a keep on a subject + si = runTest(func(js nats.JetStreamManager) { + err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Subject: "TEST.1", Keep: 1}) + require_NoError(t, err) + // Send 3 more messages. + sendStreamMsg(t, nc, "TEST.1", "OK") + sendStreamMsg(t, nc, "TEST.2", "OK") + sendStreamMsg(t, nc, "TEST.3", "OK") + }) + if si.State.Msgs != 7 { + t.Fatalf("Expected 7 msgs after restart, got %d", si.State.Msgs) + } + if si.State.FirstSeq != 5 || si.State.LastSeq != 12 { + t.Fatalf("Expected FirstSeq=5, LastSeq=12 after restart, got FirstSeq=%d, LastSeq=%d", + si.State.FirstSeq, si.State.LastSeq) + } +} + +func TestJetStreamClusterConsumerCleanupWithSameName(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3F", 3) + defer c.shutdown() + + // Client based API + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "UPDATES", + Subjects: []string{"DEVICE.*"}, + Replicas: 3, + }) + require_NoError(t, err) + + // Create a consumer that will be an R1 that we will auto-recreate but using the same name. + // We want to make sure that the system does not continually try to cleanup the new one from the old one. + + // Track the sequence for restart etc. + var seq atomic.Uint64 + + msgCB := func(msg *nats.Msg) { + msg.AckSync() + meta, err := msg.Metadata() + require_NoError(t, err) + seq.Store(meta.Sequence.Stream) + } + + waitOnSeqDelivered := func(expected uint64) { + checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { + received := seq.Load() + if received == expected { + return nil + } + return fmt.Errorf("Seq is %d, want %d", received, expected) + }) + } + + doSub := func() { + _, err = js.Subscribe( + "DEVICE.22", + msgCB, + nats.ConsumerName("dlc"), + nats.SkipConsumerLookup(), + nats.StartSequence(seq.Load()+1), + nats.MaxAckPending(1), // One at a time. + nats.ManualAck(), + nats.ConsumerReplicas(1), + nats.ConsumerMemoryStorage(), + nats.MaxDeliver(1), + nats.InactiveThreshold(time.Second), + nats.IdleHeartbeat(250*time.Millisecond), + ) + require_NoError(t, err) + } + + // Track any errors for consumer not active so we can recreate the consumer. + errCh := make(chan error, 10) + nc.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) { + if errors.Is(err, nats.ErrConsumerNotActive) { + s.Unsubscribe() + errCh <- err + doSub() + } + }) + + doSub() + + sendStreamMsg(t, nc, "DEVICE.22", "update-1") + sendStreamMsg(t, nc, "DEVICE.22", "update-2") + sendStreamMsg(t, nc, "DEVICE.22", "update-3") + waitOnSeqDelivered(3) + + // Shutdown the consumer's leader. + s := c.consumerLeader(globalAccountName, "UPDATES", "dlc") + s.Shutdown() + c.waitOnStreamLeader(globalAccountName, "UPDATES") + + // In case our client connection was to the same server. + nc, _ = jsClientConnect(t, c.randomServer()) + defer nc.Close() + + sendStreamMsg(t, nc, "DEVICE.22", "update-4") + sendStreamMsg(t, nc, "DEVICE.22", "update-5") + sendStreamMsg(t, nc, "DEVICE.22", "update-6") + + // Wait for the consumer not active error. 
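+	// The server removes an idle consumer once InactiveThreshold elapses with
+	// no interest or activity; the client surfaces that as
+	// nats.ErrConsumerNotActive through the async error handler above, which
+	// then re-creates the consumer from the tracked sequence.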
+	<-errCh
+	// Now restart server with the old consumer.
+	c.restartServer(s)
+	// Wait on all messages delivered.
+	waitOnSeqDelivered(6)
+	// Make sure no other errors showed up.
+	require_True(t, len(errCh) == 0)
+}
+
+func TestJetStreamClusterSnapshotAndRestoreWithHealthz(t *testing.T) {
+	c := createJetStreamClusterExplicit(t, "R3S", 3)
+	defer c.shutdown()
+
+	nc, js := jsClientConnect(t, c.randomServer())
+	defer nc.Close()
+
+	_, err := js.AddStream(&nats.StreamConfig{
+		Name: "TEST",
+		Subjects: []string{"foo"},
+		Replicas: 3,
+	})
+	require_NoError(t, err)
+
+	toSend, msg := 1000, bytes.Repeat([]byte("Z"), 1024)
+	for i := 0; i < toSend; i++ {
+		_, err := js.PublishAsync("foo", msg)
+		require_NoError(t, err)
+	}
+	select {
+	case <-js.PublishAsyncComplete():
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Did not receive completion signal")
+	}
+
+	sreq := &JSApiStreamSnapshotRequest{
+		DeliverSubject: nats.NewInbox(),
+		ChunkSize:      512,
+	}
+	req, _ := json.Marshal(sreq)
+	rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "TEST"), req, time.Second)
+	require_NoError(t, err)
+
+	var resp JSApiStreamSnapshotResponse
+	json.Unmarshal(rmsg.Data, &resp)
+	require_True(t, resp.Error == nil)
+
+	state := *resp.State
+	cfg := *resp.Config
+
+	var snapshot []byte
+	done := make(chan bool)
+
+	sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) {
+		// EOF
+		if len(m.Data) == 0 {
+			done <- true
+			return
+		}
+		// Could be writing to a file here too.
+		snapshot = append(snapshot, m.Data...)
+		// Flow ack
+		m.Respond(nil)
+	})
+	defer sub.Unsubscribe()
+
+	// Wait to receive the snapshot.
+	select {
+	case <-done:
+	case <-time.After(5 * time.Second):
+		t.Fatalf("Did not receive our snapshot in time")
+	}
+
+	// Delete before we try to restore.
+	require_NoError(t, js.DeleteStream("TEST"))
+
+	checkHealth := func() {
+		for _, s := range c.servers {
+			s.healthz(nil)
+		}
+	}
+
+	var rresp JSApiStreamRestoreResponse
+	rreq := &JSApiStreamRestoreRequest{
+		Config: cfg,
+		State:  state,
+	}
+	req, _ = json.Marshal(rreq)
+
+	rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, "TEST"), req, 5*time.Second)
+	require_NoError(t, err)
+
+	rresp.Error = nil
+	json.Unmarshal(rmsg.Data, &rresp)
+	require_True(t, rresp.Error == nil)
+
+	checkHealth()
+
+	// We will now chunk the snapshot responses (and EOF).
+	var chunk [1024]byte
+	for i, r := 0, bytes.NewReader(snapshot); ; {
+		n, err := r.Read(chunk[:])
+		if err != nil {
+			break
+		}
+		nc.Request(rresp.DeliverSubject, chunk[:n], time.Second)
+		i++
+		// We will call healthz for all servers halfway through the restore.
+		if i%100 == 0 {
+			checkHealth()
+		}
+	}
+	rmsg, err = nc.Request(rresp.DeliverSubject, nil, time.Second)
+	require_NoError(t, err)
+	rresp.Error = nil
+	json.Unmarshal(rmsg.Data, &rresp)
+	require_True(t, rresp.Error == nil)
+
+	si, err := js.StreamInfo("TEST")
+	require_NoError(t, err)
+	require_True(t, si.State.Msgs == uint64(toSend))
+
+	// Make sure stepdown works, this would fail before the fix.
+	_, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, 5*time.Second)
+	require_NoError(t, err)
+
+	si, err = js.StreamInfo("TEST")
+	require_NoError(t, err)
+	require_True(t, si.State.Msgs == uint64(toSend))
+}
+
+func TestJetStreamClusterBadEncryptKey(t *testing.T) {
+	c := createJetStreamClusterWithTemplate(t, jsClusterEncryptedTempl, "JSC", 3)
+	defer c.shutdown()
+
+	nc, js := jsClientConnect(t, c.randomServer())
+	defer nc.Close()
+
+	// Create 10 streams.
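+	// (jsClusterEncryptedTempl enables filestore encryption; its configured
+	// key, "s3cr3t!", is what gets blanked out further down to prove the
+	// server refuses to start with a bad/empty key.)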
+ for i := 0; i < 10; i++ { + _, err := js.AddStream(&nats.StreamConfig{ + Name: fmt.Sprintf("TEST-%d", i), + Replicas: 3, + }) + require_NoError(t, err) + } + + // Grab random server. + s := c.randomServer() + s.Shutdown() + s.WaitForShutdown() + + var opts *Options + for i := 0; i < len(c.servers); i++ { + if c.servers[i] == s { + opts = c.opts[i] + break + } + } + require_NotNil(t, opts) + + // Replace key with an empty key. + buf, err := os.ReadFile(opts.ConfigFile) + require_NoError(t, err) + nbuf := bytes.Replace(buf, []byte("key: \"s3cr3t!\""), []byte("key: \"\""), 1) + err = os.WriteFile(opts.ConfigFile, nbuf, 0640) + require_NoError(t, err) + + // Make sure trying to start the server now fails. + s, err = NewServer(LoadConfig(opts.ConfigFile)) + require_NoError(t, err) + require_NotNil(t, s) + s.Start() + if err := s.readyForConnections(1 * time.Second); err == nil { + t.Fatalf("Expected server not to start") + } +} + +func TestJetStreamAccountUsageDrifts(t *testing.T) { + tmpl := ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + leaf { + listen: 127.0.0.1:-1 + } + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + ` + opFrag := ` + operator: %s + system_account: %s + resolver: { type: MEM } + resolver_preload = { + %s : %s + %s : %s + } + ` + + _, syspub := createKey(t) + sysJwt := encodeClaim(t, jwt.NewAccountClaims(syspub), syspub) + + accKp, aExpPub := createKey(t) + accClaim := jwt.NewAccountClaims(aExpPub) + accClaim.Limits.JetStreamTieredLimits["R1"] = jwt.JetStreamLimits{ + DiskStorage: -1, Consumer: 1, Streams: 1} + accClaim.Limits.JetStreamTieredLimits["R3"] = jwt.JetStreamLimits{ + DiskStorage: -1, Consumer: 1, Streams: 1} + accJwt := encodeClaim(t, accClaim, aExpPub) + accCreds := newUser(t, accKp) + + template := tmpl + fmt.Sprintf(opFrag, ojwt, syspub, syspub, sysJwt, aExpPub, accJwt) + c := createJetStreamClusterWithTemplate(t, template, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer(), nats.UserCredentials(accCreds)) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST1", + Subjects: []string{"foo"}, + MaxBytes: 1 * 1024 * 1024 * 1024, + MaxMsgs: 1000, + Replicas: 3, + }) + require_NoError(t, err) + + _, err = js.AddStream(&nats.StreamConfig{ + Name: "TEST2", + Subjects: []string{"bar"}, + }) + require_NoError(t, err) + + // These expected store values can come directory from stream info's state bytes. + // We will *= 3 for R3 + checkAccount := func(r1u, r3u uint64) { + t.Helper() + r3u *= 3 + + // Remote usage updates can be delayed, so wait for a bit for values we want. + checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { + info, err := js.AccountInfo() + require_NoError(t, err) + require_True(t, len(info.Tiers) >= 2) + // These can move. + if u := info.Tiers["R1"].Store; u != r1u { + return fmt.Errorf("Expected R1 to be %v, got %v", friendlyBytes(r1u), friendlyBytes(u)) + } + if u := info.Tiers["R3"].Store; u != r3u { + return fmt.Errorf("Expected R3 to be %v, got %v", friendlyBytes(r3u), friendlyBytes(u)) + } + return nil + }) + } + + checkAccount(0, 0) + + // Now add in some R3 data. + msg := bytes.Repeat([]byte("Z"), 32*1024) // 32k + smallMsg := bytes.Repeat([]byte("Z"), 4*1024) // 4k + + for i := 0; i < 1000; i++ { + js.Publish("foo", msg) + } + sir3, err := js.StreamInfo("TEST1") + require_NoError(t, err) + + checkAccount(0, sir3.State.Bytes) + + // Now add in some R1 data. 
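+	// (TEST2 was created without an explicit Replicas value, so it defaults
+	// to R1 and its usage accrues against the R1 tier.)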
+ for i := 0; i < 100; i++ { + js.Publish("bar", msg) + } + + sir1, err := js.StreamInfo("TEST2") + require_NoError(t, err) + + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + + // We will now test a bunch of scenarios to see that we are doing accounting correctly. + + // Since our R3 has a limit of 1000 msgs, let's add in more msgs and drop older ones. + for i := 0; i < 100; i++ { + js.Publish("foo", smallMsg) + } + sir3, err = js.StreamInfo("TEST1") + require_NoError(t, err) + + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + + // Move our R3 stream leader and make sure acounting is correct. + _, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST1"), nil, time.Second) + require_NoError(t, err) + + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + + // Now scale down. + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST1", + Subjects: []string{"foo"}, + MaxBytes: 1 * 1024 * 1024 * 1024, + MaxMsgs: 1000, + Replicas: 1, + }) + require_NoError(t, err) + + checkAccount(sir1.State.Bytes+sir3.State.Bytes, 0) + + // Add in more msgs which will replace the older and bigger ones. + for i := 0; i < 100; i++ { + js.Publish("foo", smallMsg) + } + sir3, err = js.StreamInfo("TEST1") + require_NoError(t, err) + + // Now scale back up. + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST1", + Subjects: []string{"foo"}, + MaxBytes: 1 * 1024 * 1024 * 1024, + MaxMsgs: 1000, + Replicas: 3, + }) + require_NoError(t, err) + + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + + // Test Purge. + err = js.PurgeStream("TEST1") + require_NoError(t, err) + + checkAccount(sir1.State.Bytes, 0) + + for i := 0; i < 1000; i++ { + js.Publish("foo", smallMsg) + } + sir3, err = js.StreamInfo("TEST1") + require_NoError(t, err) + + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + + requestLeaderStepDown := func() { + ml := c.leader() + checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { + if cml := c.leader(); cml == ml { + nc.Request(JSApiLeaderStepDown, nil, time.Second) + return fmt.Errorf("Metaleader has not moved yet") + } + return nil + }) + } + + // Test meta leader stepdowns. + for i := 0; i < len(c.servers); i++ { + requestLeaderStepDown() + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + } + + // Now test cluster reset operations where we internally reset the NRG and optionally the stream too. + nl := c.randomNonStreamLeader(aExpPub, "TEST1") + acc, err := nl.LookupAccount(aExpPub) + require_NoError(t, err) + mset, err := acc.lookupStream("TEST1") + require_NoError(t, err) + // NRG only + mset.resetClusteredState(nil) + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + // Now NRG and Stream state itself. + mset.resetClusteredState(errFirstSequenceMismatch) + checkAccount(sir1.State.Bytes, sir3.State.Bytes) + + // Now test server restart + for _, s := range c.servers { + s.Shutdown() + s.WaitForShutdown() + s = c.restartServer(s) + + // Wait on healthz and leader etc. + checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { + if hs := s.healthz(nil); hs.Error != _EMPTY_ { + return errors.New(hs.Error) + } + return nil + }) + c.waitOnLeader() + c.waitOnStreamLeader(aExpPub, "TEST1") + c.waitOnStreamLeader(aExpPub, "TEST2") + + // Now check account again. 
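+	// Usage is recomputed from the recovered store state after each restart,
+	// so the tier totals must converge back to the same values.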
+ checkAccount(sir1.State.Bytes, sir3.State.Bytes) + } +} + +func TestJetStreamClusterStreamFailTracking(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + }) + require_NoError(t, err) + + m := nats.NewMsg("foo") + m.Data = []byte("OK") + + b, bsz := 0, 5 + sendBatch := func() { + for i := b * bsz; i < b*bsz+bsz; i++ { + msgId := fmt.Sprintf("ID:%d", i) + m.Header.Set(JSMsgId, msgId) + // Send it twice on purpose. + js.PublishMsg(m) + js.PublishMsg(m) + } + b++ + } + + sendBatch() + + _, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + + sendBatch() + + // Now stop one and restart. + nl := c.randomNonStreamLeader(globalAccountName, "TEST") + mset, err := nl.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + // Reset raft + mset.resetClusteredState(nil) + time.Sleep(100 * time.Millisecond) + + nl.Shutdown() + nl.WaitForShutdown() + + sendBatch() + + nl = c.restartServer(nl) + + sendBatch() + + for { + _, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + if nl == c.streamLeader(globalAccountName, "TEST") { + break + } + } + + sendBatch() + + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 1, + }) + require_NoError(t, err) + + // Make sure all in order. + errCh := make(chan error, 100) + var wg sync.WaitGroup + wg.Add(1) + + expected, seen := b*bsz, 0 + + sub, err := js.Subscribe("foo", func(msg *nats.Msg) { + expectedID := fmt.Sprintf("ID:%d", seen) + if v := msg.Header.Get(JSMsgId); v != expectedID { + errCh <- err + wg.Done() + msg.Sub.Unsubscribe() + return + } + seen++ + if seen >= expected { + wg.Done() + msg.Sub.Unsubscribe() + } + }) + require_NoError(t, err) + defer sub.Unsubscribe() + + wg.Wait() + if len(errCh) > 0 { + t.Fatalf("Expected no errors, got %d", len(errCh)) + } +} + +func TestJetStreamClusterStreamFailTrackingSnapshots(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + }) + require_NoError(t, err) + + m := nats.NewMsg("foo") + m.Data = []byte("OK") + + // Send 1000 a dupe every msgID. + for i := 0; i < 1000; i++ { + msgId := fmt.Sprintf("ID:%d", i) + m.Header.Set(JSMsgId, msgId) + // Send it twice on purpose. + js.PublishMsg(m) + js.PublishMsg(m) + } + + // Now stop one. + nl := c.randomNonStreamLeader(globalAccountName, "TEST") + nl.Shutdown() + nl.WaitForShutdown() + + // Now send more and make sure leader snapshots. + for i := 1000; i < 2000; i++ { + msgId := fmt.Sprintf("ID:%d", i) + m.Header.Set(JSMsgId, msgId) + // Send it twice on purpose. 
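+	// (The second publish is dropped by the duplicate-detection window keyed
+	// on the Nats-Msg-Id header, so only one copy is stored; these skipped
+	// applies are the "fail tracking" state this test exercises.)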
+ js.PublishMsg(m) + js.PublishMsg(m) + } + + sl := c.streamLeader(globalAccountName, "TEST") + mset, err := sl.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + node := mset.raftNode() + require_NotNil(t, node) + node.InstallSnapshot(mset.stateSnapshot()) + + // Now restart nl + nl = c.restartServer(nl) + c.waitOnServerCurrent(nl) + + // Move leader to NL + for { + _, err = nc.Request(fmt.Sprintf(JSApiStreamLeaderStepDownT, "TEST"), nil, time.Second) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + if nl == c.streamLeader(globalAccountName, "TEST") { + break + } + } + + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 1, + }) + require_NoError(t, err) + + // Make sure all in order. + errCh := make(chan error, 100) + var wg sync.WaitGroup + wg.Add(1) + + expected, seen := 2000, 0 + + sub, err := js.Subscribe("foo", func(msg *nats.Msg) { + expectedID := fmt.Sprintf("ID:%d", seen) + if v := msg.Header.Get(JSMsgId); v != expectedID { + errCh <- err + wg.Done() + msg.Sub.Unsubscribe() + return + } + seen++ + if seen >= expected { + wg.Done() + msg.Sub.Unsubscribe() + } + }) + require_NoError(t, err) + defer sub.Unsubscribe() + + wg.Wait() + if len(errCh) > 0 { + t.Fatalf("Expected no errors, got %d", len(errCh)) + } +} + +func TestJetStreamClusterOrphanConsumerSubjects(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo.>", "bar.>"}, + Replicas: 3, + }) + require_NoError(t, err) + + _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ + Name: "consumer_foo", + Durable: "consumer_foo", + FilterSubject: "foo.something", + }) + require_NoError(t, err) + + for _, replicas := range []int{3, 1, 3} { + _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"bar.>"}, + Replicas: replicas, + }) + require_NoError(t, err) + c.waitOnAllCurrent() + } + + c.waitOnStreamLeader("$G", "TEST") + c.waitOnConsumerLeader("$G", "TEST", "consumer_foo") + + info, err := js.ConsumerInfo("TEST", "consumer_foo") + require_NoError(t, err) + require_True(t, info.Cluster != nil) + require_NotEqual(t, info.Cluster.Leader, "") + require_Equal(t, len(info.Cluster.Replicas), 2) +} + +func TestJetStreamClusterDurableConsumerInactiveThresholdLeaderSwitch(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + }) + require_NoError(t, err) + + // Queue a msg. + sendStreamMsg(t, nc, "foo", "ok") + + thresh := 250 * time.Millisecond + + // This will start the timer. + sub, err := js.PullSubscribe("foo", "dlc", nats.InactiveThreshold(thresh)) + require_NoError(t, err) + + // Switch over leader. + cl := c.consumerLeader(globalAccountName, "TEST", "dlc") + cl.JetStreamStepdownConsumer(globalAccountName, "TEST", "dlc") + c.waitOnConsumerLeader(globalAccountName, "TEST", "dlc") + + // Create activity on this consumer. + msgs, err := sub.Fetch(1) + require_NoError(t, err) + require_True(t, len(msgs) == 1) + + // This is consider activity as well. So we can watch now up to thresh to make sure consumer still active. 
+ msgs[0].AckSync() + + // The consumer should not disappear for next `thresh` interval unless old leader does so. + timeout := time.Now().Add(thresh) + for time.Now().Before(timeout) { + _, err := js.ConsumerInfo("TEST", "dlc") + if err == nats.ErrConsumerNotFound { + t.Fatalf("Consumer deleted when it should not have been") + } + } +} + +func TestJetStreamClusterConsumerMaxDeliveryNumAckPendingBug(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + Replicas: 3, + }) + require_NoError(t, err) + + // send 50 msgs + for i := 0; i < 50; i++ { + _, err := js.Publish("foo", []byte("ok")) + require_NoError(t, err) + } + + // File based. + _, err = js.Subscribe("foo", + func(msg *nats.Msg) {}, + nats.Durable("file"), + nats.ManualAck(), + nats.MaxDeliver(1), + nats.AckWait(time.Second), + nats.MaxAckPending(10), + ) + require_NoError(t, err) + + // Let first batch retry and expire. + time.Sleep(1200 * time.Millisecond) + + cia, err := js.ConsumerInfo("TEST", "file") + require_NoError(t, err) + + // Make sure followers will have exact same state. + _, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "file"), nil, time.Second) + require_NoError(t, err) + c.waitOnConsumerLeader(globalAccountName, "TEST", "file") + + cib, err := js.ConsumerInfo("TEST", "file") + require_NoError(t, err) + + // Want to compare sans cluster details which we know will change due to leader change. + // Also last activity for delivered can be slightly off so nil out as well. + checkConsumerInfo := func(a, b *nats.ConsumerInfo) { + t.Helper() + a.Cluster, b.Cluster = nil, nil + a.Delivered.Last, b.Delivered.Last = nil, nil + if !reflect.DeepEqual(a, b) { + t.Fatalf("ConsumerInfo do not match\n\t%+v\n\t%+v", a, b) + } + } + + checkConsumerInfo(cia, cib) + + // Memory based. + _, err = js.Subscribe("foo", + func(msg *nats.Msg) {}, + nats.Durable("mem"), + nats.ManualAck(), + nats.MaxDeliver(1), + nats.AckWait(time.Second), + nats.MaxAckPending(10), + nats.ConsumerMemoryStorage(), + ) + require_NoError(t, err) + + // Let first batch retry and expire. + time.Sleep(1200 * time.Millisecond) + + cia, err = js.ConsumerInfo("TEST", "mem") + require_NoError(t, err) + + // Make sure followers will have exact same state. + _, err = nc.Request(fmt.Sprintf(JSApiConsumerLeaderStepDownT, "TEST", "mem"), nil, time.Second) + require_NoError(t, err) + c.waitOnConsumerLeader(globalAccountName, "TEST", "mem") + + cib, err = js.ConsumerInfo("TEST", "mem") + require_NoError(t, err) + + checkConsumerInfo(cia, cib) + + // Now file based but R1 and server restart. + _, err = js.Subscribe("foo", + func(msg *nats.Msg) {}, + nats.Durable("r1"), + nats.ManualAck(), + nats.MaxDeliver(1), + nats.AckWait(time.Second), + nats.MaxAckPending(10), + nats.ConsumerReplicas(1), + ) + require_NoError(t, err) + + // Let first batch retry and expire. + time.Sleep(1200 * time.Millisecond) + + cia, err = js.ConsumerInfo("TEST", "r1") + require_NoError(t, err) + + cl := c.consumerLeader(globalAccountName, "TEST", "r1") + cl.Shutdown() + cl.WaitForShutdown() + cl = c.restartServer(cl) + c.waitOnServerCurrent(cl) + + cib, err = js.ConsumerInfo("TEST", "r1") + require_NoError(t, err) + + // Created can skew a small bit due to server restart, this is expected. 
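+	// Normalizing Created on both sides keeps the DeepEqual comparison
+	// focused on delivered/ack-floor state rather than timestamps.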
+	now := time.Now()
+	cia.Created, cib.Created = now, now
+	checkConsumerInfo(cia, cib)
+}
+
+// Discovered that we were not properly setting certain default filestore blkSizes.
+func TestJetStreamClusterCheckFileStoreBlkSizes(t *testing.T) {
+	c := createJetStreamClusterExplicit(t, "R3S", 3)
+	defer c.shutdown()
+
+	nc, js := jsClientConnect(t, c.randomServer())
+	defer nc.Close()
+
+	// Normal stream.
+	_, err := js.AddStream(&nats.StreamConfig{
+		Name: "TEST",
+		Subjects: []string{"*"},
+		Replicas: 3,
+	})
+	require_NoError(t, err)
+
+	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
+		Durable:   "C3",
+		AckPolicy: nats.AckExplicitPolicy,
+	})
+	require_NoError(t, err)
+
+	// KV
+	_, err = js.CreateKeyValue(&nats.KeyValueConfig{
+		Bucket:   "TEST",
+		Replicas: 3,
+	})
+	require_NoError(t, err)
+
+	blkSize := func(fs *fileStore) uint64 {
+		fs.mu.RLock()
+		defer fs.mu.RUnlock()
+		return fs.fcfg.BlockSize
+	}
+
+	// We will now check the following filestores.
+	// meta
+	// TEST stream and NRG
+	// C3 NRG
+	// KV_TEST stream and NRG
+	for _, s := range c.servers {
+		js, cc := s.getJetStreamCluster()
+		// META
+		js.mu.RLock()
+		meta := cc.meta
+		js.mu.RUnlock()
+		require_True(t, meta != nil)
+		fs := meta.(*raft).wal.(*fileStore)
+		require_True(t, blkSize(fs) == defaultMetaFSBlkSize)
+
+		// TEST STREAM
+		mset, err := s.GlobalAccount().lookupStream("TEST")
+		require_NoError(t, err)
+		mset.mu.RLock()
+		fs = mset.store.(*fileStore)
+		mset.mu.RUnlock()
+		require_True(t, blkSize(fs) == defaultLargeBlockSize)
+
+		// KV STREAM
+		// Now the KV, which has a different default block size.
+		kv, err := s.GlobalAccount().lookupStream("KV_TEST")
+		require_NoError(t, err)
+		kv.mu.RLock()
+		fs = kv.store.(*fileStore)
+		kv.mu.RUnlock()
+		require_True(t, blkSize(fs) == defaultKVBlockSize)
+
+		// Now check NRGs
+		// TEST Stream
+		n := mset.raftNode()
+		require_True(t, n != nil)
+		fs = n.(*raft).wal.(*fileStore)
+		require_True(t, blkSize(fs) == defaultMediumBlockSize)
+		// KV TEST Stream
+		n = kv.raftNode()
+		require_True(t, n != nil)
+		fs = n.(*raft).wal.(*fileStore)
+		require_True(t, blkSize(fs) == defaultMediumBlockSize)
+		// Consumer
+		o := mset.lookupConsumer("C3")
+		require_True(t, o != nil)
+		n = o.raftNode()
+		require_True(t, n != nil)
+		fs = n.(*raft).wal.(*fileStore)
+		require_True(t, blkSize(fs) == defaultMediumBlockSize)
+	}
+}
diff --git a/server/jetstream_errors.go b/server/jetstream_errors.go
index a80a48bbd..781f98568 100644
--- a/server/jetstream_errors.go
+++ b/server/jetstream_errors.go
@@ -1,15 +1,3 @@
-// Copyright 2012-2018 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
package server import ( diff --git a/server/jetstream_errors_generated.go b/server/jetstream_errors_generated.go index 637893746..58bdfe305 100644 --- a/server/jetstream_errors_generated.go +++ b/server/jetstream_errors_generated.go @@ -1,16 +1,3 @@ -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Generated code, do not edit. See errors.json and run go generate to update package server diff --git a/server/jetstream_errors_test.go b/server/jetstream_errors_test.go index 48c64fe7b..c7adcee33 100644 --- a/server/jetstream_errors_test.go +++ b/server/jetstream_errors_test.go @@ -1,15 +1,3 @@ -// Copyright 2012-2018 The NATS Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. package server import ( diff --git a/server/jetstream_helpers_test.go b/server/jetstream_helpers_test.go index 9b9a8230b..505b82930 100644 --- a/server/jetstream_helpers_test.go +++ b/server/jetstream_helpers_test.go @@ -1,4 +1,4 @@ -// Copyright 2020-2022 The NATS Authors +// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -37,12 +37,21 @@ import ( func init() { // Speed up raft for tests. hbInterval = 50 * time.Millisecond - minElectionTimeout = 1 * time.Second - maxElectionTimeout = 3 * time.Second - lostQuorumInterval = time.Second + minElectionTimeout = 750 * time.Millisecond + maxElectionTimeout = 2500 * time.Millisecond + lostQuorumInterval = 500 * time.Millisecond lostQuorumCheck = 4 * hbInterval } +// Used to setup clusters of clusters for tests. +type cluster struct { + servers []*Server + opts []*Options + name string + t testing.TB + nproxies []*netProxy +} + // Used to setup superclusters for tests. type supercluster struct { t *testing.T @@ -106,6 +115,13 @@ var jsClusterAccountsTempl = ` routes = [%s] } + websocket { + listen: 127.0.0.1:-1 + compression: true + handshake_timeout: "5s" + no_tls: true + } + no_auth_user: one accounts { @@ -351,6 +367,9 @@ type gwProxy struct { down int } +// For use in normal clusters. +type clusterProxy = gwProxy + // Maps cluster names to proxy settings. 
type gwProxyMap map[string]*gwProxy @@ -497,7 +516,7 @@ func (sc *supercluster) leader() *Server { func (sc *supercluster) waitOnLeader() { sc.t.Helper() - expires := time.Now().Add(10 * time.Second) + expires := time.Now().Add(30 * time.Second) for time.Now().Before(expires) { for _, c := range sc.clusters { if leader := c.leader(); leader != nil { @@ -536,7 +555,7 @@ func (sc *supercluster) waitOnPeerCount(n int) { sc.t.Helper() sc.waitOnLeader() leader := sc.leader() - expires := time.Now().Add(20 * time.Second) + expires := time.Now().Add(30 * time.Second) for time.Now().Before(expires) { peers := leader.JetStreamClusterPeers() if len(peers) == n { @@ -692,9 +711,19 @@ func createJetStreamCluster(t testing.TB, tmpl string, clusterName, snPre string type modifyCb func(serverName, clusterName, storeDir, conf string) string -func createJetStreamClusterAndModHook(t testing.TB, tmpl string, clusterName, snPre string, numServers int, portStart int, waitOnReady bool, modify modifyCb) *cluster { +func createJetStreamClusterAndModHook(t testing.TB, tmpl, cName, snPre string, numServers int, portStart int, waitOnReady bool, modify modifyCb) *cluster { + return createJetStreamClusterEx(t, tmpl, cName, snPre, numServers, portStart, waitOnReady, modify, nil) +} + +func createJetStreamClusterWithNetProxy(t testing.TB, cName string, numServers int, cnp *clusterProxy) *cluster { + startPorts := []int{7_122, 9_122, 11_122, 15_122} + port := startPorts[rand.Intn(len(startPorts))] + return createJetStreamClusterEx(t, jsClusterTempl, cName, _EMPTY_, numServers, port, true, nil, cnp) +} + +func createJetStreamClusterEx(t testing.TB, tmpl, cName, snPre string, numServers int, portStart int, wait bool, modify modifyCb, cnp *clusterProxy) *cluster { t.Helper() - if clusterName == _EMPTY_ || numServers < 1 { + if cName == _EMPTY_ || numServers < 1 { t.Fatalf("Bad params") } @@ -715,20 +744,32 @@ func createJetStreamClusterAndModHook(t testing.TB, tmpl string, clusterName, sn // Build out the routes that will be shared with all configs. var routes []string + var nproxies []*netProxy for cp := portStart; cp < portStart+numServers; cp++ { - routes = append(routes, fmt.Sprintf("nats-route://127.0.0.1:%d", cp)) + routeURL := fmt.Sprintf("nats-route://127.0.0.1:%d", cp) + if cnp != nil { + np := createNetProxy(cnp.rtt, cnp.up, cnp.down, routeURL, false) + nproxies = append(nproxies, np) + routeURL = np.routeURL() + } + routes = append(routes, routeURL) } routeConfig := strings.Join(routes, ",") // Go ahead and build configurations and start servers. - c := &cluster{servers: make([]*Server, 0, numServers), opts: make([]*Options, 0, numServers), name: clusterName} + c := &cluster{servers: make([]*Server, 0, numServers), opts: make([]*Options, 0, numServers), name: cName, nproxies: nproxies} + + // Start any proxies. + for _, np := range nproxies { + np.start() + } for cp := portStart; cp < portStart+numServers; cp++ { storeDir := t.TempDir() sn := fmt.Sprintf("%sS-%d", snPre, cp-portStart+1) - conf := fmt.Sprintf(tmpl, sn, storeDir, clusterName, cp, routeConfig) + conf := fmt.Sprintf(tmpl, sn, storeDir, cName, cp, routeConfig) if modify != nil { - conf = modify(sn, clusterName, storeDir, conf) + conf = modify(sn, cName, storeDir, conf) } s, o := RunServerWithConfig(createConfFile(t, []byte(conf))) c.servers = append(c.servers, s) @@ -738,7 +779,7 @@ func createJetStreamClusterAndModHook(t testing.TB, tmpl string, clusterName, sn // Wait til we are formed and have a leader. 
c.checkClusterFormed() - if waitOnReady { + if wait { c.waitOnClusterReady() } @@ -870,6 +911,18 @@ var jsClusterTemplWithSingleLeafNode = ` accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } } ` +var jsClusterTemplWithSingleFleetLeafNode = ` + listen: 127.0.0.1:-1 + server_name: %s + cluster: { name: fleet } + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + {{leaf}} + + # For access to system account. + accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } } +` + var jsClusterTemplWithSingleLeafNodeNoJS = ` listen: 127.0.0.1:-1 server_name: %s @@ -938,8 +991,12 @@ func (c *cluster) createLeafNodeWithTemplate(name, template string) *Server { } func (c *cluster) createLeafNodeWithTemplateNoSystem(name, template string) *Server { + return c.createLeafNodeWithTemplateNoSystemWithProto(name, template, "nats") +} + +func (c *cluster) createLeafNodeWithTemplateNoSystemWithProto(name, template, proto string) *Server { c.t.Helper() - tmpl := c.createLeafSolicitNoSystem(template) + tmpl := c.createLeafSolicitNoSystemWithProto(template, proto) conf := fmt.Sprintf(tmpl, name, c.t.TempDir()) s, o := RunServerWithConfig(createConfFile(c.t, []byte(conf))) c.servers = append(c.servers, s) @@ -949,6 +1006,10 @@ func (c *cluster) createLeafNodeWithTemplateNoSystem(name, template string) *Ser // Helper to generate the leaf solicit configs. func (c *cluster) createLeafSolicit(tmpl string) string { + return c.createLeafSolicitWithProto(tmpl, "nats") +} + +func (c *cluster) createLeafSolicitWithProto(tmpl, proto string) string { c.t.Helper() // Create our leafnode cluster template first. @@ -958,8 +1019,8 @@ func (c *cluster) createLeafSolicit(tmpl string) string { continue } ln := s.getOpts().LeafNode - lns = append(lns, fmt.Sprintf("nats://%s:%d", ln.Host, ln.Port)) - lnss = append(lnss, fmt.Sprintf("nats://admin:s3cr3t!@%s:%d", ln.Host, ln.Port)) + lns = append(lns, fmt.Sprintf("%s://%s:%d", proto, ln.Host, ln.Port)) + lnss = append(lnss, fmt.Sprintf("%s://admin:s3cr3t!@%s:%d", proto, ln.Host, ln.Port)) } lnc := strings.Join(lns, ", ") lnsc := strings.Join(lnss, ", ") @@ -967,19 +1028,26 @@ func (c *cluster) createLeafSolicit(tmpl string) string { return strings.Replace(tmpl, "{{leaf}}", lconf, 1) } -func (c *cluster) createLeafSolicitNoSystem(tmpl string) string { +func (c *cluster) createLeafSolicitNoSystemWithProto(tmpl, proto string) string { c.t.Helper() // Create our leafnode cluster template first. 
- var lns string + var lns []string for _, s := range c.servers { if s.ClusterName() != c.name { continue } - ln := s.getOpts().LeafNode - lns = fmt.Sprintf("nats://%s:%d", ln.Host, ln.Port) + switch proto { + case "nats", "tls": + ln := s.getOpts().LeafNode + lns = append(lns, fmt.Sprintf("%s://%s:%d", proto, ln.Host, ln.Port)) + case "ws", "wss": + ln := s.getOpts().Websocket + lns = append(lns, fmt.Sprintf("%s://%s:%d", proto, ln.Host, ln.Port)) + } } - return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(jsLeafNoSysFrag, lns), 1) + lnc := strings.Join(lns, ", ") + return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(jsLeafNoSysFrag, lnc), 1) } func (c *cluster) createLeafNodesWithTemplateMixedMode(template, clusterName string, numJsServers, numNonServers int, doJSConfig bool) *cluster { @@ -1203,7 +1271,7 @@ func (c *cluster) waitOnPeerCount(n int) { func (c *cluster) waitOnConsumerLeader(account, stream, consumer string) { c.t.Helper() - expires := time.Now().Add(20 * time.Second) + expires := time.Now().Add(30 * time.Second) for time.Now().Before(expires) { if leader := c.consumerLeader(account, stream, consumer); leader != nil { time.Sleep(200 * time.Millisecond) @@ -1295,7 +1363,7 @@ func (c *cluster) waitOnServerHealthz(s *Server) { func (c *cluster) waitOnServerCurrent(s *Server) { c.t.Helper() - expires := time.Now().Add(20 * time.Second) + expires := time.Now().Add(30 * time.Second) for time.Now().Before(expires) { time.Sleep(100 * time.Millisecond) if !s.JetStreamEnabled() || s.JetStreamIsCurrent() { @@ -1621,18 +1689,14 @@ func (np *netProxy) loop(rtt time.Duration, tbw int, r, w net.Conn) { rl := rate.NewLimiter(rate.Limit(tbw), rbl) - for fr := true; ; { - sr := time.Now() + for { n, err := r.Read(buf[:]) if err != nil { return } // RTT delays - if fr || time.Since(sr) > 250*time.Millisecond { - fr = false - if delay > 0 { - time.Sleep(delay) - } + if delay > 0 { + time.Sleep(delay) } if err := rl.WaitN(ctx, n); err != nil { return diff --git a/server/jetstream_leafnode_test.go b/server/jetstream_leafnode_test.go index abc789a9b..509827572 100644 --- a/server/jetstream_leafnode_test.go +++ b/server/jetstream_leafnode_test.go @@ -298,8 +298,8 @@ jetstream :{ server_name: A cluster: { name: clust1 - listen: 127.0.0.1:50554 - routes=[nats-route://127.0.0.1:50555] + listen: 127.0.0.1:20104 + routes=[nats-route://127.0.0.1:20105] no_advertise: true } ` @@ -327,8 +327,8 @@ jetstream: { server_name: B cluster: { name: clust1 - listen: 127.0.0.1:50555 - routes=[nats-route://127.0.0.1:50554] + listen: 127.0.0.1:20105 + routes=[nats-route://127.0.0.1:20104] no_advertise: true } ` @@ -350,8 +350,8 @@ jetstream: { server_name: LA cluster: { name: clustL - listen: 127.0.0.1:50556 - routes=[nats-route://127.0.0.1:50557] + listen: 127.0.0.1:20106 + routes=[nats-route://127.0.0.1:20107] no_advertise: true } leafnodes:{ @@ -378,8 +378,8 @@ jetstream: { server_name: LB cluster: { name: clustL - listen: 127.0.0.1:50557 - routes=[nats-route://127.0.0.1:50556] + listen: 127.0.0.1:20107 + routes=[nats-route://127.0.0.1:20106] no_advertise: true } leafnodes:{ @@ -568,8 +568,8 @@ jetstream: { %s store_dir: '%s'; max_mem: 50Mb, max_file: 50Mb } server_name: A cluster: { name: clust1 - listen: 127.0.0.1:50554 - routes=[nats-route://127.0.0.1:50555,nats-route://127.0.0.1:50556] + listen: 127.0.0.1:20114 + routes=[nats-route://127.0.0.1:20115,nats-route://127.0.0.1:20116] no_advertise: true } ` @@ -592,8 +592,8 @@ jetstream: { %s store_dir: '%s'; max_mem: 50Mb, max_file: 50Mb } server_name: B cluster: 
{ name: clust1 - listen: 127.0.0.1:50555 - routes=[nats-route://127.0.0.1:50554,nats-route://127.0.0.1:50556] + listen: 127.0.0.1:20115 + routes=[nats-route://127.0.0.1:20114,nats-route://127.0.0.1:20116] no_advertise: true } ` @@ -619,8 +619,8 @@ jetstream: { server_name: C cluster: { name: clust1 - listen: 127.0.0.1:50556 - routes=[nats-route://127.0.0.1:50554,nats-route://127.0.0.1:50555] + listen: 127.0.0.1:20116 + routes=[nats-route://127.0.0.1:20114,nats-route://127.0.0.1:20115] no_advertise: true } ` @@ -741,8 +741,8 @@ jetstream: { max_file: 50Mb } leafnodes:{ - remotes:[{url:nats://a1:a1@127.0.0.1:50555, account: A, credentials: '%s' }, - {url:nats://s1:s1@127.0.0.1:50555, account: SYS, credentials: '%s', deny_imports: foo, deny_exports: bar}] + remotes:[{url:nats://a1:a1@127.0.0.1:20125, account: A, credentials: '%s' }, + {url:nats://s1:s1@127.0.0.1:20125, account: SYS, credentials: '%s', deny_imports: foo, deny_exports: bar}] } ` akp, err := nkeys.CreateAccount() @@ -1052,8 +1052,8 @@ jetstream : { domain: "DHUB", store_dir: '%s', max_mem: 100Mb, max_file: 100Mb } server_name: HUB1 cluster: { name: HUB - listen: 127.0.0.1:50554 - routes=[nats-route://127.0.0.1:50555] + listen: 127.0.0.1:20134 + routes=[nats-route://127.0.0.1:20135] } leafnodes: { listen:127.0.0.1:-1 @@ -1070,8 +1070,8 @@ jetstream : { domain: "DHUB", store_dir: '%s', max_mem: 100Mb, max_file: 100Mb } server_name: HUB2 cluster: { name: HUB - listen: 127.0.0.1:50555 - routes=[nats-route://127.0.0.1:50554] + listen: 127.0.0.1:20135 + routes=[nats-route://127.0.0.1:20134] } leafnodes: { listen:127.0.0.1:-1 @@ -1088,8 +1088,8 @@ jetstream: { domain: "DLEAF", store_dir: '%s', max_mem: 100Mb, max_file: 100Mb } server_name: LEAF1 cluster: { name: LEAF - listen: 127.0.0.1:50556 - routes=[nats-route://127.0.0.1:50557] + listen: 127.0.0.1:20136 + routes=[nats-route://127.0.0.1:20137] } leafnodes: { remotes:[{url:nats://a1:a1@127.0.0.1:%d, account: A},{url:nats://b1:b1@127.0.0.1:%d, account: B}] @@ -1107,8 +1107,8 @@ jetstream: { domain: "DLEAF", store_dir: '%s', max_mem: 100Mb, max_file: 100Mb } server_name: LEAF2 cluster: { name: LEAF - listen: 127.0.0.1:50557 - routes=[nats-route://127.0.0.1:50556] + listen: 127.0.0.1:20137 + routes=[nats-route://127.0.0.1:20136] } leafnodes: { remotes:[{url:nats://a1:a1@127.0.0.1:%d, account: A},{url:nats://b1:b1@127.0.0.1:%d, account: B}] diff --git a/server/jetstream_super_cluster_test.go b/server/jetstream_super_cluster_test.go index 8abbe17c4..f229d46aa 100644 --- a/server/jetstream_super_cluster_test.go +++ b/server/jetstream_super_cluster_test.go @@ -3953,3 +3953,97 @@ func TestJetStreamSuperClusterGWOfflineSatus(t *testing.T) { return nil }) } + +func TestJetStreamSuperClusterMovingR1Stream(t *testing.T) { + // Make C2 have some latency. + gwm := gwProxyMap{ + "C2": &gwProxy{ + rtt: 10 * time.Millisecond, + up: 1 * 1024 * 1024 * 1024, // 1gbit + down: 1 * 1024 * 1024 * 1024, // 1gbit + }, + } + sc := createJetStreamTaggedSuperClusterWithGWProxy(t, gwm) + defer sc.shutdown() + + nc, js := jsClientConnect(t, sc.clusterForName("C1").randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + }) + require_NoError(t, err) + + toSend := 10_000 + for i := 0; i < toSend; i++ { + _, err := js.PublishAsync("TEST", []byte("HELLO WORLD")) + require_NoError(t, err) + } + select { + case <-js.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + // Have it move to GCP. 
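+	// Changing Placement tags triggers a stream move: the meta layer first
+	// expands the peer set into C2, waits for the new peers to catch up over
+	// the gateway, then shrinks back so the R1 stream again has a single
+	// replica, which is what the retry loop below asserts.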
+ _, err = js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Placement: &nats.Placement{Tags: []string{"cloud:gcp"}}, + }) + require_NoError(t, err) + + checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { + sc.waitOnStreamLeader(globalAccountName, "TEST") + si, err := js.StreamInfo("TEST") + if err != nil { + return err + } + if si.Cluster.Name != "C2" { + return fmt.Errorf("Wrong cluster: %q", si.Cluster.Name) + } + if si.Cluster.Leader == _EMPTY_ { + return fmt.Errorf("No leader yet") + } else if !strings.HasPrefix(si.Cluster.Leader, "C2") { + return fmt.Errorf("Wrong leader: %q", si.Cluster.Leader) + } + // Now we want to see that we shrink back to original. + if len(si.Cluster.Replicas) != 0 { + return fmt.Errorf("Expected 0 replicas, got %d", len(si.Cluster.Replicas)) + } + if si.State.Msgs != uint64(toSend) { + return fmt.Errorf("Only see %d msgs", si.State.Msgs) + } + return nil + }) +} + +// https://github.com/nats-io/nats-server/issues/4396 +func TestJetStreamSuperClusterR1StreamPeerRemove(t *testing.T) { + sc := createJetStreamSuperCluster(t, 1, 3) + defer sc.shutdown() + + nc, js := jsClientConnect(t, sc.serverByName("C1-S1")) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 1, + }) + require_NoError(t, err) + + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + + // Call peer remove on the only peer the leader. + resp, err := nc.Request(fmt.Sprintf(JSApiStreamRemovePeerT, "TEST"), []byte(`{"peer":"`+si.Cluster.Leader+`"}`), time.Second) + require_NoError(t, err) + var rpr JSApiStreamRemovePeerResponse + require_NoError(t, json.Unmarshal(resp.Data, &rpr)) + require_False(t, rpr.Success) + require_True(t, rpr.Error.ErrCode == 10075) + + // Stream should still be in place and useable. 
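+	// (Removing the only peer of an R1 stream would orphan the asset, so the
+	// API must reject the request rather than report success.)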
+ _, err = js.StreamInfo("TEST") + require_NoError(t, err) +} diff --git a/server/jetstream_test.go b/server/jetstream_test.go index 4373632a8..78987d9b5 100644 --- a/server/jetstream_test.go +++ b/server/jetstream_test.go @@ -3527,10 +3527,14 @@ func TestJetStreamConsumerRateLimit(t *testing.T) { nc.Publish(mname, msg) } nc.Flush() - state := mset.state() - if state.Msgs != uint64(toSend) { - t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) - } + + checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { + state := mset.state() + if state.Msgs != uint64(toSend) { + return fmt.Errorf("Expected %d messages, got %d", toSend, state.Msgs) + } + return nil + }) // 100Mbit rateLimit := uint64(100 * 1024 * 1024) @@ -5336,13 +5340,15 @@ func TestJetStreamRedeliverCount(t *testing.T) { nc, js := jsClientConnect(t, s) defer nc.Close() - // Send 10 msgs - for i := 0; i < 10; i++ { - js.Publish("DC", []byte("OK!")) - } - if state := mset.state(); state.Msgs != 10 { - t.Fatalf("Expected %d messages, got %d", 10, state.Msgs) + if _, err = js.Publish("DC", []byte("OK!")); err != nil { + t.Fatal(err) } + checkFor(t, time.Second, time.Millisecond*250, func() error { + if state := mset.state(); state.Msgs != 1 { + return fmt.Errorf("Expected %d messages, got %d", 1, state.Msgs) + } + return nil + }) o, err := mset.addConsumer(workerModeConfig("WQ")) if err != nil { @@ -8703,13 +8709,16 @@ func TestJetStreamMsgHeaders(t *testing.T) { nc.PublishMsg(m) nc.Flush() - state := mset.state() - if state.Msgs != 1 { - t.Fatalf("Expected 1 message, got %d", state.Msgs) - } - if state.Bytes == 0 { - t.Fatalf("Expected non-zero bytes") - } + checkFor(t, time.Second*2, time.Millisecond*250, func() error { + state := mset.state() + if state.Msgs != 1 { + return fmt.Errorf("Expected 1 message, got %d", state.Msgs) + } + if state.Bytes == 0 { + return fmt.Errorf("Expected non-zero bytes") + } + return nil + }) // Now access raw from stream. sm, err := mset.getMsg(1) @@ -10723,6 +10732,222 @@ func TestJetStreamAccountImportBasics(t *testing.T) { } } +// This tests whether we are able to aggregate all JetStream advisory events +// from all accounts into a single account. Config for this test uses +// service imports and exports as that allows for gathering all events +// without having to know the account name and without separate entries +// for each account in aggregate account config. +// This test fails as it is not receiving the api audit event ($JS.EVENT.ADVISORY.API). +func TestJetStreamAccountImportJSAdvisoriesAsService(t *testing.T) { + conf := createConfFile(t, []byte(` + listen=127.0.0.1:-1 + no_auth_user: pp + jetstream: {max_mem_store: 64GB, max_file_store: 10TB} + accounts { + JS { + jetstream: enabled + users: [ {user: pp, password: foo} ] + imports [ + { service: { account: AGG, subject: '$JS.EVENT.ADVISORY.ACC.JS.>' }, to: '$JS.EVENT.ADVISORY.>' } + ] + } + AGG { + users: [ {user: agg, password: foo} ] + exports: [ + { service: '$JS.EVENT.ADVISORY.ACC.*.>', response: Singleton, account_token_position: 5 } + ] + } + } + `)) + + s, _ := RunServerWithConfig(conf) + if config := s.JetStreamConfig(); config != nil { + defer removeDir(t, config.StoreDir) + } + defer s.Shutdown() + + // This should be the pp user, one which manages JetStream assets + ncJS, err := nats.Connect(s.ClientURL()) + if err != nil { + t.Fatalf("Unexpected error during connect: %v", err) + } + defer ncJS.Close() + + // This is the agg user, which should aggregate all JS advisory events. 
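+	// (account_token_position: 5 in the AGG export substitutes the importing
+	// account's name for the wildcard token, so the JS account can only
+	// import $JS.EVENT.ADVISORY.ACC.JS.> and AGG needs no per-account
+	// entries.)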
+ ncAgg, err := nats.Connect(s.ClientURL(), nats.UserInfo("agg", "foo")) + if err != nil { + t.Fatalf("Unexpected error during connect: %v", err) + } + defer ncAgg.Close() + + js, err := ncJS.JetStream() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // user from JS account should receive events on $JS.EVENT.ADVISORY.> subject + subJS, err := ncJS.SubscribeSync("$JS.EVENT.ADVISORY.>") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer subJS.Unsubscribe() + + // user from AGG account should receive events on mapped $JS.EVENT.ADVISORY.ACC.JS.> subject (with account name) + subAgg, err := ncAgg.SubscribeSync("$JS.EVENT.ADVISORY.ACC.JS.>") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // add stream using JS account + // this should trigger 2 events: + // - an action event on $JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS + // - an api audit event on $JS.EVENT.ADVISORY.API + _, err = js.AddStream(&nats.StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}}) + if err != nil { + t.Fatalf("Unexpected error adding stream: %v", err) + } + + gotEvents := map[string]int{} + for i := 0; i < 2; i++ { + msg, err := subJS.NextMsg(time.Second * 2) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + gotEvents[msg.Subject]++ + } + if c := gotEvents["$JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS"]; c != 1 { + t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS but got %d", c) + } + if c := gotEvents["$JS.EVENT.ADVISORY.API"]; c != 1 { + t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.API but got %d", c) + } + + // same set of events should be received by AGG account + // on subjects containing account name (ACC.JS) + gotEvents = map[string]int{} + for i := 0; i < 2; i++ { + msg, err := subAgg.NextMsg(time.Second * 2) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + gotEvents[msg.Subject]++ + } + if c := gotEvents["$JS.EVENT.ADVISORY.ACC.JS.STREAM.CREATED.ORDERS"]; c != 1 { + t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.ACC.JS.STREAM.CREATED.ORDERS but got %d", c) + } + if c := gotEvents["$JS.EVENT.ADVISORY.ACC.JS.API"]; c != 1 { + t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.ACC.JS.API but got %d", c) + } +} + +// This tests whether we are able to aggregate all JetStream advisory events +// from all accounts into a single account. Config for this test uses +// stream imports and exports as that allows for gathering all events +// as long as there is a separate stream import entry for each account +// in aggregate account config. 
+func TestJetStreamAccountImportJSAdvisoriesAsStream(t *testing.T) { + conf := createConfFile(t, []byte(` + listen=127.0.0.1:-1 + no_auth_user: pp + jetstream: {max_mem_store: 64GB, max_file_store: 10TB} + accounts { + JS { + jetstream: enabled + users: [ {user: pp, password: foo} ] + exports [ + { stream: '$JS.EVENT.ADVISORY.>' } + ] + } + AGG { + users: [ {user: agg, password: foo} ] + imports: [ + { stream: { account: JS, subject: '$JS.EVENT.ADVISORY.>' }, to: '$JS.EVENT.ADVISORY.ACC.JS.>' } + ] + } + } + `)) + + s, _ := RunServerWithConfig(conf) + if config := s.JetStreamConfig(); config != nil { + defer removeDir(t, config.StoreDir) + } + defer s.Shutdown() + + // This should be the pp user, one which manages JetStream assets + ncJS, err := nats.Connect(s.ClientURL()) + if err != nil { + t.Fatalf("Unexpected error during connect: %v", err) + } + defer ncJS.Close() + + // This is the agg user, which should aggregate all JS advisory events. + ncAgg, err := nats.Connect(s.ClientURL(), nats.UserInfo("agg", "foo")) + if err != nil { + t.Fatalf("Unexpected error during connect: %v", err) + } + defer ncAgg.Close() + + js, err := ncJS.JetStream() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // user from JS account should receive events on $JS.EVENT.ADVISORY.> subject + subJS, err := ncJS.SubscribeSync("$JS.EVENT.ADVISORY.>") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer subJS.Unsubscribe() + + // user from AGG account should receive events on mapped $JS.EVENT.ADVISORY.ACC.JS.> subject (with account name) + subAgg, err := ncAgg.SubscribeSync("$JS.EVENT.ADVISORY.ACC.JS.>") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // add stream using JS account + // this should trigger 2 events: + // - an action event on $JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS + // - an api audit event on $JS.EVENT.ADVISORY.API + _, err = js.AddStream(&nats.StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}}) + if err != nil { + t.Fatalf("Unexpected error adding stream: %v", err) + } + msg, err := subJS.NextMsg(time.Second) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if msg.Subject != "$JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS" { + t.Fatalf("Unexpected subject: %q", msg.Subject) + } + msg, err = subJS.NextMsg(time.Second) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if msg.Subject != "$JS.EVENT.ADVISORY.API" { + t.Fatalf("Unexpected subject: %q", msg.Subject) + } + + // same set of events should be received by AGG account + // on subjects containing account name (ACC.JS) + msg, err = subAgg.NextMsg(time.Second) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if msg.Subject != "$JS.EVENT.ADVISORY.ACC.JS.STREAM.CREATED.ORDERS" { + t.Fatalf("Unexpected subject: %q", msg.Subject) + } + + // when using stream instead of service, we get all events + msg, err = subAgg.NextMsg(time.Second) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if msg.Subject != "$JS.EVENT.ADVISORY.ACC.JS.API" { + t.Fatalf("Unexpected subject: %q", msg.Subject) + } +} + // This is for importing all of JetStream into another account for admin purposes. 
func TestJetStreamAccountImportAll(t *testing.T) { conf := createConfFile(t, []byte(` @@ -16732,6 +16957,63 @@ func TestJetStreamDisabledHealthz(t *testing.T) { t.Fatalf("Expected healthz to return error if JetStream is disabled, got status: %s", hs.Status) } +func TestJetStreamPullTimeout(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + }) + require_NoError(t, err) + + _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ + Durable: "pr", + AckPolicy: nats.AckExplicitPolicy, + }) + require_NoError(t, err) + + const numMessages = 1000 + // Send messages in small intervals. + go func() { + for i := 0; i < numMessages; i++ { + time.Sleep(time.Millisecond * 10) + sendStreamMsg(t, nc, "TEST", "data") + } + }() + + // Prepare manual Pull Request. + req := &JSApiConsumerGetNextRequest{Batch: 200, NoWait: false, Expires: time.Millisecond * 100} + jreq, _ := json.Marshal(req) + + subj := fmt.Sprintf(JSApiRequestNextT, "TEST", "pr") + reply := "_pr_" + var got atomic.Int32 + nc.PublishRequest(subj, reply, jreq) + + // Manually subscribe to inbox subject and send new request only if we get `408 Request Timeout`. + sub, _ := nc.Subscribe(reply, func(msg *nats.Msg) { + if msg.Header.Get("Status") == "408" && msg.Header.Get("Description") == "Request Timeout" { + nc.PublishRequest(subj, reply, jreq) + nc.Flush() + } else { + got.Add(1) + msg.Ack() + } + }) + defer sub.Unsubscribe() + + // Check if we're not stuck. + checkFor(t, time.Second*30, time.Second*1, func() error { + if got.Load() < int32(numMessages) { + return fmt.Errorf("expected %d messages", numMessages) + } + return nil + }) +} + func TestJetStreamPullMaxBytes(t *testing.T) { s := RunBasicJetStreamServer(t) defer s.Shutdown() @@ -18785,6 +19067,7 @@ func TestJetStreamAccountPurge(t *testing.T) { require_NoError(t, os.Remove(storeDir+"/jwt/"+accpub+".jwt")) s, o = RunServerWithConfig(o.ConfigFile) + defer s.Shutdown() inspectDirs(t, 1) purge(t) inspectDirs(t, 0) @@ -19202,6 +19485,45 @@ func TestJetStreamMetaDataFailOnKernelFault(t *testing.T) { require_True(t, si.State.Msgs == 10) } +func TestJetstreamConsumerSingleTokenSubject(t *testing.T) { + + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + filterSubject := "foo" + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{filterSubject}, + }) + require_NoError(t, err) + + req, err := json.Marshal(&CreateConsumerRequest{Stream: "TEST", Config: ConsumerConfig{ + FilterSubject: filterSubject, + Name: "name", + }}) + + if err != nil { + t.Fatalf("failed to marshal consumer create request: %v", err) + } + + resp, err := nc.Request(fmt.Sprintf("$JS.API.CONSUMER.CREATE.%s.%s.%s", "TEST", "name", "not_filter_subject"), req, time.Second*10) + + var apiResp ApiResponse + json.Unmarshal(resp.Data, &apiResp) + if err != nil { + t.Fatalf("failed to unmarshal response: %v", err) + } + if apiResp.Error == nil { + t.Fatalf("expected error, got nil") + } + if apiResp.Error.ErrCode != 10131 { + t.Fatalf("expected error 10131, got %v", apiResp.Error) + } +} + // https://github.com/nats-io/nats-server/issues/3734 func TestJetStreamMsgBlkFailOnKernelFault(t *testing.T) { s := RunBasicJetStreamServer(t) @@ -19726,3 +20048,451 @@ func TestJetStreamConsumerAckFloorWithExpired(t *testing.T) { require_True(t, ci.NumPending == 0) require_True(t, ci.NumRedelivered == 0) } + +func 
+func TestJetStreamConsumerWithFormattingSymbol(t *testing.T) {
+	s := RunBasicJetStreamServer(t)
+	defer s.Shutdown()
+
+	nc, js := jsClientConnect(t, s)
+	defer nc.Close()
+
+	_, err := js.AddStream(&nats.StreamConfig{
+		Name:     "Test%123",
+		Subjects: []string{"foo"},
+	})
+	require_NoError(t, err)
+
+	for i := 0; i < 10; i++ {
+		sendStreamMsg(t, nc, "foo", "OK")
+	}
+
+	_, err = js.AddConsumer("Test%123", &nats.ConsumerConfig{
+		Durable:        "Test%123",
+		FilterSubject:  "foo",
+		DeliverSubject: "bar",
+	})
+	require_NoError(t, err)
+
+	sub, err := js.SubscribeSync("foo", nats.Bind("Test%123", "Test%123"))
+	require_NoError(t, err)
+
+	_, err = sub.NextMsg(time.Second * 5)
+	require_NoError(t, err)
+}
+
+func TestJetStreamStreamUpdateWithExternalSource(t *testing.T) {
+	ho := DefaultTestOptions
+	ho.Port = -1
+	ho.LeafNode.Host = "127.0.0.1"
+	ho.LeafNode.Port = -1
+	ho.JetStream = true
+	ho.JetStreamDomain = "hub"
+	ho.StoreDir = t.TempDir()
+	hs := RunServer(&ho)
+	defer hs.Shutdown()
+
+	lu, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ho.LeafNode.Port))
+	require_NoError(t, err)
+
+	lo1 := DefaultTestOptions
+	lo1.Port = -1
+	lo1.ServerName = "a-leaf"
+	lo1.JetStream = true
+	lo1.StoreDir = t.TempDir()
+	lo1.JetStreamDomain = "a-leaf"
+	lo1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{lu}}}
+	l1 := RunServer(&lo1)
+	defer l1.Shutdown()
+
+	checkLeafNodeConnected(t, l1)
+
+	// Test sources with `External` provided.
+	ncl, jsl := jsClientConnect(t, l1)
+	defer ncl.Close()
+
+	// Leaf stream.
+	_, err = jsl.AddStream(&nats.StreamConfig{Name: "stream", Subjects: []string{"leaf"}})
+	require_NoError(t, err)
+
+	nch, jsh := jsClientConnect(t, hs)
+	defer nch.Close()
+
+	// Hub stream.
+	// Both streams use the same name, as we're testing that the subject-overlap check
+	// does not compare a stream against itself when the `External` stream has the same name.
+	_, err = jsh.AddStream(&nats.StreamConfig{
+		Name:     "stream",
+		Subjects: []string{"hub"},
+	})
+	require_NoError(t, err)
+
+	// Add `Sources`.
+	// This should not validate subjects overlap against itself.
+	_, err = jsh.UpdateStream(&nats.StreamConfig{
+		Name:     "stream",
+		Subjects: []string{"hub"},
+		Sources: []*nats.StreamSource{
+			{
+				Name:          "stream",
+				FilterSubject: "leaf",
+				External: &nats.ExternalStream{
+					APIPrefix: "$JS.a-leaf.API",
+				},
+			},
+		},
+	})
+	require_NoError(t, err)
+
+	// Specifying a non-existent FilterSubject should also be fine, as we do not validate the `External` stream.
+	_, err = jsh.UpdateStream(&nats.StreamConfig{
+		Name:     "stream",
+		Subjects: []string{"hub"},
+		Sources: []*nats.StreamSource{
+			{
+				Name:          "stream",
+				FilterSubject: "foo",
+				External: &nats.ExternalStream{
+					APIPrefix: "$JS.a-leaf.API",
+				},
+			},
+		},
+	})
+	require_NoError(t, err)
+
+	// Add one more stream to the Hub, so when we source it, it is not `External`.
+ _, err = jsh.AddStream(&nats.StreamConfig{Name: "other", Subjects: []string{"other"}}) + require_NoError(t, err) + + _, err = jsh.UpdateStream(&nats.StreamConfig{ + Name: "stream", + Subjects: []string{"hub"}, + Sources: []*nats.StreamSource{ + { + Name: "other", + FilterSubject: "foo", + }, + }, + }) + require_Error(t, err) + require_True(t, strings.Contains(err.Error(), "does not overlap")) +} + +func TestJetStreamKVHistoryRegression(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + for _, storage := range []nats.StorageType{nats.FileStorage, nats.MemoryStorage} { + t.Run(storage.String(), func(t *testing.T) { + js.DeleteKeyValue("TEST") + + kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ + Bucket: "TEST", + History: 4, + Storage: storage, + }) + require_NoError(t, err) + + r1, err := kv.Create("foo", []byte("a")) + require_NoError(t, err) + + _, err = kv.Update("foo", []byte("ab"), r1) + require_NoError(t, err) + + err = kv.Delete("foo") + require_NoError(t, err) + + _, err = kv.Create("foo", []byte("abc")) + require_NoError(t, err) + + err = kv.Delete("foo") + require_NoError(t, err) + + history, err := kv.History("foo") + require_NoError(t, err) + require_True(t, len(history) == 4) + + _, err = kv.Update("foo", []byte("abcd"), history[len(history)-1].Revision()) + require_NoError(t, err) + + err = kv.Purge("foo") + require_NoError(t, err) + + _, err = kv.Create("foo", []byte("abcde")) + require_NoError(t, err) + + err = kv.Purge("foo") + require_NoError(t, err) + + history, err = kv.History("foo") + require_NoError(t, err) + require_True(t, len(history) == 1) + }) + } +} + +func TestJetStreamSnapshotRestoreStallAndHealthz(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "ORDERS", + Subjects: []string{"orders.*"}, + }) + require_NoError(t, err) + + for i := 0; i < 1000; i++ { + sendStreamMsg(t, nc, "orders.created", "new order") + } + + hs := s.healthz(nil) + if hs.Status != "ok" || hs.Error != _EMPTY_ { + t.Fatalf("Expected health to be ok, got %+v", hs) + } + + // Simulate the staging directory for restores. This is normally cleaned up + // but since its at the root of the storage directory make sure healthz is not affected. + snapDir := filepath.Join(s.getJetStream().config.StoreDir, snapStagingDir) + require_NoError(t, os.MkdirAll(snapDir, defaultDirPerms)) + + // Make sure healthz ok. + hs = s.healthz(nil) + if hs.Status != "ok" || hs.Error != _EMPTY_ { + t.Fatalf("Expected health to be ok, got %+v", hs) + } +} + +// https://github.com/nats-io/nats-server/pull/4163 +func TestJetStreamMaxBytesIgnored(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + MaxBytes: 10 * 1024 * 1024, + }) + require_NoError(t, err) + + msg := bytes.Repeat([]byte("A"), 1024*1024) + + for i := 0; i < 10; i++ { + _, err := js.Publish("x", msg) + require_NoError(t, err) + } + + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, si.State.Msgs == 9) + + // Stop current + sd := s.JetStreamConfig().StoreDir + s.Shutdown() + + // We will remove the idx file and truncate the blk and fss files. 
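+	// Background: in the file store a stream's message directory keeps the
+	// raw message blocks in "*.blk" files, per-block index state in "*.idx"
+	// files and per-subject state in "*.fss" files, so wiping these below
+	// simulates on-disk damage that recovery has to tolerate.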
+ mdir := filepath.Join(sd, "$G", "streams", "TEST", "msgs") + // Remove idx + err = os.Remove(filepath.Join(mdir, "1.idx")) + require_NoError(t, err) + // Truncate fss + err = os.WriteFile(filepath.Join(mdir, "1.fss"), nil, defaultFilePerms) + require_NoError(t, err) + // Truncate blk + err = os.WriteFile(filepath.Join(mdir, "1.blk"), nil, defaultFilePerms) + require_NoError(t, err) + + // Restart. + s = RunJetStreamServerOnPort(-1, sd) + defer s.Shutdown() + + nc, js = jsClientConnect(t, s) + defer nc.Close() + + for i := 0; i < 10; i++ { + _, err := js.Publish("x", msg) + require_NoError(t, err) + } + + si, err = js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, si.State.Bytes <= 10*1024*1024) +} + +func TestJetStreamLastSequenceBySubjectConcurrent(t *testing.T) { + for _, st := range []StorageType{FileStorage, MemoryStorage} { + t.Run(st.String(), func(t *testing.T) { + c := createJetStreamClusterExplicit(t, "JSC", 3) + defer c.shutdown() + + nc0, js0 := jsClientConnect(t, c.randomServer()) + defer nc0.Close() + + nc1, js1 := jsClientConnect(t, c.randomServer()) + defer nc1.Close() + + cfg := StreamConfig{ + Name: "KV", + Subjects: []string{"kv.>"}, + Storage: st, + Replicas: 3, + } + + req, err := json.Marshal(cfg) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + // Do manually for now. + m, err := nc0.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) + require_NoError(t, err) + si, err := js0.StreamInfo("KV") + if err != nil { + t.Fatalf("Unexpected error: %v, respmsg: %q", err, string(m.Data)) + } + if si == nil || si.Config.Name != "KV" { + t.Fatalf("StreamInfo is not correct %+v", si) + } + + pub := func(js nats.JetStreamContext, subj, data, seq string) { + t.Helper() + m := nats.NewMsg(subj) + m.Data = []byte(data) + m.Header.Set(JSExpectedLastSubjSeq, seq) + js.PublishMsg(m) + } + + ready := make(chan struct{}) + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + <-ready + pub(js0, "kv.foo", "0-0", "0") + pub(js0, "kv.foo", "0-1", "1") + pub(js0, "kv.foo", "0-2", "2") + wg.Done() + }() + + go func() { + <-ready + pub(js1, "kv.foo", "1-0", "0") + pub(js1, "kv.foo", "1-1", "1") + pub(js1, "kv.foo", "1-2", "2") + wg.Done() + }() + + time.Sleep(50 * time.Millisecond) + close(ready) + wg.Wait() + + // Read the messages. + sub, err := js0.PullSubscribe(_EMPTY_, _EMPTY_, nats.BindStream("KV")) + require_NoError(t, err) + msgs, err := sub.Fetch(10) + require_NoError(t, err) + if len(msgs) != 3 { + t.Errorf("Expected 3 messages, got %d", len(msgs)) + } + for i, m := range msgs { + if m.Header.Get(JSExpectedLastSubjSeq) != fmt.Sprint(i) { + t.Errorf("Expected %d for last sequence, got %q", i, m.Header.Get(JSExpectedLastSubjSeq)) + } + } + }) + } +} + +func TestJetStreamUsageSyncDeadlock(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"*"}, + }) + require_NoError(t, err) + + sendStreamMsg(t, nc, "foo", "hello") + + // Now purposely mess up the usage that will force a sync. + // Without the fix this will deadlock. 
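+	// A negative local store total cannot occur normally; it guarantees the
+	// next store update sees a mismatch and takes the usage re-sync path,
+	// which is where the deadlock used to occur.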
+ jsa := s.getJetStream().lookupAccount(s.GlobalAccount()) + jsa.usageMu.Lock() + st, ok := jsa.usage[_EMPTY_] + require_True(t, ok) + st.local.store = -1000 + jsa.usageMu.Unlock() + + sendStreamMsg(t, nc, "foo", "hello") +} + +// https://github.com/nats-io/nats.go/issues/1382 +// https://github.com/nats-io/nats-server/issues/4445 +func TestJetStreamChangeMaxMessagesPerSubject(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, js := jsClientConnect(t, s) + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"one.>"}, + MaxMsgsPerSubject: 5, + }) + require_NoError(t, err) + + for i := 0; i < 10; i++ { + sendStreamMsg(t, nc, "one.data", "data") + } + + expectMsgs := func(num int32) error { + t.Helper() + + var msgs atomic.Int32 + sub, err := js.Subscribe("one.>", func(msg *nats.Msg) { + msgs.Add(1) + msg.Ack() + }) + require_NoError(t, err) + defer sub.Unsubscribe() + + checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { + if nm := msgs.Load(); nm != num { + return fmt.Errorf("expected to get %v messages, got %v instead", num, nm) + } + return nil + }) + return nil + } + + require_NoError(t, expectMsgs(5)) + + js.UpdateStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"one.>"}, + MaxMsgsPerSubject: 3, + }) + + info, err := js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, info.Config.MaxMsgsPerSubject == 3) + require_True(t, info.State.Msgs == 3) + + require_NoError(t, expectMsgs(3)) + + for i := 0; i < 10; i++ { + sendStreamMsg(t, nc, "one.data", "data") + } + + require_NoError(t, expectMsgs(3)) +} diff --git a/server/jwt_test.go b/server/jwt_test.go index 108589243..58884b97c 100644 --- a/server/jwt_test.go +++ b/server/jwt_test.go @@ -3692,7 +3692,7 @@ func TestJWTAccountNATSResolverCrossClusterFetch(t *testing.T) { listen: 127.0.0.1:-1 no_advertise: true } - `, ojwt, syspub, dirAA))) + `, ojwt, syspub, dirAA))) sAA, _ := RunServerWithConfig(confAA) defer sAA.Shutdown() // Create Server B (using no_advertise to prevent fail over) @@ -3718,7 +3718,7 @@ func TestJWTAccountNATSResolverCrossClusterFetch(t *testing.T) { nats-route://127.0.0.1:%d ] } - `, ojwt, syspub, dirAB, sAA.opts.Cluster.Port))) + `, ojwt, syspub, dirAB, sAA.opts.Cluster.Port))) sAB, _ := RunServerWithConfig(confAB) defer sAB.Shutdown() // Create Server C (using no_advertise to prevent fail over) @@ -3744,10 +3744,10 @@ func TestJWTAccountNATSResolverCrossClusterFetch(t *testing.T) { listen: 127.0.0.1:-1 no_advertise: true } - `, ojwt, syspub, dirBA, sAA.opts.Gateway.Port))) + `, ojwt, syspub, dirBA, sAA.opts.Gateway.Port))) sBA, _ := RunServerWithConfig(confBA) defer sBA.Shutdown() - // Create Sever BA (using no_advertise to prevent fail over) + // Create Server BA (using no_advertise to prevent fail over) confBB := createConfFile(t, []byte(fmt.Sprintf(` listen: 127.0.0.1:-1 server_name: srv-B-B @@ -3773,7 +3773,7 @@ func TestJWTAccountNATSResolverCrossClusterFetch(t *testing.T) { {name: "clust-A", url: "nats://127.0.0.1:%d"}, ] } - `, ojwt, syspub, dirBB, sBA.opts.Cluster.Port, sAA.opts.Cluster.Port))) + `, ojwt, syspub, dirBB, sBA.opts.Cluster.Port, sAA.opts.Cluster.Port))) sBB, _ := RunServerWithConfig(confBB) defer sBB.Shutdown() // Assert topology @@ -6592,3 +6592,190 @@ func TestServerOperatorModeNoAuthRequired(t *testing.T) { require_True(t, nc.AuthRequired()) } + +func TestJWTAccountNATSResolverWrongCreds(t *testing.T) { + require_NoLocalOrRemoteConnections := func(account string, srvs ...*Server) { + t.Helper() + 
for _, srv := range srvs { + if acc, ok := srv.accounts.Load(account); ok { + checkAccClientsCount(t, acc.(*Account), 0) + } + } + } + connect := func(url string, credsfile string, acc string, srvs ...*Server) { + t.Helper() + nc := natsConnect(t, url, nats.UserCredentials(credsfile), nats.Timeout(5*time.Second)) + nc.Close() + require_NoLocalOrRemoteConnections(acc, srvs...) + } + createAccountAndUser := func(limit bool, done chan struct{}, pubKey, jwt1, jwt2, creds *string) { + t.Helper() + kp, _ := nkeys.CreateAccount() + *pubKey, _ = kp.PublicKey() + claim := jwt.NewAccountClaims(*pubKey) + var err error + *jwt1, err = claim.Encode(oKp) + require_NoError(t, err) + *jwt2, err = claim.Encode(oKp) + require_NoError(t, err) + ukp, _ := nkeys.CreateUser() + seed, _ := ukp.Seed() + upub, _ := ukp.PublicKey() + uclaim := newJWTTestUserClaims() + uclaim.Subject = upub + ujwt, err := uclaim.Encode(kp) + require_NoError(t, err) + *creds = genCredsFile(t, ujwt, seed) + done <- struct{}{} + } + // Create Accounts and corresponding user creds. + doneChan := make(chan struct{}, 4) + defer close(doneChan) + var syspub, sysjwt, dummy1, sysCreds string + createAccountAndUser(false, doneChan, &syspub, &sysjwt, &dummy1, &sysCreds) + + var apub, ajwt1, ajwt2, aCreds string + createAccountAndUser(true, doneChan, &apub, &ajwt1, &ajwt2, &aCreds) + + var bpub, bjwt1, bjwt2, bCreds string + createAccountAndUser(true, doneChan, &bpub, &bjwt1, &bjwt2, &bCreds) + + // The one that is going to be missing. + var cpub, cjwt1, cjwt2, cCreds string + createAccountAndUser(true, doneChan, &cpub, &cjwt1, &cjwt2, &cCreds) + for i := 0; i < cap(doneChan); i++ { + <-doneChan + } + // Create one directory for each server + dirA := t.TempDir() + dirB := t.TempDir() + dirC := t.TempDir() + + // Store accounts on servers A and B, then let C sync on its own. 
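+	// writeJWT seeds a resolver directory directly on disk; the other copies
+	// must arrive via the system account's JWT lookup/update traffic once
+	// the servers are clustered.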
+ writeJWT(t, dirA, apub, ajwt1) + writeJWT(t, dirB, bpub, bjwt1) + + ///////////////////////////////////////// + // // + // Server A: has creds from client A // + // // + ///////////////////////////////////////// + confA := createConfFile(t, []byte(fmt.Sprintf(` + listen: 127.0.0.1:-1 + server_name: srv-A + operator: %s + system_account: %s + debug: true + resolver: { + type: full + dir: '%s' + allow_delete: true + timeout: "1.5s" + interval: "200ms" + } + resolver_preload: { + %s: %s + } + cluster { + name: clust + listen: 127.0.0.1:-1 + no_advertise: true + } + `, ojwt, syspub, dirA, apub, ajwt1))) + sA, _ := RunServerWithConfig(confA) + defer sA.Shutdown() + require_JWTPresent(t, dirA, apub) + + ///////////////////////////////////////// + // // + // Server B: has creds from client B // + // // + ///////////////////////////////////////// + confB := createConfFile(t, []byte(fmt.Sprintf(` + listen: 127.0.0.1:-1 + server_name: srv-B + operator: %s + system_account: %s + resolver: { + type: full + dir: '%s' + allow_delete: true + timeout: "1.5s" + interval: "200ms" + } + cluster { + name: clust + listen: 127.0.0.1:-1 + no_advertise: true + routes [ + nats-route://127.0.0.1:%d + ] + } + `, ojwt, syspub, dirB, sA.opts.Cluster.Port))) + sB, _ := RunServerWithConfig(confB) + defer sB.Shutdown() + + ///////////////////////////////////////// + // // + // Server C: has no creds // + // // + ///////////////////////////////////////// + fmtC := ` + listen: 127.0.0.1:-1 + server_name: srv-C + operator: %s + system_account: %s + resolver: { + type: full + dir: '%s' + allow_delete: true + timeout: "1.5s" + interval: "200ms" + } + cluster { + name: clust + listen: 127.0.0.1:-1 + no_advertise: true + routes [ + nats-route://127.0.0.1:%d + ] + } + ` + confClongTTL := createConfFile(t, []byte(fmt.Sprintf(fmtC, ojwt, syspub, dirC, sA.opts.Cluster.Port))) + sC, _ := RunServerWithConfig(confClongTTL) // use long ttl to assure it is not kicking + defer sC.Shutdown() + + // startup cluster + checkClusterFormed(t, sA, sB, sC) + time.Sleep(1 * time.Second) // wait for the protocol to converge + // // Check all accounts + require_JWTPresent(t, dirA, apub) // was already present on startup + require_JWTPresent(t, dirB, apub) // was copied from server A + require_JWTPresent(t, dirA, bpub) // was copied from server B + require_JWTPresent(t, dirB, bpub) // was already present on startup + + // There should be no state about the missing account. + require_JWTAbsent(t, dirA, cpub) + require_JWTAbsent(t, dirB, cpub) + require_JWTAbsent(t, dirC, cpub) + + // system account client can connect to every server + connect(sA.ClientURL(), sysCreds, "") + connect(sB.ClientURL(), sysCreds, "") + connect(sC.ClientURL(), sysCreds, "") + + // A and B clients can connect to any server. + connect(sA.ClientURL(), aCreds, "") + connect(sB.ClientURL(), aCreds, "") + connect(sC.ClientURL(), aCreds, "") + connect(sA.ClientURL(), bCreds, "") + connect(sB.ClientURL(), bCreds, "") + connect(sC.ClientURL(), bCreds, "") + + // Check that trying to connect with bad credentials should not hang until the fetch timeout + // and instead return a faster response when an account is not found. 
+	_, err := nats.Connect(sC.ClientURL(), nats.UserCredentials(cCreds), nats.Timeout(500*time.Millisecond))
+	if !errors.Is(err, nats.ErrAuthorization) {
+		t.Fatalf("Expected auth error: %v", err)
+	}
+}
diff --git a/server/leafnode.go b/server/leafnode.go
index 7716c719b..3f7c445e6 100644
--- a/server/leafnode.go
+++ b/server/leafnode.go
@@ -1,4 +1,4 @@
-// Copyright 2019-2022 The NATS Authors
+// Copyright 2019-2023 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -39,29 +39,31 @@ import (
 	"github.com/nats-io/nuid"
 )
 
-// Warning when user configures leafnode TLS insecure
-const leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"
+const (
+	// Warning when user configures leafnode TLS insecure
+	leafnodeTLSInsecureWarning = "TLS certificate chain and hostname of solicited leafnodes will not be verified. DO NOT USE IN PRODUCTION!"
 
-// When a loop is detected, delay the reconnect of solicited connection.
-const leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second
+	// When a loop is detected, delay the reconnect of solicited connection.
+	leafNodeReconnectDelayAfterLoopDetected = 30 * time.Second
 
-// When a server receives a message causing a permission violation, the
-// connection is closed and it won't attempt to reconnect for that long.
-const leafNodeReconnectAfterPermViolation = 30 * time.Second
+	// When a server receives a message causing a permission violation, the
+	// connection is closed and it won't attempt to reconnect for that long.
+	leafNodeReconnectAfterPermViolation = 30 * time.Second
 
-// When we have the same cluster name as the hub.
-const leafNodeReconnectDelayAfterClusterNameSame = 30 * time.Second
+	// When we have the same cluster name as the hub.
+	leafNodeReconnectDelayAfterClusterNameSame = 30 * time.Second
 
-// Prefix for loop detection subject
-const leafNodeLoopDetectionSubjectPrefix = "$LDS."
+	// Prefix for loop detection subject
+	leafNodeLoopDetectionSubjectPrefix = "$LDS."
 
-// Path added to URL to indicate to WS server that the connection is a
-// LEAF connection as opposed to a CLIENT.
-const leafNodeWSPath = "/leafnode"
+	// Path added to URL to indicate to WS server that the connection is a
+	// LEAF connection as opposed to a CLIENT.
+	leafNodeWSPath = "/leafnode"
 
-// This is the time the server will wait, when receiving a CONNECT,
-// before closing the connection if the required minimum version is not met.
-const leafNodeWaitBeforeClose = 5 * time.Second
+	// This is the time the server will wait, when receiving a CONNECT,
+	// before closing the connection if the required minimum version is not met.
+	leafNodeWaitBeforeClose = 5 * time.Second
+)
 
 type leaf struct {
 	// We have any auth stuff here for solicited connections.
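One of the hunks below tightens when JetStream domain mappings are installed on a leafnode connection: the account must be resolved (acc != nil) and JetStream enabled, not merely a domain configured. For orientation, a hub/leaf pair with distinct JetStream domains in the embedded-config style the tests in this patch use; the server names, fixed port and store directories are illustrative only, and the createConfFile helper comes from the test files:

    // Sketch only: the hub listens for leafnodes, the leaf solicits it;
    // each side runs JetStream under its own domain, which is the setup
    // the cross-domain mappings exist for.
    hubConf := createConfFile(t, []byte(`
        listen: 127.0.0.1:-1
        server_name: hub
        jetstream: {domain: "hub", store_dir: '/tmp/js-hub'}
        leafnodes { listen: 127.0.0.1:7422 }
    `))
    leafConf := createConfFile(t, []byte(`
        listen: 127.0.0.1:-1
        server_name: leaf
        jetstream: {domain: "leaf", store_dir: '/tmp/js-leaf'}
        leafnodes { remotes = [ { url: "nats://127.0.0.1:7422" } ] }
    `))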
@@ -695,7 +697,7 @@ func (s *Server) startLeafNodeAcceptLoop() { s.leafNodeInfo = info // Possibly override Host/Port and set IP based on Cluster.Advertise if err := s.setLeafNodeInfoHostPortAndIP(); err != nil { - s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", s.opts.LeafNode.Advertise, err) + s.Fatalf("Error setting leafnode INFO with LeafNode.Advertise value of %s, err=%v", opts.LeafNode.Advertise, err) l.Close() s.mu.Unlock() return @@ -1420,7 +1422,7 @@ func (s *Server) addLeafNodeConnection(c *client, srvName, clusterName string, c } // If we have a specified JetStream domain we will want to add a mapping to // allow access cross domain for each non-system account. - if opts.JetStreamDomain != _EMPTY_ && acc != sysAcc && opts.JetStream { + if opts.JetStreamDomain != _EMPTY_ && opts.JetStream && acc != nil && acc != sysAcc { for src, dest := range generateJSMappingTable(opts.JetStreamDomain) { if err := acc.AddMapping(src, dest); err != nil { c.Debugf("Error adding JetStream domain mapping: %s", err.Error()) @@ -1579,6 +1581,11 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro c.mu.Unlock() + // Register the cluster, even if empty, as long as we are acting as a hub. + if !proto.Hub { + c.acc.registerLeafNodeCluster(proto.Cluster) + } + // Add in the leafnode here since we passed through auth at this point. s.addLeafNodeConnection(c, proto.Name, proto.Cluster, true) @@ -1632,11 +1639,15 @@ func (s *Server) initLeafNodeSmapAndSendSubs(c *client) { return } // Collect all account subs here. - _subs := [32]*subscription{} + _subs := [1024]*subscription{} subs := _subs[:0] ims := []string{} - acc.mu.Lock() + // Hold the client lock otherwise there can be a race and miss some subs. + c.mu.Lock() + defer c.mu.Unlock() + + acc.mu.RLock() accName := acc.Name accNTag := acc.nameTag @@ -1675,11 +1686,15 @@ func (s *Server) initLeafNodeSmapAndSendSubs(c *client) { // Create a unique subject that will be used for loop detection. lds := acc.lds + acc.mu.RUnlock() + + // Check if we have to create the LDS. if lds == _EMPTY_ { lds = leafNodeLoopDetectionSubjectPrefix + nuid.Next() + acc.mu.Lock() acc.lds = lds + acc.mu.Unlock() } - acc.mu.Unlock() // Now check for gateway interest. Leafnodes will put this into // the proper mode to propagate, but they are not held in the account. @@ -1707,7 +1722,6 @@ func (s *Server) initLeafNodeSmapAndSendSubs(c *client) { } // Now walk the results and add them to our smap - c.mu.Lock() rc := c.leaf.remoteCluster c.leaf.smap = make(map[string]int32) for _, sub := range subs { @@ -1773,7 +1787,6 @@ func (s *Server) initLeafNodeSmapAndSendSubs(c *client) { c.mu.Unlock() }) } - c.mu.Unlock() } // updateInterestForAccountOnGateway called from gateway code when processing RS+ and RS-. @@ -1783,53 +1796,76 @@ func (s *Server) updateInterestForAccountOnGateway(accName string, sub *subscrip s.Debugf("No or bad account for %q, failed to update interest from gateway", accName) return } - s.updateLeafNodes(acc, sub, delta) + acc.updateLeafNodes(sub, delta) } -// updateLeafNodes will make sure to update the smap for the subscription. Will -// also forward to all leaf nodes as needed. -func (s *Server) updateLeafNodes(acc *Account, sub *subscription, delta int32) { +// updateLeafNodes will make sure to update the account smap for the subscription. +// Will also forward to all leaf nodes as needed. 
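+// The cheap checks (no leafnodes at all, isolated cluster) are done under the
+// account read lock; the walk over connected leafnodes happens under the
+// dedicated list lock (acc.lmu) so routes and gateways are not blocked while
+// the update is fanned out.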
+func (acc *Account) updateLeafNodes(sub *subscription, delta int32) { if acc == nil || sub == nil { return } - _l := [32]*client{} - leafs := _l[:0] + // We will do checks for no leafnodes and same cluster here inline and under the + // general account read lock. + // If we feel we need to update the leafnodes we will do that out of line to avoid + // blocking routes or GWs. - // Grab all leaf nodes. Ignore a leafnode if sub's client is a leafnode and matches. acc.mu.RLock() - for _, ln := range acc.lleafs { - if ln != sub.client { - leafs = append(leafs, ln) - } + // First check if we even have leafnodes here. + if acc.nleafs == 0 { + acc.mu.RUnlock() + return + } + + // Is this a loop detection subject. + isLDS := bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix)) + + // Capture the cluster even if its empty. + cluster := _EMPTY_ + if sub.origin != nil { + cluster = string(sub.origin) } + + // If we have an isolated cluster we can return early, as long as it is not a loop detection subject. + // Empty clusters will return false for the check. + if !isLDS && acc.isLeafNodeClusterIsolated(cluster) { + acc.mu.RUnlock() + return + } + + // We can release the general account lock. acc.mu.RUnlock() - for _, ln := range leafs { - // Check to make sure this sub does not have an origin cluster than matches the leafnode. + // We can hold the list lock here to avoid having to copy a large slice. + acc.lmu.RLock() + defer acc.lmu.RUnlock() + + // Do this once. + subject := string(sub.subject) + + // Walk the connected leafnodes. + for _, ln := range acc.lleafs { + if ln == sub.client { + continue + } + // Check to make sure this sub does not have an origin cluster that matches the leafnode. ln.mu.Lock() - skip := (sub.origin != nil && string(sub.origin) == ln.remoteCluster()) || !ln.canSubscribe(string(sub.subject)) + skip := (cluster != _EMPTY_ && cluster == ln.remoteCluster()) || (delta > 0 && !ln.canSubscribe(subject)) // If skipped, make sure that we still let go the "$LDS." subscription that allows // the detection of a loop. - if skip && bytes.HasPrefix(sub.subject, []byte(leafNodeLoopDetectionSubjectPrefix)) { - skip = false + if isLDS || !skip { + ln.updateSmap(sub, delta) } ln.mu.Unlock() - if skip { - continue - } - ln.updateSmap(sub, delta) } } // This will make an update to our internal smap and determine if we should send out // an interest update to the remote side. +// Lock should be held. func (c *client) updateSmap(sub *subscription, delta int32) { - key := keyFromSub(sub) - - c.mu.Lock() if c.leaf.smap == nil { - c.mu.Unlock() return } @@ -1837,7 +1873,6 @@ func (c *client) updateSmap(sub *subscription, delta int32) { skind := sub.client.kind updateClient := skind == CLIENT || skind == SYSTEM || skind == JETSTREAM || skind == ACCOUNT if c.isSpokeLeafNode() && !(updateClient || (skind == LEAF && !sub.client.isSpokeLeafNode())) { - c.mu.Unlock() return } @@ -1850,12 +1885,16 @@ func (c *client) updateSmap(sub *subscription, delta int32) { c.leaf.tsubt.Stop() c.leaf.tsubt = nil } - c.mu.Unlock() return } } - n := c.leaf.smap[key] + key := keyFromSub(sub) + n, ok := c.leaf.smap[key] + if delta < 0 && !ok { + return + } + // We will update if its a queue, if count is zero (or negative), or we were 0 and are N > 0. 
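+	// Plain subs only need an update at the 0 <-> 1 interest boundary, since
+	// the remote side tracks interest rather than exact counts; queue subs
+	// always propagate because the queue weight is part of the protocol.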
update := sub.queue != nil || n == 0 || n+delta <= 0 n += delta @@ -1867,7 +1906,6 @@ func (c *client) updateSmap(sub *subscription, delta int32) { if update { c.sendLeafNodeSubUpdate(key, n) } - c.mu.Unlock() } // Used to force add subjects to the subject map. @@ -2065,8 +2103,11 @@ func (c *client) processLeafSub(argo []byte) (err error) { spoke := c.isSpokeLeafNode() c.mu.Unlock() - if err := c.addShadowSubscriptions(acc, sub); err != nil { - c.Errorf(err.Error()) + // Only add in shadow subs if a new sub or qsub. + if osub == nil { + if err := c.addShadowSubscriptions(acc, sub); err != nil { + c.Errorf(err.Error()) + } } // If we are not solicited, treat leaf node subscriptions similar to a @@ -2081,7 +2122,7 @@ func (c *client) processLeafSub(argo []byte) (err error) { } // Now check on leafnode updates for other leaf nodes. We understand solicited // and non-solicited state in this call so we will do the right thing. - srv.updateLeafNodes(acc, sub, delta) + acc.updateLeafNodes(sub, delta) return nil } @@ -2138,7 +2179,7 @@ func (c *client) processLeafUnsub(arg []byte) error { } } // Now check on leafnode updates for other leaf nodes. - srv.updateLeafNodes(acc, sub, -1) + acc.updateLeafNodes(sub, -1) return nil } diff --git a/server/leafnode_test.go b/server/leafnode_test.go index 3f7016651..92a8314df 100644 --- a/server/leafnode_test.go +++ b/server/leafnode_test.go @@ -1,4 +1,4 @@ -// Copyright 2019-2021 The NATS Authors +// Copyright 2019-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -2540,7 +2540,7 @@ func TestLeafNodeOperatorBadCfg(t *testing.T) { cfg: ` port: -1 authorization { - users = [{user: "u", password: "p"}]} + users = [{user: "u", password: "p"}] }`, }, { @@ -3876,9 +3876,9 @@ func TestLeafNodeInterestPropagationDaisychain(t *testing.T) { aTmpl := ` port: %d leafnodes { - port: %d - } - }` + port: %d + } + ` confA := createConfFile(t, []byte(fmt.Sprintf(aTmpl, -1, -1))) sA, _ := RunServerWithConfig(confA) @@ -4846,3 +4846,718 @@ func TestLeafNodeDuplicateMsg(t *testing.T) { t.Run("sub_b2_pub_a1", func(t *testing.T) { check(t, b2, a1) }) t.Run("sub_b2_pub_a2", func(t *testing.T) { check(t, b2, a2) }) } + +func TestLeafNodeWithWeightedDQRequestsToSuperClusterWithSeparateAccounts(t *testing.T) { + sc := createJetStreamSuperClusterWithTemplate(t, jsClusterAccountsTempl, 3, 2) + defer sc.shutdown() + + // Now create a leafnode cluster that has 2 LNs, one to each cluster but on separate accounts, ONE and TWO. + var lnTmpl = ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + {{leaf}} + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }} + ` + + var leafFrag = ` + leaf { + listen: 127.0.0.1:-1 + remotes [ + { urls: [ %s ] } + { urls: [ %s ] } + ] + }` + + // We want to have two leaf node connections that join to the same local account on the leafnode servers, + // but connect to different accounts in different clusters. 
+ c1 := sc.clusters[0] // Will connect to account ONE + c2 := sc.clusters[1] // Will connect to account TWO + + genLeafTmpl := func(tmpl string) string { + t.Helper() + + var ln1, ln2 []string + for _, s := range c1.servers { + if s.ClusterName() != c1.name { + continue + } + ln := s.getOpts().LeafNode + ln1 = append(ln1, fmt.Sprintf("nats://one:p@%s:%d", ln.Host, ln.Port)) + } + + for _, s := range c2.servers { + if s.ClusterName() != c2.name { + continue + } + ln := s.getOpts().LeafNode + ln2 = append(ln2, fmt.Sprintf("nats://two:p@%s:%d", ln.Host, ln.Port)) + } + return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, strings.Join(ln1, ", "), strings.Join(ln2, ", ")), 1) + } + + tmpl := strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "SA"), 1) + tmpl = genLeafTmpl(tmpl) + + ln := createJetStreamCluster(t, tmpl, "SA", "SA-", 3, 22280, false) + ln.waitOnClusterReady() + defer ln.shutdown() + + for _, s := range ln.servers { + checkLeafNodeConnectedCount(t, s, 2) + } + + // Now connect DQ subscribers to each cluster and they separate accounts, and make sure we get the right behavior, balanced between + // them when requests originate from the leaf cluster. + + // Create 5 clients for each cluster / account + var c1c, c2c []*nats.Conn + for i := 0; i < 5; i++ { + nc1, _ := jsClientConnect(t, c1.randomServer(), nats.UserInfo("one", "p")) + defer nc1.Close() + c1c = append(c1c, nc1) + nc2, _ := jsClientConnect(t, c2.randomServer(), nats.UserInfo("two", "p")) + defer nc2.Close() + c2c = append(c2c, nc2) + } + + createSubs := func(num int, conns []*nats.Conn) (subs []*nats.Subscription) { + for i := 0; i < num; i++ { + nc := conns[rand.Intn(len(conns))] + sub, err := nc.QueueSubscribeSync("REQUEST", "MC") + require_NoError(t, err) + subs = append(subs, sub) + nc.Flush() + } + // Let subs propagate. + time.Sleep(100 * time.Millisecond) + return subs + } + closeSubs := func(subs []*nats.Subscription) { + for _, sub := range subs { + sub.Unsubscribe() + } + } + + // Simple test first. + subs1 := createSubs(1, c1c) + defer closeSubs(subs1) + subs2 := createSubs(1, c2c) + defer closeSubs(subs2) + + sendRequests := func(num int) { + t.Helper() + // Now connect to the leaf cluster and send some requests. + nc, _ := jsClientConnect(t, ln.randomServer()) + defer nc.Close() + + for i := 0; i < num; i++ { + require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) + } + nc.Flush() + } + + pending := func(subs []*nats.Subscription) (total int) { + t.Helper() + for _, sub := range subs { + n, _, err := sub.Pending() + require_NoError(t, err) + total += n + } + return total + } + + num := 1000 + checkAllReceived := func() error { + total := pending(subs1) + pending(subs2) + if total == num { + return nil + } + return fmt.Errorf("Not all received: %d vs %d", total, num) + } + + checkBalanced := func(total, pc1, pc2 int) { + t.Helper() + tf := float64(total) + e1 := tf * (float64(pc1) / 100.00) + e2 := tf * (float64(pc2) / 100.00) + delta := tf / 10 + p1 := float64(pending(subs1)) + if p1 < e1-delta || p1 > e1+delta { + t.Fatalf("Value out of range for subs1, expected %v got %v", e1, p1) + } + p2 := float64(pending(subs2)) + if p2 < e2-delta || p2 > e2+delta { + t.Fatalf("Value out of range for subs2, expected %v got %v", e2, p2) + } + } + + // Now connect to the leaf cluster and send some requests. 
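+	// checkBalanced above allows a +/-10% band around the expected split:
+	// for percentages pc1/pc2 it expects pending(subs1) to land within
+	// total/10 of total*pc1/100, and likewise for subs2.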
+ + // Simple 50/50 + sendRequests(num) + checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) + checkBalanced(num, 50, 50) + + closeSubs(subs1) + closeSubs(subs2) + + // Now test unbalanced. 10/90 + subs1 = createSubs(1, c1c) + defer closeSubs(subs1) + subs2 = createSubs(9, c2c) + defer closeSubs(subs2) + + sendRequests(num) + checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) + checkBalanced(num, 10, 90) + + // Now test draining the subs as we are sending from an initial balanced situation simulating a draining of a cluster. + + closeSubs(subs1) + closeSubs(subs2) + subs1, subs2 = nil, nil + + // These subs slightly different. + var r1, r2 atomic.Uint64 + for i := 0; i < 20; i++ { + nc := c1c[rand.Intn(len(c1c))] + sub, err := nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r1.Add(1) }) + require_NoError(t, err) + subs1 = append(subs1, sub) + nc.Flush() + + nc = c2c[rand.Intn(len(c2c))] + sub, err = nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r2.Add(1) }) + require_NoError(t, err) + subs2 = append(subs2, sub) + nc.Flush() + } + defer closeSubs(subs1) + defer closeSubs(subs2) + + nc, _ := jsClientConnect(t, ln.randomServer()) + defer nc.Close() + + for i, dindex := 0, 1; i < num; i++ { + require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) + // Check if we have more to simulate draining. + // Will drain within first ~100 requests using 20% rand test below. + // Will leave 1 behind. + if dindex < len(subs1)-1 && rand.Intn(6) > 4 { + sub := subs1[dindex] + dindex++ + sub.Drain() + } + } + nc.Flush() + + checkFor(t, time.Second, 200*time.Millisecond, func() error { + total := int(r1.Load() + r2.Load()) + if total == num { + return nil + } + return fmt.Errorf("Not all received: %d vs %d", total, num) + }) + require_True(t, r2.Load() > r1.Load()) +} + +func TestLeafNodeWithWeightedDQRequestsToSuperClusterWithStreamImportAccounts(t *testing.T) { + var tmpl = ` + listen: 127.0.0.1:-1 + + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + leaf { listen: 127.0.0.1:-1 } + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { + EFG { + users = [ { user: "efg", pass: "p" } ] + jetstream: enabled + imports [ + { stream: { account: STL, subject: "REQUEST"} } + { stream: { account: KSC, subject: "REQUEST"} } + ] + exports [ { stream: "RESPONSE" } ] + } + STL { + users = [ { user: "stl", pass: "p" } ] + exports [ { stream: "REQUEST" } ] + imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] + } + KSC { + users = [ { user: "ksc", pass: "p" } ] + exports [ { stream: "REQUEST" } ] + imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] + } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + }` + + sc := createJetStreamSuperClusterWithTemplate(t, tmpl, 5, 2) + defer sc.shutdown() + + // Now create a leafnode cluster that has 2 LNs, one to each cluster but on separate accounts, STL and KSC. + var lnTmpl = ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + {{leaf}} + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" 
} ] }} + ` + + var leafFrag = ` + leaf { + listen: 127.0.0.1:-1 + remotes [ + { urls: [ %s ] } + { urls: [ %s ] } + { urls: [ %s ] ; deny_export: [REQUEST, RESPONSE], deny_import: RESPONSE } + ] + }` + + // We want to have two leaf node connections that join to the same local account on the leafnode servers, + // but connect to different accounts in different clusters. + c1 := sc.clusters[0] // Will connect to account KSC + c2 := sc.clusters[1] // Will connect to account STL + + genLeafTmpl := func(tmpl string) string { + t.Helper() + + var ln1, ln2, ln3 []string + for _, s := range c1.servers { + if s.ClusterName() != c1.name { + continue + } + ln := s.getOpts().LeafNode + ln1 = append(ln1, fmt.Sprintf("nats://ksc:p@%s:%d", ln.Host, ln.Port)) + } + + for _, s := range c2.servers { + if s.ClusterName() != c2.name { + continue + } + ln := s.getOpts().LeafNode + ln2 = append(ln2, fmt.Sprintf("nats://stl:p@%s:%d", ln.Host, ln.Port)) + ln3 = append(ln3, fmt.Sprintf("nats://efg:p@%s:%d", ln.Host, ln.Port)) + } + return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, strings.Join(ln1, ", "), strings.Join(ln2, ", "), strings.Join(ln3, ", ")), 1) + } + + tmpl = strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "SA"), 1) + tmpl = genLeafTmpl(tmpl) + + ln := createJetStreamCluster(t, tmpl, "SA", "SA-", 3, 22280, false) + ln.waitOnClusterReady() + defer ln.shutdown() + + for _, s := range ln.servers { + checkLeafNodeConnectedCount(t, s, 3) + } + + // Now connect DQ subscribers to each cluster but to the global account. + + // Create 5 clients for each cluster / account + var c1c, c2c []*nats.Conn + for i := 0; i < 5; i++ { + nc1, _ := jsClientConnect(t, c1.randomServer(), nats.UserInfo("efg", "p")) + defer nc1.Close() + c1c = append(c1c, nc1) + nc2, _ := jsClientConnect(t, c2.randomServer(), nats.UserInfo("efg", "p")) + defer nc2.Close() + c2c = append(c2c, nc2) + } + + createSubs := func(num int, conns []*nats.Conn) (subs []*nats.Subscription) { + for i := 0; i < num; i++ { + nc := conns[rand.Intn(len(conns))] + sub, err := nc.QueueSubscribeSync("REQUEST", "MC") + require_NoError(t, err) + subs = append(subs, sub) + nc.Flush() + } + // Let subs propagate. + time.Sleep(100 * time.Millisecond) + return subs + } + closeSubs := func(subs []*nats.Subscription) { + for _, sub := range subs { + sub.Unsubscribe() + } + } + + // Simple test first. + subs1 := createSubs(1, c1c) + defer closeSubs(subs1) + subs2 := createSubs(1, c2c) + defer closeSubs(subs2) + + sendRequests := func(num int) { + t.Helper() + // Now connect to the leaf cluster and send some requests. 
+ nc, _ := jsClientConnect(t, ln.randomServer()) + defer nc.Close() + + for i := 0; i < num; i++ { + require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) + } + nc.Flush() + } + + pending := func(subs []*nats.Subscription) (total int) { + t.Helper() + for _, sub := range subs { + n, _, err := sub.Pending() + require_NoError(t, err) + total += n + } + return total + } + + num := 1000 + checkAllReceived := func() error { + total := pending(subs1) + pending(subs2) + if total == num { + return nil + } + return fmt.Errorf("Not all received: %d vs %d", total, num) + } + + checkBalanced := func(total, pc1, pc2 int) { + t.Helper() + tf := float64(total) + e1 := tf * (float64(pc1) / 100.00) + e2 := tf * (float64(pc2) / 100.00) + delta := tf / 10 + p1 := float64(pending(subs1)) + if p1 < e1-delta || p1 > e1+delta { + t.Fatalf("Value out of range for subs1, expected %v got %v", e1, p1) + } + p2 := float64(pending(subs2)) + if p2 < e2-delta || p2 > e2+delta { + t.Fatalf("Value out of range for subs2, expected %v got %v", e2, p2) + } + } + + // Now connect to the leaf cluster and send some requests. + + // Simple 50/50 + sendRequests(num) + checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) + checkBalanced(num, 50, 50) + + closeSubs(subs1) + closeSubs(subs2) + + // Now test unbalanced. 10/90 + subs1 = createSubs(1, c1c) + defer closeSubs(subs1) + subs2 = createSubs(9, c2c) + defer closeSubs(subs2) + + sendRequests(num) + checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) + checkBalanced(num, 10, 90) + + closeSubs(subs1) + closeSubs(subs2) + + // Now test unbalanced. 80/20 + subs1 = createSubs(80, c1c) + defer closeSubs(subs1) + subs2 = createSubs(20, c2c) + defer closeSubs(subs2) + + sendRequests(num) + checkFor(t, time.Second, 200*time.Millisecond, checkAllReceived) + checkBalanced(num, 80, 20) + + // Now test draining the subs as we are sending from an initial balanced situation simulating a draining of a cluster. + + closeSubs(subs1) + closeSubs(subs2) + subs1, subs2 = nil, nil + + // These subs slightly different. + var r1, r2 atomic.Uint64 + for i := 0; i < 20; i++ { + nc := c1c[rand.Intn(len(c1c))] + sub, err := nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r1.Add(1) }) + require_NoError(t, err) + subs1 = append(subs1, sub) + nc.Flush() + + nc = c2c[rand.Intn(len(c2c))] + sub, err = nc.QueueSubscribe("REQUEST", "MC", func(m *nats.Msg) { r2.Add(1) }) + require_NoError(t, err) + subs2 = append(subs2, sub) + nc.Flush() + } + defer closeSubs(subs1) + defer closeSubs(subs2) + + nc, _ := jsClientConnect(t, ln.randomServer()) + defer nc.Close() + + for i, dindex := 0, 1; i < num; i++ { + require_NoError(t, nc.Publish("REQUEST", []byte("HELP"))) + // Check if we have more to simulate draining. + // Will drain within first ~100 requests using 20% rand test below. + // Will leave 1 behind. + if dindex < len(subs1)-1 && rand.Intn(6) > 4 { + sub := subs1[dindex] + dindex++ + sub.Drain() + } + } + nc.Flush() + + checkFor(t, time.Second, 200*time.Millisecond, func() error { + total := int(r1.Load() + r2.Load()) + if total == num { + return nil + } + return fmt.Errorf("Not all received: %d vs %d", total, num) + }) + require_True(t, r2.Load() > r1.Load()) + + // Now check opposite flow for responses. + + // Create 10 subscribers. 
+ var rsubs []*nats.Subscription + + for i := 0; i < 10; i++ { + nc, _ := jsClientConnect(t, ln.randomServer()) + defer nc.Close() + sub, err := nc.QueueSubscribeSync("RESPONSE", "SA") + require_NoError(t, err) + nc.Flush() + rsubs = append(rsubs, sub) + } + + nc, _ = jsClientConnect(t, ln.randomServer()) + defer nc.Close() + _, err := nc.SubscribeSync("RESPONSE") + require_NoError(t, err) + nc.Flush() + + // Now connect and send responses from EFG in cloud. + nc, _ = jsClientConnect(t, sc.randomServer(), nats.UserInfo("efg", "p")) + + for i := 0; i < 100; i++ { + require_NoError(t, nc.Publish("RESPONSE", []byte("OK"))) + } + nc.Flush() + + checkAllRespReceived := func() error { + p := pending(rsubs) + if p == 100 { + return nil + } + return fmt.Errorf("Not all responses received: %d vs %d", p, 100) + } + + checkFor(t, time.Second, 200*time.Millisecond, checkAllRespReceived) +} + +func TestLeafNodeWithWeightedDQResponsesWithStreamImportAccountsWithUnsub(t *testing.T) { + var tmpl = ` + listen: 127.0.0.1:-1 + + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + leaf { listen: 127.0.0.1:-1 } + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { + EFG { + users = [ { user: "efg", pass: "p" } ] + jetstream: enabled + exports [ { stream: "RESPONSE" } ] + } + STL { + users = [ { user: "stl", pass: "p" } ] + imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] + } + KSC { + users = [ { user: "ksc", pass: "p" } ] + imports [ { stream: { account: EFG, subject: "RESPONSE"} } ] + } + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + }` + + c := createJetStreamClusterWithTemplate(t, tmpl, "US-CENTRAL", 3) + defer c.shutdown() + + // Now create a leafnode cluster that has 2 LNs, one to each cluster but on separate accounts, STL and KSC. + var lnTmpl = ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + {{leaf}} + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] }} + ` + + var leafFrag = ` + leaf { + listen: 127.0.0.1:-1 + remotes [ { urls: [ %s ] } ] + }` + + genLeafTmpl := func(tmpl string) string { + t.Helper() + + var ln []string + for _, s := range c.servers { + lno := s.getOpts().LeafNode + ln = append(ln, fmt.Sprintf("nats://ksc:p@%s:%d", lno.Host, lno.Port)) + } + return strings.Replace(tmpl, "{{leaf}}", fmt.Sprintf(leafFrag, strings.Join(ln, ", ")), 1) + } + + tmpl = strings.Replace(lnTmpl, "store_dir:", fmt.Sprintf(`domain: "%s", store_dir:`, "SA"), 1) + tmpl = genLeafTmpl(tmpl) + + ln := createJetStreamCluster(t, tmpl, "SA", "SA-", 3, 22280, false) + ln.waitOnClusterReady() + defer ln.shutdown() + + for _, s := range ln.servers { + checkLeafNodeConnectedCount(t, s, 1) + } + + // Create 10 subscribers. 
+ var rsubs []*nats.Subscription + + closeSubs := func(subs []*nats.Subscription) { + for _, sub := range subs { + sub.Unsubscribe() + } + } + + checkAllRespReceived := func() error { + t.Helper() + var total int + for _, sub := range rsubs { + n, _, err := sub.Pending() + require_NoError(t, err) + total += n + } + if total == 100 { + return nil + } + return fmt.Errorf("Not all responses received: %d vs %d", total, 100) + } + + s := ln.randomServer() + for i := 0; i < 4; i++ { + nc, _ := jsClientConnect(t, s) + defer nc.Close() + sub, err := nc.QueueSubscribeSync("RESPONSE", "SA") + require_NoError(t, err) + nc.Flush() + rsubs = append(rsubs, sub) + } + + // Now connect and send responses from EFG in cloud. + nc, _ := jsClientConnect(t, c.randomServer(), nats.UserInfo("efg", "p")) + for i := 0; i < 100; i++ { + require_NoError(t, nc.Publish("RESPONSE", []byte("OK"))) + } + nc.Flush() + + // Make sure all received. + checkFor(t, time.Second, 200*time.Millisecond, checkAllRespReceived) + + checkAccountInterest := func(s *Server, accName string) *SublistResult { + t.Helper() + acc, err := s.LookupAccount(accName) + require_NoError(t, err) + acc.mu.RLock() + r := acc.sl.Match("RESPONSE") + acc.mu.RUnlock() + return r + } + + checkInterest := func() error { + t.Helper() + for _, s := range c.servers { + if r := checkAccountInterest(s, "KSC"); len(r.psubs)+len(r.qsubs) > 0 { + return fmt.Errorf("Subs still present for %q: %+v", "KSC", r) + } + if r := checkAccountInterest(s, "EFG"); len(r.psubs)+len(r.qsubs) > 0 { + return fmt.Errorf("Subs still present for %q: %+v", "EFG", r) + } + } + return nil + } + + // Now unsub them and create new ones on a different server. + closeSubs(rsubs) + rsubs = rsubs[:0] + + // Also restart the server that we had all the rsubs on. + s.Shutdown() + s.WaitForShutdown() + s = ln.restartServer(s) + ln.waitOnClusterReady() + ln.waitOnServerCurrent(s) + + checkFor(t, time.Second, 200*time.Millisecond, checkInterest) + + for i := 0; i < 4; i++ { + nc, _ := jsClientConnect(t, s) + defer nc.Close() + sub, err := nc.QueueSubscribeSync("RESPONSE", "SA") + require_NoError(t, err) + nc.Flush() + rsubs = append(rsubs, sub) + } + + for i := 0; i < 100; i++ { + require_NoError(t, nc.Publish("RESPONSE", []byte("OK"))) + } + nc.Flush() + + // Make sure all received. + checkFor(t, time.Second, 200*time.Millisecond, checkAllRespReceived) + + closeSubs(rsubs) + checkFor(t, time.Second, 200*time.Millisecond, checkInterest) +} diff --git a/server/log.go b/server/log.go index 2d14d3e94..ed7dbf5d0 100644 --- a/server/log.go +++ b/server/log.go @@ -45,7 +45,7 @@ type Logger interface { Tracef(format string, v ...interface{}) // Log a system statement - Systemf(format string, v ...interface{}) + Systemf(format string, v ...interface{}) // ** added by Memphis } // ConfigureLogger configures and sets the logger for the server. 
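The two hunks that follow thread a new srvlog.LogUTC option through NewFileLogger so file-log timestamps can be written in UTC, driven by the new LogtimeUTC option. A minimal standalone sketch, assuming the fork keeps the upstream logger package API (the import path here is an assumption):

    package main

    import (
        srvlog "github.com/nats-io/nats-server/v2/logger" // import path assumed
    )

    func main() {
        // Mirrors what ConfigureLogger now does for file logging:
        // timestamps on, debug/trace off, pid prefix on, clock in UTC.
        log := srvlog.NewFileLogger("/tmp/nats.log", true, false, false, true, srvlog.LogUTC(true))
        defer log.Close()

        log.Noticef("file logger with UTC timestamps ready")
    }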
@@ -69,7 +69,7 @@ func (s *Server) ConfigureLogger() { } if opts.LogFile != "" { - log = srvlog.NewFileLogger(opts.LogFile, opts.Logtime, opts.Debug, opts.Trace, true) + log = srvlog.NewFileLogger(opts.LogFile, opts.Logtime, opts.Debug, opts.Trace, true, srvlog.LogUTC(opts.LogtimeUTC)) if opts.LogSizeLimit > 0 { if l, ok := log.(*srvlog.Logger); ok { l.SetSizeLimit(opts.LogSizeLimit) @@ -164,8 +164,11 @@ func (s *Server) ReOpenLogFile() { if opts.LogFile == "" { s.Noticef("File log re-open ignored, not a file logger") } else { - fileLog := srvlog.NewFileLogger(opts.LogFile, - opts.Logtime, opts.Debug, opts.Trace, true) + fileLog := srvlog.NewFileLogger( + opts.LogFile, opts.Logtime, + opts.Debug, opts.Trace, true, + srvlog.LogUTC(opts.LogtimeUTC), + ) s.SetLogger(fileLog, opts.Debug, opts.Trace) if opts.LogSizeLimit > 0 { fileLog.SetSizeLimit(opts.LogSizeLimit) @@ -181,12 +184,15 @@ func (s *Server) Noticef(format string, v ...interface{}) { }, format, v...) } +// ** added by Memphis func (s *Server) Systemf(format string, v ...interface{}) { s.executeLogCall(func(logger Logger, format string, v ...interface{}) { logger.Systemf(format, v...) }, format, v...) } +// ** added by Memphis + // Errorf logs an error func (s *Server) Errorf(format string, v ...interface{}) { s.executeLogCall(func(logger Logger, format string, v ...interface{}) { @@ -269,6 +275,7 @@ func (s *Server) executeLogCall(f func(logger Logger, format string, v ...interf f(s.logging.logger, format, args...) } +// ** added by Memphis func publishLogToSubjectAndAnalytics(s *Server, label string, log []byte) { copiedLog := copyBytes(log) s.sendLogToSubject(label, copiedLog) @@ -314,3 +321,5 @@ func (s *Server) sendLogToSubject(label string, log []byte) { func (s *Server) getLogSource() string { return s.memphis.serverID } + +// ** added by Memphis diff --git a/server/log_test.go b/server/log_test.go index fda716125..543229351 100644 --- a/server/log_test.go +++ b/server/log_test.go @@ -101,7 +101,7 @@ func TestReOpenLogFile(t *testing.T) { // Set a File log s.opts.LogFile = filepath.Join(t.TempDir(), "test.log") - fileLog := logger.NewFileLogger(s.opts.LogFile, s.opts.Logtime, s.opts.Debug, s.opts.Trace, true) + fileLog := logger.NewFileLogger(s.opts.LogFile, s.opts.Logtime, s.opts.Debug, s.opts.Trace, true, logger.LogUTC(s.opts.LogtimeUTC)) s.SetLogger(fileLog, false, false) // Add some log expectedStr := "This is a Notice" diff --git a/server/memstore.go b/server/memstore.go index 26675aaa7..f7f75f471 100644 --- a/server/memstore.go +++ b/server/memstore.go @@ -1,4 +1,4 @@ -// Copyright 2019-2022 The NATS Authors +// Copyright 2019-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -50,6 +50,7 @@ func newMemStore(cfg *StreamConfig) (*memStore, error) { maxp: cfg.MaxMsgsPer, cfg: *cfg, } + return ms, nil } @@ -100,9 +101,9 @@ func (ms *memStore) UpdateConfig(cfg *StreamConfig) error { // If the value is smaller we need to enforce that. if ms.maxp != 0 && ms.maxp < maxp { lm := uint64(ms.maxp) - for _, ss := range ms.fss { + for subj, ss := range ms.fss { if ss.Msgs > lm { - ms.enforcePerSubjectLimit(ss) + ms.enforcePerSubjectLimit(subj, ss) } } } @@ -146,6 +147,9 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int return ErrMaxBytes } // If we are here we are at a subject maximum, need to determine if dropping last message gives us enough room. 
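+		// ss.First may be stale here: removals only mark firstNeedsUpdate
+		// instead of rescanning, so recalculate before using First to pick
+		// the candidate message to drop.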
+ if ss.firstNeedsUpdate { + ms.recalculateFirstForSubj(subj, ss.First, ss) + } sm, ok := ms.msgs[ss.First] if !ok || memStoreMsgSize(sm.subj, sm.hdr, sm.msg) < uint64(len(msg)+len(hdr)) { return ErrMaxBytes @@ -197,7 +201,7 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int ss.Last = seq // Check per subject limits. if ms.maxp > 0 && ss.Msgs > uint64(ms.maxp) { - ms.enforcePerSubjectLimit(ss) + ms.enforcePerSubjectLimit(subj, ss) } } else { ms.fss[subj] = &SimpleState{Msgs: 1, First: seq, Last: seq} @@ -379,6 +383,9 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje // We will track start and end sequences as we go. for subj, fss := range ms.fss { if isMatch(subj) { + if fss.firstNeedsUpdate { + ms.recalculateFirstForSubj(subj, fss.First, fss) + } if sseq <= fss.First { update(fss) } else if sseq <= fss.Last { @@ -473,6 +480,9 @@ func (ms *memStore) SubjectsState(subject string) map[string]SimpleState { fss := make(map[string]SimpleState) for subj, ss := range ms.fss { if subject == _EMPTY_ || subject == fwcs || subjectIsSubsetMatch(subj, subject) { + if ss.firstNeedsUpdate { + ms.recalculateFirstForSubj(subj, ss.First, ss) + } oss := fss[subj] if oss.First == 0 { // New fss[subj] = *ss @@ -524,11 +534,14 @@ func (ms *memStore) NumPending(sseq uint64, filter string, lastPerSubject bool) // Will check the msg limit for this tracked subject. // Lock should be held. -func (ms *memStore) enforcePerSubjectLimit(ss *SimpleState) { +func (ms *memStore) enforcePerSubjectLimit(subj string, ss *SimpleState) { if ms.maxp <= 0 { return } for nmsgs := ss.Msgs; nmsgs > uint64(ms.maxp); nmsgs = ss.Msgs { + if ss.firstNeedsUpdate { + ms.recalculateFirstForSubj(subj, ss.First, ss) + } if !ms.removeMsg(ss.First, false) { break } @@ -623,10 +636,6 @@ func (ms *memStore) expireMsgs() { // PurgeEx will remove messages based on subject filters, sequence and number of messages to keep. // Will return the number of purged messages. func (ms *memStore) PurgeEx(subject string, sequence, keep uint64) (purged uint64, err error) { - if sequence > 1 && keep > 0 { - return 0, ErrPurgeArgMismatch - } - if subject == _EMPTY_ || subject == fwcs { if keep == 0 && (sequence == 0 || sequence == 1) { return ms.Purge() @@ -724,7 +733,13 @@ func (ms *memStore) Compact(seq uint64) (uint64, error) { ms.removeSeqPerSubject(sm.subj, seq) } } + if purged > ms.state.Msgs { + purged = ms.state.Msgs + } ms.state.Msgs -= purged + if bytes > ms.state.Bytes { + bytes = ms.state.Bytes + } ms.state.Bytes -= bytes } else { // We are compacting past the end of our range. Do purge and set sequences correctly @@ -809,7 +824,13 @@ func (ms *memStore) Truncate(seq uint64) error { ms.state.LastSeq = lsm.seq ms.state.LastTime = time.Unix(0, lsm.ts).UTC() // Update msgs and bytes. 
+	if purged > ms.state.Msgs {
+		purged = ms.state.Msgs
+	}
 	ms.state.Msgs -= purged
+	if bytes > ms.state.Bytes {
+		bytes = ms.state.Bytes
+	}
 	ms.state.Bytes -= bytes
 
 	cb := ms.scb
@@ -865,6 +886,10 @@
 
 	if subject == _EMPTY_ || subject == fwcs {
 		sm, ok = ms.msgs[ms.state.LastSeq]
+	} else if subjectIsLiteral(subject) {
+		if ss := ms.fss[subject]; ss != nil && ss.Msgs > 0 {
+			sm, ok = ms.msgs[ss.Last]
+		}
 	} else if ss := ms.filteredStateLocked(1, subject, true); ss.Msgs > 0 {
 		sm, ok = ms.msgs[ss.Last]
 	}
@@ -896,8 +921,8 @@ func (ms *memStore) LoadNextMsg(filter string, wc bool, start uint64, smp *Store
 
 	isAll := filter == _EMPTY_ || filter == fwcs
 
-	// Skip scan of mb.fss is number of messages in the block are less than
-	// 1/2 the number of subjects in mb.fss. Or we have a wc and lots of fss entries.
+	// Skip the scan of ms.fss if the number of messages to scan is less than
+	// 1/2 the number of subjects in ms.fss, or we have a wildcard and lots of fss entries.
 	const linearScanMaxFSS = 256
 	doLinearScan := isAll || 2*int(ms.state.LastSeq-start) < len(ms.fss) || (wc && len(ms.fss) > linearScanMaxFSS)
 
@@ -920,6 +945,9 @@
 			if ss == nil {
 				continue
 			}
+			if ss.firstNeedsUpdate {
+				ms.recalculateFirstForSubj(subj, ss.First, ss)
+			}
 			if ss.First < fseq {
 				fseq = ss.First
 			}
@@ -1002,19 +1030,27 @@ func (ms *memStore) removeSeqPerSubject(subj string, seq uint64) {
 		return
 	}
 	ss.Msgs--
-	if seq != ss.First {
-		return
-	}
+
 	// If we know we only have 1 msg left don't need to search for next first.
 	if ss.Msgs == 1 {
-		ss.First = ss.Last
-		return
+		if seq == ss.Last {
+			ss.Last = ss.First
+		} else {
+			ss.First = ss.Last
+		}
+		ss.firstNeedsUpdate = false
+	} else {
+		ss.firstNeedsUpdate = seq == ss.First || ss.firstNeedsUpdate
 	}
-	// TODO(dlc) - Might want to optimize this longer term.
-	for tseq := seq + 1; tseq <= ss.Last; tseq++ {
+}
+
+// Will recalculate the first sequence for this subject.
+func (ms *memStore) recalculateFirstForSubj(subj string, startSeq uint64, ss *SimpleState) {
+	for tseq := startSeq + 1; tseq <= ss.Last; tseq++ {
 		if sm := ms.msgs[tseq]; sm != nil && sm.subj == subj {
 			ss.First = tseq
-			break
+			ss.firstNeedsUpdate = false
+			return
 		}
 	}
 }
@@ -1028,17 +1064,24 @@ func (ms *memStore) removeMsg(seq uint64, secure bool) bool {
 		return false
 	}
 
+	// ** added by memphis
 	// send the message to tiered 2 storage if needed
 	tieredStorageEnabled := ms.cfg.TieredStorageEnabled
 	if !secure && !strings.HasPrefix(ms.cfg.Name, "$memphis") && tieredStorageEnabled && serv != nil {
 		serv.sendToTier2Storage(ms, copyBytes(sm.buf), sm.seq, "s3")
 	}
+	// ** added by memphis
 
 	ss = memStoreMsgSize(sm.subj, sm.hdr, sm.msg)
 
 	delete(ms.msgs, seq)
-	ms.state.Msgs--
-	ms.state.Bytes -= ss
+	if ms.state.Msgs > 0 {
+		ms.state.Msgs--
+		if ss > ms.state.Bytes {
+			ss = ms.state.Bytes
+		}
+		ms.state.Bytes -= ss
+	}
 	ms.updateFirstSeq(seq)
 
 	if secure {
@@ -1131,11 +1174,12 @@ func memStoreMsgSize(subj string, hdr, msg []byte) uint64 {
 
 // Delete is same as Stop for memory store.
 func (ms *memStore) Delete() error {
-	ms.Purge()
 	return ms.Stop()
 }
 
 func (ms *memStore) Stop() error {
+	// These can't come back, so stop is same as Delete.
+	ms.Purge()
 	ms.mu.Lock()
 	if ms.ageChk != nil {
 		ms.ageChk.Stop()
@@ -1293,17 +1337,28 @@ func (o *consumerMemStore) UpdateDelivered(dseq, sseq, dc uint64, ts int64) erro
 		}
 		if dc > 1 {
+			if maxdc := uint64(o.cfg.MaxDeliver); maxdc > 0 && dc > maxdc {
+				// Make sure to remove from pending.
+				delete(o.state.Pending, sseq)
+			}
 			if o.state.Redelivered == nil {
 				o.state.Redelivered = make(map[uint64]uint64)
 			}
-			o.state.Redelivered[sseq] = dc - 1
+			// Only update if greater than what we already have.
+			if o.state.Redelivered[sseq] < dc-1 {
+				o.state.Redelivered[sseq] = dc - 1
+			}
 		}
 	} else {
 		// For AckNone just update delivered and ackfloor at the same time.
-		o.state.Delivered.Consumer = dseq
-		o.state.Delivered.Stream = sseq
-		o.state.AckFloor.Consumer = dseq
-		o.state.AckFloor.Stream = sseq
+		if dseq > o.state.Delivered.Consumer {
+			o.state.Delivered.Consumer = dseq
+			o.state.AckFloor.Consumer = dseq
+		}
+		if sseq > o.state.Delivered.Stream {
+			o.state.Delivered.Stream = sseq
+			o.state.AckFloor.Stream = sseq
+		}
 	}
 
 	return nil
diff --git a/server/monitor.go b/server/monitor.go
index 24c912c8a..895f8c410 100644
--- a/server/monitor.go
+++ b/server/monitor.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The NATS Authors
+// Copyright 2013-2023 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -36,19 +36,6 @@ import (
 	"github.com/nats-io/jwt/v2"
 )
 
-// Snapshot this
-var numCores int
-var maxProcs int
-
-func SnapshotMonitorInfo() {
-	numCores = runtime.NumCPU()
-	maxProcs = runtime.GOMAXPROCS(0)
-}
-
-func init() {
-	SnapshotMonitorInfo()
-}
-
 // Connz represents detailed information on current client connections.
 type Connz struct {
 	ID string `json:"server_id"`
@@ -150,6 +137,9 @@ type ConnInfo struct {
 	NameTag    string      `json:"name_tag,omitempty"`
 	Tags       jwt.TagList `json:"tags,omitempty"`
 	MQTTClient string      `json:"mqtt_client,omitempty"` // This is the MQTT client id
+
+	// Internal
+	rtt int64 // For fast sorting
 }
 
 // TLSPeerCert contains basic information about a TLS peer certificate
@@ -203,9 +193,7 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) {
 
 	if opts != nil {
 		// If no sort option given or sort is by uptime, then sort by cid
-		if opts.Sort == _EMPTY_ {
-			sortOpt = ByCid
-		} else {
+		if opts.Sort != _EMPTY_ {
 			sortOpt = opts.Sort
 			if !sortOpt.IsValid() {
 				return nil, fmt.Errorf("invalid sorting option: %s", sortOpt)
@@ -214,9 +202,6 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) {
 
 		// Auth specifics.
 		auth = opts.Username
-		if !auth && (user != _EMPTY_ || acc != _EMPTY_) {
-			return nil, fmt.Errorf("filter by user or account only allowed with auth option")
-		}
 		user = opts.User
 		acc = opts.Account
 		mqttCID = opts.MQTTClient
@@ -286,7 +271,7 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) {
 	}
 
 	// Walk the open client list with server lock held.
-	s.mu.Lock()
+	s.mu.RLock()
 
 	// Default to all clients unless filled in above.
if clist == nil { clist = s.clients @@ -313,9 +298,10 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { if acc != _EMPTY_ && len(closedClients) > 0 { var ccc []*closedClient for _, cc := range closedClients { - if cc.acc == acc { - ccc = append(ccc, cc) + if cc.acc != acc { + continue } + ccc = append(ccc, cc) } c.Total -= (len(closedClients) - len(ccc)) closedClients = ccc @@ -370,7 +356,7 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { continue } // Do user filtering second - if user != _EMPTY_ && client.opts.Username != user { + if user != _EMPTY_ && client.getRawAuthUserLock() != user { continue } // Do mqtt client ID filtering next @@ -381,7 +367,7 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { } } } - s.mu.Unlock() + s.mu.RUnlock() // Filter by subject now if needed. We do this outside of server lock. if filter != _EMPTY_ { @@ -506,13 +492,15 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { case ByLast: sort.Sort(sort.Reverse(byLast{pconns})) case ByIdle: - sort.Sort(sort.Reverse(byIdle{pconns})) + sort.Sort(sort.Reverse(byIdle{pconns, c.Now})) case ByUptime: sort.Sort(byUptime{pconns, time.Now()}) case ByStop: sort.Sort(sort.Reverse(byStop{pconns})) case ByReason: sort.Sort(byReason{pconns}) + case ByRTT: + sort.Sort(sort.Reverse(byRTT{pconns})) } minoff := c.Offset @@ -542,6 +530,10 @@ func (s *Server) Connz(opts *ConnzOptions) (*Connz, error) { // Fills in the ConnInfo from the client. // client should be locked. func (ci *ConnInfo) fill(client *client, nc net.Conn, now time.Time, auth bool) { + // For fast sort if required. + rtt := client.getRTT() + ci.rtt = int64(rtt) + ci.Cid = client.cid ci.MQTTClient = client.getMQTTClientID() ci.Kind = client.kindString() @@ -550,7 +542,7 @@ func (ci *ConnInfo) fill(client *client, nc net.Conn, now time.Time, auth bool) ci.LastActivity = client.last ci.Uptime = myUptime(now.Sub(client.start)) ci.Idle = myUptime(now.Sub(client.last)) - ci.RTT = client.getRTT().String() + ci.RTT = rtt.String() ci.OutMsgs = client.outMsgs ci.OutBytes = client.outBytes ci.NumSubs = uint32(len(client.subs)) @@ -599,7 +591,7 @@ func (c *client) getRTT() time.Duration { if c.rtt == 0 { // If a real client, go ahead and send ping now to get a value // for RTT. For tests and telnet, or if client is closing, etc skip. - if c.opts.Lang != "" { + if c.opts.Lang != _EMPTY_ { c.sendRTTPingLocked() } return 0 @@ -751,6 +743,7 @@ func (s *Server) HandleConnz(w http.ResponseWriter, r *http.Request) { // Routez represents detailed information on current client connections. 
type Routez struct { ID string `json:"server_id"` + Name string `json:"server_name"` Now time.Time `json:"now"` Import *SubjectPermission `json:"import,omitempty"` Export *SubjectPermission `json:"export,omitempty"` @@ -770,6 +763,7 @@ type RoutezOptions struct { type RouteInfo struct { Rid uint64 `json:"rid"` RemoteID string `json:"remote_id"` + RemoteName string `json:"remote_name"` DidSolicit bool `json:"did_solicit"` IsConfigured bool `json:"is_configured"` IP string `json:"ip"` @@ -811,6 +805,7 @@ func (s *Server) Routez(routezOpts *RoutezOptions) (*Routez, error) { rs.Import = perms.Import rs.Export = perms.Export } + rs.Name = s.getOpts().ServerName // Walk the list for _, r := range s.routes { @@ -818,6 +813,7 @@ func (s *Server) Routez(routezOpts *RoutezOptions) (*Routez, error) { ri := &RouteInfo{ Rid: r.cid, RemoteID: r.route.remoteID, + RemoteName: r.route.remoteName, DidSolicit: r.route.didSolicit, IsConfigured: r.route.routeType == Explicit, InMsgs: atomic.LoadInt64(&r.inMsgs), @@ -1124,14 +1120,17 @@ func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) { queues := map[string]monitorIPQueue{} - s.ipQueues.Range(func(k, v interface{}) bool { + s.ipQueues.Range(func(k, v any) bool { + var pending, inProgress int name := k.(string) - queue := v.(interface { + queue, ok := v.(interface { len() int - inProgress() uint64 + inProgress() int64 }) - pending := queue.len() - inProgress := int(queue.inProgress()) + if ok { + pending = queue.len() + inProgress = int(queue.inProgress()) + } if !all && (pending == 0 && inProgress == 0) { return true } else if qfilter != _EMPTY_ && !strings.Contains(name, qfilter) { @@ -1158,6 +1157,7 @@ type Varz struct { AuthRequired bool `json:"auth_required,omitempty"` TLSRequired bool `json:"tls_required,omitempty"` TLSVerify bool `json:"tls_verify,omitempty"` + TLSOCSPPeerVerify bool `json:"tls_ocsp_peer_verify,omitempty"` IP string `json:"ip,omitempty"` ClientConnectURLs []string `json:"connect_urls,omitempty"` WSConnectURLs []string `json:"ws_connect_urls,omitempty"` @@ -1206,6 +1206,7 @@ type Varz struct { TrustedOperatorsClaim []*jwt.OperatorClaims `json:"trusted_operators_claim,omitempty"` SystemAccount string `json:"system_account,omitempty"` PinnedAccountFail uint64 `json:"pinned_account_fails,omitempty"` + OCSPResponseCache OCSPResponseCacheVarz `json:"ocsp_peer_cache,omitempty"` } // JetStreamVarz contains basic runtime information about jetstream @@ -1251,13 +1252,14 @@ type RemoteGatewayOptsVarz struct { // LeafNodeOptsVarz contains monitoring leaf node information type LeafNodeOptsVarz struct { - Host string `json:"host,omitempty"` - Port int `json:"port,omitempty"` - AuthTimeout float64 `json:"auth_timeout,omitempty"` - TLSTimeout float64 `json:"tls_timeout,omitempty"` - TLSRequired bool `json:"tls_required,omitempty"` - TLSVerify bool `json:"tls_verify,omitempty"` - Remotes []RemoteLeafOptsVarz `json:"remotes,omitempty"` + Host string `json:"host,omitempty"` + Port int `json:"port,omitempty"` + AuthTimeout float64 `json:"auth_timeout,omitempty"` + TLSTimeout float64 `json:"tls_timeout,omitempty"` + TLSRequired bool `json:"tls_required,omitempty"` + TLSVerify bool `json:"tls_verify,omitempty"` + Remotes []RemoteLeafOptsVarz `json:"remotes,omitempty"` + TLSOCSPPeerVerify bool `json:"tls_ocsp_peer_verify,omitempty"` } // DenyRules Contains lists of subjects not allowed to be imported/exported @@ -1268,41 +1270,55 @@ type DenyRules struct { // RemoteLeafOptsVarz contains monitoring remote leaf node information type 
RemoteLeafOptsVarz struct { - LocalAccount string `json:"local_account,omitempty"` - TLSTimeout float64 `json:"tls_timeout,omitempty"` - URLs []string `json:"urls,omitempty"` - Deny *DenyRules `json:"deny,omitempty"` + LocalAccount string `json:"local_account,omitempty"` + TLSTimeout float64 `json:"tls_timeout,omitempty"` + URLs []string `json:"urls,omitempty"` + Deny *DenyRules `json:"deny,omitempty"` + TLSOCSPPeerVerify bool `json:"tls_ocsp_peer_verify,omitempty"` } // MQTTOptsVarz contains monitoring MQTT information type MQTTOptsVarz struct { - Host string `json:"host,omitempty"` - Port int `json:"port,omitempty"` - NoAuthUser string `json:"no_auth_user,omitempty"` - AuthTimeout float64 `json:"auth_timeout,omitempty"` - TLSMap bool `json:"tls_map,omitempty"` - TLSTimeout float64 `json:"tls_timeout,omitempty"` - TLSPinnedCerts []string `json:"tls_pinned_certs,omitempty"` - JsDomain string `json:"js_domain,omitempty"` - AckWait time.Duration `json:"ack_wait,omitempty"` - MaxAckPending uint16 `json:"max_ack_pending,omitempty"` + Host string `json:"host,omitempty"` + Port int `json:"port,omitempty"` + NoAuthUser string `json:"no_auth_user,omitempty"` + AuthTimeout float64 `json:"auth_timeout,omitempty"` + TLSMap bool `json:"tls_map,omitempty"` + TLSTimeout float64 `json:"tls_timeout,omitempty"` + TLSPinnedCerts []string `json:"tls_pinned_certs,omitempty"` + JsDomain string `json:"js_domain,omitempty"` + AckWait time.Duration `json:"ack_wait,omitempty"` + MaxAckPending uint16 `json:"max_ack_pending,omitempty"` + TLSOCSPPeerVerify bool `json:"tls_ocsp_peer_verify,omitempty"` } // WebsocketOptsVarz contains monitoring websocket information type WebsocketOptsVarz struct { - Host string `json:"host,omitempty"` - Port int `json:"port,omitempty"` - Advertise string `json:"advertise,omitempty"` - NoAuthUser string `json:"no_auth_user,omitempty"` - JWTCookie string `json:"jwt_cookie,omitempty"` - HandshakeTimeout time.Duration `json:"handshake_timeout,omitempty"` - AuthTimeout float64 `json:"auth_timeout,omitempty"` - NoTLS bool `json:"no_tls,omitempty"` - TLSMap bool `json:"tls_map,omitempty"` - TLSPinnedCerts []string `json:"tls_pinned_certs,omitempty"` - SameOrigin bool `json:"same_origin,omitempty"` - AllowedOrigins []string `json:"allowed_origins,omitempty"` - Compression bool `json:"compression,omitempty"` + Host string `json:"host,omitempty"` + Port int `json:"port,omitempty"` + Advertise string `json:"advertise,omitempty"` + NoAuthUser string `json:"no_auth_user,omitempty"` + JWTCookie string `json:"jwt_cookie,omitempty"` + HandshakeTimeout time.Duration `json:"handshake_timeout,omitempty"` + AuthTimeout float64 `json:"auth_timeout,omitempty"` + NoTLS bool `json:"no_tls,omitempty"` + TLSMap bool `json:"tls_map,omitempty"` + TLSPinnedCerts []string `json:"tls_pinned_certs,omitempty"` + SameOrigin bool `json:"same_origin,omitempty"` + AllowedOrigins []string `json:"allowed_origins,omitempty"` + Compression bool `json:"compression,omitempty"` + TLSOCSPPeerVerify bool `json:"tls_ocsp_peer_verify,omitempty"` +} + +// OCSPResponseCacheVarz contains OCSP response cache information +type OCSPResponseCacheVarz struct { + Type string `json:"cache_type,omitempty"` + Hits int64 `json:"cache_hits,omitempty"` + Misses int64 `json:"cache_misses,omitempty"` + Responses int64 `json:"cached_responses,omitempty"` + Revokes int64 `json:"cached_revoked_responses,omitempty"` + Goods int64 `json:"cached_good_responses,omitempty"` + Unknowns int64 `json:"cached_unknown_responses,omitempty"` } // VarzOptions are 
the options passed to Varz(). @@ -1456,6 +1472,9 @@ func (s *Server) createVarz(pcpu float64, rss int64) *Varz { gatewayTlsReq := gw.TLSConfig != nil leafTlsReq := ln.TLSConfig != nil leafTlsVerify := leafTlsReq && ln.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert + leafTlsOCSPPeerVerify := s.ocspPeerVerify && leafTlsReq && ln.tlsConfigOpts.OCSPPeerConfig != nil && ln.tlsConfigOpts.OCSPPeerConfig.Verify + mqttTlsOCSPPeerVerify := s.ocspPeerVerify && mqtt.TLSConfig != nil && mqtt.tlsConfigOpts.OCSPPeerConfig != nil && mqtt.tlsConfigOpts.OCSPPeerConfig.Verify + wsTlsOCSPPeerVerify := s.ocspPeerVerify && ws.TLSConfig != nil && ws.tlsConfigOpts.OCSPPeerConfig != nil && ws.tlsConfigOpts.OCSPPeerConfig.Verify varz := &Varz{ ID: info.ID, Version: info.Version, @@ -1493,43 +1512,46 @@ func (s *Server) createVarz(pcpu float64, rss int64) *Varz { RejectUnknown: gw.RejectUnknown, }, LeafNode: LeafNodeOptsVarz{ - Host: ln.Host, - Port: ln.Port, - AuthTimeout: ln.AuthTimeout, - TLSTimeout: ln.TLSTimeout, - TLSRequired: leafTlsReq, - TLSVerify: leafTlsVerify, - Remotes: []RemoteLeafOptsVarz{}, + Host: ln.Host, + Port: ln.Port, + AuthTimeout: ln.AuthTimeout, + TLSTimeout: ln.TLSTimeout, + TLSRequired: leafTlsReq, + TLSVerify: leafTlsVerify, + TLSOCSPPeerVerify: leafTlsOCSPPeerVerify, + Remotes: []RemoteLeafOptsVarz{}, }, MQTT: MQTTOptsVarz{ - Host: mqtt.Host, - Port: mqtt.Port, - NoAuthUser: mqtt.NoAuthUser, - AuthTimeout: mqtt.AuthTimeout, - TLSMap: mqtt.TLSMap, - TLSTimeout: mqtt.TLSTimeout, - JsDomain: mqtt.JsDomain, - AckWait: mqtt.AckWait, - MaxAckPending: mqtt.MaxAckPending, + Host: mqtt.Host, + Port: mqtt.Port, + NoAuthUser: mqtt.NoAuthUser, + AuthTimeout: mqtt.AuthTimeout, + TLSMap: mqtt.TLSMap, + TLSTimeout: mqtt.TLSTimeout, + JsDomain: mqtt.JsDomain, + AckWait: mqtt.AckWait, + MaxAckPending: mqtt.MaxAckPending, + TLSOCSPPeerVerify: mqttTlsOCSPPeerVerify, }, Websocket: WebsocketOptsVarz{ - Host: ws.Host, - Port: ws.Port, - Advertise: ws.Advertise, - NoAuthUser: ws.NoAuthUser, - JWTCookie: ws.JWTCookie, - AuthTimeout: ws.AuthTimeout, - NoTLS: ws.NoTLS, - TLSMap: ws.TLSMap, - SameOrigin: ws.SameOrigin, - AllowedOrigins: copyStrings(ws.AllowedOrigins), - Compression: ws.Compression, - HandshakeTimeout: ws.HandshakeTimeout, + Host: ws.Host, + Port: ws.Port, + Advertise: ws.Advertise, + NoAuthUser: ws.NoAuthUser, + JWTCookie: ws.JWTCookie, + AuthTimeout: ws.AuthTimeout, + NoTLS: ws.NoTLS, + TLSMap: ws.TLSMap, + SameOrigin: ws.SameOrigin, + AllowedOrigins: copyStrings(ws.AllowedOrigins), + Compression: ws.Compression, + HandshakeTimeout: ws.HandshakeTimeout, + TLSOCSPPeerVerify: wsTlsOCSPPeerVerify, }, - Start: s.start, + Start: s.start.UTC(), MaxSubs: opts.MaxSubs, - Cores: numCores, - MaxProcs: maxProcs, + Cores: runtime.NumCPU(), + MaxProcs: runtime.GOMAXPROCS(0), Tags: opts.Tags, TrustedOperatorsJwt: opts.operatorJWT, TrustedOperatorsClaim: opts.TrustedOperators, @@ -1557,11 +1579,14 @@ func (s *Server) createVarz(pcpu float64, rss int64) *Varz { Exports: r.DenyExports, } } + remoteTlsOCSPPeerVerify := s.ocspPeerVerify && r.tlsConfigOpts != nil && r.tlsConfigOpts.OCSPPeerConfig != nil && r.tlsConfigOpts.OCSPPeerConfig.Verify + rlna[i] = RemoteLeafOptsVarz{ - LocalAccount: r.LocalAccount, - URLs: urlsToStrings(r.URLs), - TLSTimeout: r.TLSTimeout, - Deny: deny, + LocalAccount: r.LocalAccount, + URLs: urlsToStrings(r.URLs), + TLSTimeout: r.TLSTimeout, + Deny: deny, + TLSOCSPPeerVerify: remoteTlsOCSPPeerVerify, } } varz.LeafNode.Remotes = rlna @@ -1604,7 +1629,7 @@ func (s *Server) 
updateVarzConfigReloadableFields(v *Varz) { v.MaxPending = opts.MaxPending v.TLSTimeout = opts.TLSTimeout v.WriteDeadline = opts.WriteDeadline - v.ConfigLoadTime = s.configTime + v.ConfigLoadTime = s.configTime.UTC() // Update route URLs if applicable if s.varzUpdateRouteURLs { v.Cluster.URLs = urlsToStrings(opts.Routes) @@ -1615,6 +1640,8 @@ func (s *Server) updateVarzConfigReloadableFields(v *Varz) { } v.MQTT.TLSPinnedCerts = getPinnedCertsAsSlice(opts.MQTT.TLSPinnedCerts) v.Websocket.TLSPinnedCerts = getPinnedCertsAsSlice(opts.Websocket.TLSPinnedCerts) + + v.TLSOCSPPeerVerify = s.ocspPeerVerify && v.TLSRequired && s.opts.tlsConfigOpts != nil && s.opts.tlsConfigOpts.OCSPPeerConfig != nil && s.opts.tlsConfigOpts.OCSPPeerConfig.Verify } func getPinnedCertsAsSlice(certs PinnedCertSet) []string { @@ -1706,6 +1733,21 @@ func (s *Server) updateVarzRuntimeFields(v *Varz, forceUpdate bool, pcpu float64 } } gw.RUnlock() + + if s.ocsprc != nil && s.ocsprc.Type() != "none" { + stats := s.ocsprc.Stats() + if stats != nil { + v.OCSPResponseCache = OCSPResponseCacheVarz{ + s.ocsprc.Type(), + stats.Hits, + stats.Misses, + stats.Responses, + stats.Revokes, + stats.Goods, + stats.Unknowns, + } + } + } } // HandleVarz will process HTTP requests for server information. @@ -2262,18 +2304,26 @@ func (s *Server) HandleAccountStatz(w http.ResponseWriter, r *http.Request) { ResponseHandler(w, r, b) } -// ResponseHandler handles responses for monitoring routes +// ResponseHandler handles responses for monitoring routes. func ResponseHandler(w http.ResponseWriter, r *http.Request, data []byte) { + handleResponse(http.StatusOK, w, r, data) +} + +// handleResponse handles responses for monitoring routes with a specific HTTP status code. +func handleResponse(code int, w http.ResponseWriter, r *http.Request, data []byte) { // Get callback from request callback := r.URL.Query().Get("callback") // If callback is not empty then if callback != "" { // Response for JSONP w.Header().Set("Content-Type", "application/javascript") + w.WriteHeader(code) fmt.Fprintf(w, "%s(%s)", callback, data) } else { // Otherwise JSON w.Header().Set("Content-Type", "application/json") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.WriteHeader(code) w.Write(data) } } @@ -2450,19 +2500,20 @@ func (s *Server) Accountz(optz *AccountzOptions) (*Accountz, error) { if sacc := s.SystemAccount(); sacc != nil { a.SystemAccount = sacc.GetName() } - if optz.Account == "" { + if optz == nil || optz.Account == _EMPTY_ { a.Accounts = []string{} s.accounts.Range(func(key, value interface{}) bool { a.Accounts = append(a.Accounts, key.(string)) return true }) return a, nil - } else if aInfo, err := s.accountInfo(optz.Account); err != nil { + } + aInfo, err := s.accountInfo(optz.Account) + if err != nil { return nil, err - } else { - a.Account = aInfo - return a, nil } + a.Account = aInfo + return a, nil } func newExtImport(v *serviceImport) ExtImport { @@ -2475,10 +2526,12 @@ func newExtImport(v *serviceImport) ExtImport { imp.Tracking = v.tracking imp.Invalid = v.invalid imp.Import = jwt.Import{ - Subject: jwt.Subject(v.from), + Subject: jwt.Subject(v.to), Account: v.acc.Name, Type: jwt.Service, - To: jwt.Subject(v.to), + // Deprecated so we duplicate. Use LocalSubject. 
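+		// Here v.to is the subject as exported by the remote account and v.from is its
+		// local name, e.g. an import { subject: "downlink.>", to: "event.>" } is reported
+		// as Subject "downlink.>" with LocalSubject "event.>" (nats-io/nats-server#4144).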
+ To: jwt.Subject(v.from), + LocalSubject: jwt.RenamingSubject(v.from), } imp.TrackingHdr = v.trackingHdr imp.Latency = newExtServiceLatency(v.latency) @@ -2607,7 +2660,7 @@ func (s *Server) accountInfo(accName string) (*AccountInfo, error) { } return &AccountInfo{ accName, - a.updated, + a.updated.UTC(), isSys, a.expired, !a.incomplete, @@ -3005,7 +3058,7 @@ type HealthStatus struct { Error string `json:"error,omitempty"` } -// https://tools.ietf.org/id/draft-inadarei-api-health-check-05.html +// https://datatracker.ietf.org/doc/html/draft-inadarei-api-health-check func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) { s.mu.Lock() s.httpReqStats[HealthzPath]++ @@ -3032,16 +3085,19 @@ func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) { JSEnabledOnly: jsEnabledOnly, JSServerOnly: jsServerOnly, }) + + code := http.StatusOK + if hs.Error != _EMPTY_ { s.Warnf("Healthcheck failed: %q", hs.Error) - w.WriteHeader(http.StatusServiceUnavailable) + code = http.StatusServiceUnavailable } b, err := json.Marshal(hs) if err != nil { s.Errorf("Error marshaling response to /healthz request: %v", err) } - ResponseHandler(w, r, b) + handleResponse(code, w, r, b) } // Generate health status. @@ -3081,18 +3137,20 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus { // Clustered JetStream js.mu.RLock() - defer js.mu.RUnlock() - cc := js.cluster + js.mu.RUnlock() const na = "unavailable" // Currently single server we make sure the streams were recovered. - if cc == nil || cc.meta == nil { + if cc == nil { sdir := js.config.StoreDir // Whip through account folders and pull each stream name. fis, _ := os.ReadDir(sdir) for _, fi := range fis { + if fi.Name() == snapStagingDir { + continue + } acc, err := s.LookupAccount(fi.Name()) if err != nil { health.Status = na @@ -3113,17 +3171,19 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus { } // If we are here we want to check for any assets assigned to us. - meta := cc.meta - ourID := meta.ID() + var meta RaftNode + js.mu.RLock() + meta = cc.meta + js.mu.RUnlock() // If no meta leader. - if meta.GroupLeader() == _EMPTY_ { + if meta == nil || meta.GroupLeader() == _EMPTY_ { health.Status = na health.Error = "JetStream has not established contact with a meta leader" return health } // If we are not current with the meta leader. - if !meta.Current() { + if !meta.Healthy() { health.Status = na health.Error = "JetStream is not current with the meta leader" return health @@ -3136,29 +3196,58 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus { // Range across all accounts, the streams assigned to them, and the consumers. // If they are assigned to this server check their status. + ourID := meta.ID() + + // Copy the meta layer so we do not need to hold the js read lock for an extended period of time. + js.mu.RLock() + streams := make(map[string]map[string]*streamAssignment, len(cc.streams)) for acc, asa := range cc.streams { + nasa := make(map[string]*streamAssignment) for stream, sa := range asa { - if sa.Group.isMember(ourID) { - // Make sure we can look up - if !cc.isStreamHealthy(acc, stream) { - health.Status = na - health.Error = fmt.Sprintf("JetStream stream '%s > %s' is not current", acc, stream) - return health - } - // Now check consumers. + // If we are a member and we are not being restored, select for check. 
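+			// A non-nil Restore means the stream is still being restored, so we skip it here.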
+			if sa.Group.isMember(ourID) && sa.Restore == nil {
+				csa := sa.copyGroup()
+				csa.consumers = make(map[string]*consumerAssignment)
 				for consumer, ca := range sa.consumers {
 					if ca.Group.isMember(ourID) {
-						if !cc.isConsumerCurrent(acc, stream, consumer) {
-							health.Status = na
-							health.Error = fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current", acc, stream, consumer)
-							return health
-						}
+						// Use original here. Not a copy.
+						csa.consumers[consumer] = ca
 					}
 				}
+				nasa[stream] = csa
 			}
 		}
+		streams[acc] = nasa
 	}
+	js.mu.RUnlock()
+
+	// Use our copy to traverse so we do not need to hold the js lock.
+	for accName, asa := range streams {
+		acc, err := s.LookupAccount(accName)
+		if err != nil && len(asa) > 0 {
+			health.Status = na
+			health.Error = fmt.Sprintf("JetStream cannot look up account %q: %v", accName, err)
+			return health
+		}
+		for stream, sa := range asa {
+			// Make sure we can look up
+			if !js.isStreamHealthy(acc, sa) {
+				health.Status = na
+				health.Error = fmt.Sprintf("JetStream stream '%s > %s' is not current", accName, stream)
+				return health
+			}
+			mset, _ := acc.lookupStream(stream)
+			// Now check consumers.
+			for consumer, ca := range sa.consumers {
+				if !js.isConsumerHealthy(mset, consumer, ca) {
+					health.Status = na
+					health.Error = fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current", accName, stream, consumer)
+					return health
+				}
+			}
+		}
+	}
 
 	// Success.
 	return health
 }
diff --git a/server/monitor_sort_opts.go b/server/monitor_sort_opts.go
index 10258d26c..2fcaf2e9f 100644
--- a/server/monitor_sort_opts.go
+++ b/server/monitor_sort_opts.go
@@ -45,7 +45,7 @@ const (
 	ByUptime SortOpt = "uptime" // By the amount of time connections exist
 	ByStop   SortOpt = "stop"   // By the stop time for a closed connection
 	ByReason SortOpt = "reason" // By the reason for a closed connection
-
+	ByRTT    SortOpt = "rtt"    // By the round trip time
 )
 
 // Individual sort options provide the Less for sort.Interface. Len and Swap are on cList.
@@ -92,12 +92,13 @@ func (l byLast) Less(i, j int) bool {
 }
 
 // Idle time
-type byIdle struct{ ConnInfos }
+type byIdle struct {
+	ConnInfos
+	now time.Time
+}
 
 func (l byIdle) Less(i, j int) bool {
-	ii := l.ConnInfos[i].LastActivity.Sub(l.ConnInfos[i].Start)
-	ij := l.ConnInfos[j].LastActivity.Sub(l.ConnInfos[j].Start)
-	return ii < ij
+	return l.now.Sub(l.ConnInfos[i].LastActivity) < l.now.Sub(l.ConnInfos[j].LastActivity)
 }
 
 // Uptime
@@ -139,10 +140,15 @@ func (l byReason) Less(i, j int) bool {
 	return l.ConnInfos[i].Reason < l.ConnInfos[j].Reason
 }
 
+// RTT - Default is descending
+type byRTT struct{ ConnInfos }
+
+func (l byRTT) Less(i, j int) bool { return l.ConnInfos[i].rtt < l.ConnInfos[j].rtt }
+
 // IsValid determines if a sort option is valid
 func (s SortOpt) IsValid() bool {
 	switch s {
-	case "", ByCid, ByStart, BySubs, ByPending, ByOutMsgs, ByInMsgs, ByOutBytes, ByInBytes, ByLast, ByIdle, ByUptime, ByStop, ByReason:
+	case _EMPTY_, ByCid, ByStart, BySubs, ByPending, ByOutMsgs, ByInMsgs, ByOutBytes, ByInBytes, ByLast, ByIdle, ByUptime, ByStop, ByReason, ByRTT:
 		return true
 	default:
 		return false
diff --git a/server/monitor_test.go b/server/monitor_test.go
index fc4b95f85..d5cd9ff6a 100644
--- a/server/monitor_test.go
+++ b/server/monitor_test.go
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The NATS Authors
+// Copyright 2013-2023 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at @@ -157,7 +157,14 @@ func readBodyEx(t *testing.T, url string, status int, content string) []byte { } ct := resp.Header.Get("Content-Type") if ct != content { - stackFatalf(t, "Expected %s content-type, got %s\n", content, ct) + stackFatalf(t, "Expected %q content-type, got %q\n", content, ct) + } + // Check the CORS header for "application/json" requests only. + if ct == appJSONContent { + acao := resp.Header.Get("Access-Control-Allow-Origin") + if acao != "*" { + stackFatalf(t, "Expected with %q Content-Type an Access-Control-Allow-Origin header with value %q, got %q\n", appJSONContent, "*", acao) + } } body, err := io.ReadAll(resp.Body) if err != nil { @@ -1179,77 +1186,167 @@ func TestConnzSortedByIdle(t *testing.T) { s := runMonitorServer() defer s.Shutdown() - url := fmt.Sprintf("http://127.0.0.1:%d/", s.MonitorAddr().Port) + url := fmt.Sprintf("http://%s/connz?sort=idle", s.MonitorAddr()) + now := time.Now() - testIdle := func(mode int) { - firstClient := createClientConnSubscribeAndPublish(t, s) - defer firstClient.Close() - firstClient.Subscribe("client.1", func(m *nats.Msg) {}) - firstClient.Flush() - - secondClient := createClientConnSubscribeAndPublish(t, s) - defer secondClient.Close() - - // Make it such that the second client started 10 secs ago. 10 is important since bug - // was strcmp, e.g. 1s vs 11s - var cid uint64 - switch mode { - case 0: - cid = uint64(2) - case 1: - cid = uint64(4) - } - client := s.getClient(cid) - if client == nil { - t.Fatalf("Error looking up client %v\n", 2) - } + clients := []struct { + start time.Time // Client start time. + last time.Time // Client last activity time. + }{ + {start: now.Add(-10 * time.Second), last: now.Add(-5 * time.Second)}, + {start: now.Add(-20 * time.Second), last: now.Add(-10 * time.Second)}, + {start: now.Add(-3 * time.Second), last: now.Add(-2 * time.Second)}, + {start: now.Add(-30 * time.Second), last: now.Add(-20 * time.Second)}, + } - // We want to make sure that we set start/last after the server has finished - // updating this client's last activity. Doing another Flush() now (even though - // one is done in createClientConnSubscribeAndPublish) ensures that server has - // finished updating the client's last activity, since for that last flush there - // should be no new message/sub/unsub activity. - secondClient.Flush() + testIdle := func(mode int) { + // Connect the specified number of clients. + for _, c := range clients { + clientConn := createClientConnSubscribeAndPublish(t, s) + defer clientConn.Close() - client.mu.Lock() - client.start = client.start.Add(-10 * time.Second) - client.last = client.start - client.mu.Unlock() + cid, err := clientConn.GetClientID() + if err != nil { + t.Fatalf("error getting the client CID: %v", err) + } - // The Idle granularity is a whole second - time.Sleep(time.Second) - firstClient.Publish("client.1", []byte("new message")) + client := s.getClient(cid) + if client == nil { + t.Fatalf("error looking up client %d", cid) + } - c := pollConz(t, s, mode, url+"connz?sort=idle", &ConnzOptions{Sort: ByIdle}) - // Make sure we are returned 2 connections... - if len(c.Conns) != 2 { - t.Fatalf("Expected to get two connections, got %v", len(c.Conns)) + // Change the client's start and last activity times. 
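+			// Hold the client lock while mutating so monitoring never reads a torn start/last pair.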
+ client.mu.Lock() + client.start = c.start + client.last = c.last + client.mu.Unlock() } - // And that the Idle time is valid (even if equal to "0s") - if c.Conns[0].Idle == "" || c.Conns[1].Idle == "" { - t.Fatal("Expected Idle value to be valid") - } + connz := pollConz(t, s, mode, url, &ConnzOptions{Sort: ByIdle}) - idle1, err := time.ParseDuration(c.Conns[0].Idle) - if err != nil { - t.Fatalf("Unable to parse duration %v, err=%v", c.Conns[0].Idle, err) - } - idle2, err := time.ParseDuration(c.Conns[1].Idle) - if err != nil { - t.Fatalf("Unable to parse duration %v, err=%v", c.Conns[0].Idle, err) + wantConns := len(clients) + gotConns := len(connz.Conns) + + if gotConns != wantConns { + t.Fatalf("want %d connections, got %d", wantConns, gotConns) } - if idle2 < idle1 { - t.Fatalf("Expected conns sorted in descending order by Idle, got %v < %v\n", - idle2, idle1) + idleDurations := getConnsIdleDurations(t, connz.Conns) + + if !sortedDurationsDesc(idleDurations) { + t.Errorf("want durations sorted in descending order, got %v", idleDurations) } } + for mode := 0; mode < 2; mode++ { testIdle(mode) } } +// getConnsIdleDurations returns a slice of parsed idle durations from a connection info slice. +func getConnsIdleDurations(t *testing.T, conns []*ConnInfo) []time.Duration { + t.Helper() + + durations := make([]time.Duration, 0, len(conns)) + + for _, conn := range conns { + idle, err := time.ParseDuration(conn.Idle) + if err != nil { + t.Fatalf("error parsing duration %q: %v", conn.Idle, err) + } + durations = append(durations, idle) + } + + return durations +} + +// sortedDurationsDesc checks if a time.Duration slice is sorted in descending order. +func sortedDurationsDesc(durations []time.Duration) bool { + return sort.SliceIsSorted(durations, func(i, j int) bool { + // Must be longer than the next duration. 
+ return durations[i] > durations[j] + }) +} + +func TestConnzSortByIdleTime(t *testing.T) { + now := time.Now().UTC() + + cases := map[string]ConnInfos{ + "zero values": {{}, {}, {}, {}}, + "equal last activity times": { + {Start: now.Add(-50 * time.Minute), LastActivity: now.Add(-time.Minute)}, + {Start: now.Add(-30 * time.Minute), LastActivity: now.Add(-time.Minute)}, + {Start: now.Add(-10 * time.Second), LastActivity: now.Add(-time.Minute)}, + {Start: now.Add(-2 * time.Hour), LastActivity: now.Add(-time.Minute)}, + }, + "last activity in the future": { + {Start: now.Add(-50 * time.Minute), LastActivity: now.Add(10 * time.Minute)}, // +10m + {Start: now.Add(-30 * time.Minute), LastActivity: now.Add(5 * time.Minute)}, // +5m + {Start: now.Add(-24 * time.Hour), LastActivity: now.Add(2 * time.Second)}, // +2s + {Start: now.Add(-10 * time.Second), LastActivity: now.Add(15 * time.Minute)}, // +15m + {Start: now.Add(-2 * time.Hour), LastActivity: now.Add(time.Minute)}, // +1m + }, + "unsorted": { + {Start: now.Add(-50 * time.Minute), LastActivity: now.Add(-10 * time.Minute)}, // 10m ago + {Start: now.Add(-30 * time.Minute), LastActivity: now.Add(-5 * time.Minute)}, // 5m ago + {Start: now.Add(-24 * time.Hour), LastActivity: now.Add(-2 * time.Second)}, // 2s ago + {Start: now.Add(-10 * time.Second), LastActivity: now.Add(-15 * time.Minute)}, // 15m ago + {Start: now.Add(-2 * time.Hour), LastActivity: now.Add(-time.Minute)}, // 1m ago + }, + "unsorted with zero value start time": { + {LastActivity: now.Add(-10 * time.Minute)}, // 10m ago + {LastActivity: now.Add(-5 * time.Minute)}, // 5m ago + {LastActivity: now.Add(-2 * time.Second)}, // 2s ago + {LastActivity: now.Add(-15 * time.Minute)}, // 15m ago + {LastActivity: now.Add(-time.Minute)}, // 1m ago + }, + "sorted": { + {Start: now.Add(-24 * time.Hour), LastActivity: now.Add(-2 * time.Second)}, // 2s ago + {Start: now.Add(-2 * time.Hour), LastActivity: now.Add(-time.Minute)}, // 1m ago + {Start: now.Add(-30 * time.Minute), LastActivity: now.Add(-5 * time.Minute)}, // 5m ago + {Start: now.Add(-50 * time.Minute), LastActivity: now.Add(-10 * time.Minute)}, // 10m ago + {Start: now.Add(-10 * time.Second), LastActivity: now.Add(-15 * time.Minute)}, // 15m ago + }, + "sorted with zero value start time": { + {LastActivity: now.Add(-2 * time.Second)}, // 2s ago + {LastActivity: now.Add(-time.Minute)}, // 1m ago + {LastActivity: now.Add(-5 * time.Minute)}, // 5m ago + {LastActivity: now.Add(-10 * time.Minute)}, // 10m ago + {LastActivity: now.Add(-15 * time.Minute)}, // 15m ago + }, + } + + for name, conns := range cases { + t.Run(name, func(t *testing.T) { + sort.Sort(byIdle{conns, now}) + + idleDurations := getIdleDurations(conns, now) + + if !sortedDurationsAsc(idleDurations) { + t.Errorf("want durations sorted in ascending order, got %v", idleDurations) + } + }) + } +} + +// getIdleDurations returns a slice of idle durations from a connection info list up until now time. +func getIdleDurations(conns ConnInfos, now time.Time) []time.Duration { + durations := make([]time.Duration, 0, len(conns)) + + for _, conn := range conns { + durations = append(durations, now.Sub(conn.LastActivity)) + } + + return durations +} + +// sortedDurationsAsc checks if a time.Duration slice is sorted in ascending order. 
+func sortedDurationsAsc(durations []time.Duration) bool { + return sort.SliceIsSorted(durations, func(i, j int) bool { + return durations[i] < durations[j] + }) +} + func TestConnzSortBadRequest(t *testing.T) { s := runMonitorServer() defer s.Shutdown() @@ -2966,9 +3063,10 @@ func TestMonitorLeafNode(t *testing.T) { opts.LeafNode.TLSConfig != nil, []RemoteLeafOptsVarz{ { - "acc", 1, []string{"localhost:1234"}, nil, + "acc", 1, []string{"localhost:1234"}, nil, false, }, }, + false, } varzURL := fmt.Sprintf("http://127.0.0.1:%d/varz", s.MonitorAddr().Port) @@ -2985,7 +3083,7 @@ func TestMonitorLeafNode(t *testing.T) { // Having this here to make sure that if fields are added in ClusterOptsVarz, // we make sure to update this test (compiler will report an error if we don't) - _ = LeafNodeOptsVarz{"", 0, 0, 0, false, false, []RemoteLeafOptsVarz{{"", 0, nil, nil}}} + _ = LeafNodeOptsVarz{"", 0, 0, 0, false, false, []RemoteLeafOptsVarz{{"", 0, nil, nil, false}}, false} // Alter the fields to make sure that we have a proper deep copy // of what may be stored in the server. Anything we change here @@ -3941,7 +4039,7 @@ func TestMonitorAccountz(t *testing.T) { body = string(readBody(t, fmt.Sprintf("http://127.0.0.1:%d%s?acc=$SYS", s.MonitorAddr().Port, AccountzPath))) require_Contains(t, body, `"account_detail": {`) require_Contains(t, body, `"account_name": "$SYS",`) - require_Contains(t, body, `"subscriptions": 40,`) + require_Contains(t, body, `"subscriptions": 41,`) require_Contains(t, body, `"is_system": true,`) require_Contains(t, body, `"system_account": "$SYS"`) @@ -4427,7 +4525,7 @@ func TestMonitorJsz(t *testing.T) { t.Fatal("expected stream raft group info to be included") } crgroup := si.ConsumerRaftGroups[0] - if crgroup.Name != "my-consumer-replicated" { + if crgroup.Name != "my-consumer-replicated" && crgroup.Name != "my-consumer-mirror" { t.Fatalf("expected consumer name to be included in raft group info, got: %v", crgroup.Name) } if len(crgroup.RaftGroup) == 0 { @@ -4600,3 +4698,305 @@ func TestMonitorWebsocket(t *testing.T) { } } } + +func TestMonitorConnzOperatorModeFilterByUser(t *testing.T) { + accKp, accPub := createKey(t) + accClaim := jwt.NewAccountClaims(accPub) + accJwt := encodeClaim(t, accClaim, accPub) + + conf := createConfFile(t, []byte(fmt.Sprintf(` + listen: 127.0.0.1:-1 + http: 127.0.0.1:-1 + operator = %s + resolver = MEMORY + resolver_preload = { + %s : %s + } + `, ojwt, accPub, accJwt))) + + s, _ := RunServerWithConfig(conf) + defer s.Shutdown() + + createUser := func() (string, string) { + ukp, _ := nkeys.CreateUser() + seed, _ := ukp.Seed() + upub, _ := ukp.PublicKey() + uclaim := newJWTTestUserClaims() + uclaim.Subject = upub + ujwt, err := uclaim.Encode(accKp) + require_NoError(t, err) + return upub, genCredsFile(t, ujwt, seed) + } + + // Now create 2 users. 
+ aUser, aCreds := createUser() + bUser, bCreds := createUser() + + var users []*nats.Conn + + // Create 2 for A + for i := 0; i < 2; i++ { + nc, err := nats.Connect(s.ClientURL(), nats.UserCredentials(aCreds)) + require_NoError(t, err) + defer nc.Close() + users = append(users, nc) + } + // Create 5 for B + for i := 0; i < 5; i++ { + nc, err := nats.Connect(s.ClientURL(), nats.UserCredentials(bCreds)) + require_NoError(t, err) + defer nc.Close() + users = append(users, nc) + } + + // Test A + connz := pollConz(t, s, 1, _EMPTY_, &ConnzOptions{User: aUser, Username: true}) + require_True(t, connz.NumConns == 2) + for _, ci := range connz.Conns { + require_True(t, ci.AuthorizedUser == aUser) + } + // Test B + connz = pollConz(t, s, 1, _EMPTY_, &ConnzOptions{User: bUser, Username: true}) + require_True(t, connz.NumConns == 5) + for _, ci := range connz.Conns { + require_True(t, ci.AuthorizedUser == bUser) + } + + // Make sure URL access is the same. + url := fmt.Sprintf("http://127.0.0.1:%d/", s.MonitorAddr().Port) + urlFull := url + fmt.Sprintf("connz?auth=true&user=%s", aUser) + connz = pollConz(t, s, 0, urlFull, nil) + require_True(t, connz.NumConns == 2) + for _, ci := range connz.Conns { + require_True(t, ci.AuthorizedUser == aUser) + } + + // Now test closed filtering as well. + for _, nc := range users { + nc.Close() + } + // Let them process and be moved to closed ring buffer in server. + time.Sleep(100 * time.Millisecond) + + connz = pollConz(t, s, 1, _EMPTY_, &ConnzOptions{User: aUser, Username: true, State: ConnClosed}) + require_True(t, connz.NumConns == 2) + for _, ci := range connz.Conns { + require_True(t, ci.AuthorizedUser == aUser) + } +} + +func TestMonitorConnzSortByRTT(t *testing.T) { + s := runMonitorServer() + defer s.Shutdown() + + for i := 0; i < 10; i++ { + nc, err := nats.Connect(s.ClientURL()) + require_NoError(t, err) + defer nc.Close() + } + + connz := pollConz(t, s, 1, _EMPTY_, &ConnzOptions{Sort: ByRTT}) + require_True(t, connz.NumConns == 10) + + var rtt int64 + for _, ci := range connz.Conns { + if rtt == 0 { + rtt = ci.rtt + } else { + if ci.rtt > rtt { + t.Fatalf("RTT not in descending order: %v vs %v", + time.Duration(rtt), time.Duration(ci.rtt)) + } + rtt = ci.rtt + } + } + + // Make sure url works as well. 
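+	// Over HTTP the ConnInfo values are unmarshaled from JSON, so the unexported rtt
+	// field is always zero; compare durations parsed from the RTT strings instead.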
+	url := fmt.Sprintf("http://127.0.0.1:%d/connz?sort=rtt", s.MonitorAddr().Port)
+	connz = pollConz(t, s, 0, url, nil)
+	require_True(t, connz.NumConns == 10)
+
+	rtt = 0
+	for _, ci := range connz.Conns {
+		crttd, err := time.ParseDuration(ci.RTT)
+		require_NoError(t, err)
+		crtt := int64(crttd)
+		if rtt == 0 {
+			rtt = crtt
+		} else {
+			if crtt > rtt {
+				t.Fatalf("RTT not in descending order: %v vs %v",
+					time.Duration(rtt), time.Duration(crtt))
+			}
+			rtt = crtt
+		}
+	}
+}
+
+// https://github.com/nats-io/nats-server/issues/4144
+func TestMonitorAccountszMappingOrderReporting(t *testing.T) {
+	conf := createConfFile(t, []byte(`
+	listen: 127.0.0.1:-1
+	server_name: SR22
+	accounts {
+		CLOUD {
+			exports [ { service: "downlink.>" } ]
+		}
+		APP {
+			imports [ { service: { account: CLOUD, subject: "downlink.>"}, to: "event.>"} ]
+		}
+	}`))
+
+	s, _ := RunServerWithConfig(conf)
+	defer s.Shutdown()
+
+	az, err := s.Accountz(&AccountzOptions{"APP"})
+	require_NoError(t, err)
+	require_NotNil(t, az.Account)
+	require_True(t, len(az.Account.Imports) > 0)
+
+	var found bool
+	for _, si := range az.Account.Imports {
+		if si.Import.Subject == "downlink.>" {
+			found = true
+			require_True(t, si.Import.LocalSubject == "event.>")
+			break
+		}
+	}
+	require_True(t, found)
+}
+
+// createCallbackURL adds a callback query parameter for JSONP requests.
+func createCallbackURL(t *testing.T, endpoint string) string {
+	t.Helper()
+
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	params := u.Query()
+	params.Set("callback", "callback")
+
+	u.RawQuery = params.Encode()
+
+	return u.String()
+}
+
+// stripCallback removes the JSONP callback function from the response.
+// Returns the JSON body without the wrapping callback function.
+// If there's no callback function, the data is returned as is.
+func stripCallback(data []byte) []byte {
+	// Cut the JSONP callback function with the opening parenthesis.
+	_, after, found := bytes.Cut(data, []byte("("))
+
+	if found {
+		return bytes.TrimSuffix(after, []byte(")"))
+	}
+
+	return data
+}
+
+// expectHealthStatus makes 1 regular and 1 JSONP request to the URL and checks the
+// HTTP status code, Content-Type header and health status string.
+func expectHealthStatus(t *testing.T, url string, statusCode int, wantStatus string) {
+	t.Helper()
+
+	// First check for regular requests.
+	body := readBodyEx(t, url, statusCode, appJSONContent)
+	checkHealthStatus(t, body, wantStatus)
+
+	// Another check for JSONP requests.
+	jsonpURL := createCallbackURL(t, url) // Adds a callback query param.
+	jsonpBody := readBodyEx(t, jsonpURL, statusCode, appJSContent)
+	checkHealthStatus(t, stripCallback(jsonpBody), wantStatus)
+}
+
+// checkHealthStatus checks the health status from a JSON response.
+func checkHealthStatus(t *testing.T, body []byte, wantStatus string) {
+	t.Helper()
+
+	h := &HealthStatus{}
+
+	if err := json.Unmarshal(body, h); err != nil {
+		t.Fatalf("error unmarshalling the body: %v", err)
+	}
+
+	if h.Status != wantStatus {
+		t.Errorf("want health status %q, got %q", wantStatus, h.Status)
+	}
+}
+
+// checkHealthzEndpoint makes requests to the /healthz endpoint and checks the health status.
+func checkHealthzEndpoint(t *testing.T, address string, statusCode int, wantStatus string) { + t.Helper() + + cases := map[string]string{ + "healthz": fmt.Sprintf("http://%s/healthz", address), + "js-enabled-only": fmt.Sprintf("http://%s/healthz?js-enabled-only=true", address), + "js-server-only": fmt.Sprintf("http://%s/healthz?js-server-only=true", address), + } + + for name, url := range cases { + t.Run(name, func(t *testing.T) { + expectHealthStatus(t, url, statusCode, wantStatus) + }) + } +} + +func TestHealthzStatusOK(t *testing.T) { + s := runMonitorServer() + defer s.Shutdown() + + checkHealthzEndpoint(t, s.MonitorAddr().String(), http.StatusOK, "ok") +} + +func TestHealthzStatusError(t *testing.T) { + s := runMonitorServer() + defer s.Shutdown() + + // Intentionally causing an error in readyForConnections(). + // Note: Private field access, taking advantage of having the tests in the same package. + s.listener = nil + + checkHealthzEndpoint(t, s.MonitorAddr().String(), http.StatusServiceUnavailable, "error") +} + +func TestHealthzStatusUnavailable(t *testing.T) { + opts := DefaultMonitorOptions() + opts.JetStream = true + + s := RunServer(opts) + defer s.Shutdown() + + if !s.JetStreamEnabled() { + t.Fatalf("want JetStream to be enabled first") + } + + err := s.DisableJetStream() + + if err != nil { + t.Fatalf("got an error disabling JetStream: %v", err) + } + + checkHealthzEndpoint(t, s.MonitorAddr().String(), http.StatusServiceUnavailable, "unavailable") +} + +// When we converted ipq to use generics we still were using sync.Map. Currently you can not convert +// interface{} or any to a generic parameterized type. So this stopped working and panics. +func TestIpqzWithGenerics(t *testing.T) { + opts := DefaultMonitorOptions() + opts.JetStream = true + + s := RunServer(opts) + defer s.Shutdown() + + url := fmt.Sprintf("http://%s/ipqueuesz?all=1", s.MonitorAddr().String()) + body := readBody(t, url) + require_True(t, len(body) > 0) + + queues := map[string]*monitorIPQueue{} + require_NoError(t, json.Unmarshal(body, &queues)) + require_True(t, len(queues) >= 4) + require_True(t, queues["SendQ"] != nil) +} diff --git a/server/mqtt.go b/server/mqtt.go index 9353563a1..8680e8dde 100644 --- a/server/mqtt.go +++ b/server/mqtt.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 The NATS Authors +// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -422,7 +422,7 @@ func (s *Server) createMQTTClient(conn net.Conn, ws *websocket) *client { if maxSubs == 0 { maxSubs = -1 } - now := time.Now().UTC() + now := time.Now() c := &client{srv: s, nc: conn, mpay: maxPay, msubs: maxSubs, start: now, last: now, mqtt: &mqtt{}, ws: ws} c.headers = true @@ -3407,8 +3407,8 @@ func mqttSubscribeTrace(pi uint16, filters []*mqttFilter) string { // message and this is the callback for a QoS1 subscription because in // that case, it will be handled by the other callback. This avoid getting // duplicate deliveries. 
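+// Note that with JetStream republish (issue #4291) a message can be delivered by a
+// JETSTREAM-kind client without a JS ack reply subject; only deliveries that carry a
+// jsAckPre reply come from a QoS1 consumer, so only those are skipped below.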
-func mqttDeliverMsgCbQos0(sub *subscription, pc *client, _ *Account, subject, _ string, rmsg []byte) { - if pc.kind == JETSTREAM { +func mqttDeliverMsgCbQos0(sub *subscription, pc *client, _ *Account, subject, reply string, rmsg []byte) { + if pc.kind == JETSTREAM && len(reply) > 0 && strings.HasPrefix(reply, jsAckPre) { return } diff --git a/server/mqtt_test.go b/server/mqtt_test.go index 0c659df0e..625c206b4 100644 --- a/server/mqtt_test.go +++ b/server/mqtt_test.go @@ -1,4 +1,4 @@ -// Copyright 2020 The NATS Authors +// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -319,7 +319,7 @@ func testMQTTRunServer(t testing.TB, o *Options) *Server { } l := &DummyLogger{} s.SetLogger(l, true, true) - go s.Start() + s.Start() if err := s.readyForConnections(3 * time.Second); err != nil { testMQTTShutdownServer(s) t.Fatal(err) @@ -6367,6 +6367,49 @@ func TestMQTTSubjectWildcardStart(t *testing.T) { require_True(t, si.State.Msgs == 0) } +// Issue https://github.com/nats-io/nats-server/issues/4291 +func TestMQTTJetStreamRepublishAndQoS0Subscribers(t *testing.T) { + conf := createConfFile(t, []byte(` + listen: 127.0.0.1:-1 + server_name: mqtt + jetstream: enabled + mqtt { + listen: 127.0.0.1:-1 + } + `)) + s, o := RunServerWithConfig(conf) + defer testMQTTShutdownServer(s) + + nc, js := jsClientConnect(t, s) + defer nc.Close() + + // Setup stream with republish on it. + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + RePublish: &nats.RePublish{ + Source: "foo", + Destination: "mqtt.foo", + }, + }) + require_NoError(t, err) + + // Create QoS0 subscriber to catch re-publishes. + mc, r := testMQTTConnect(t, &mqttConnInfo{cleanSess: true}, o.MQTT.Host, o.MQTT.Port) + defer mc.Close() + testMQTTCheckConnAck(t, r, mqttConnAckRCConnectionAccepted, false) + + testMQTTSub(t, 1, mc, r, []*mqttFilter{{filter: "mqtt/foo", qos: 0}}, []byte{0}) + testMQTTFlush(t, mc, nil, r) + + msg := []byte("HELLO WORLD") + _, err = js.Publish("foo", msg) + require_NoError(t, err) + + testMQTTCheckPubMsg(t, mc, r, "mqtt/foo", 0, msg) + testMQTTExpectNothing(t, r) +} + ////////////////////////////////////////////////////////////////////////// // // Benchmarks diff --git a/server/nkey.go b/server/nkey.go index 8604c4413..5b45edf75 100644 --- a/server/nkey.go +++ b/server/nkey.go @@ -34,7 +34,7 @@ func (s *Server) NonceRequired() bool { // nonceRequired tells us if we should send a nonce. // Lock should be held on entry. func (s *Server) nonceRequired() bool { - return s.opts.AlwaysEnableNonce || len(s.nkeys) > 0 || s.trustedKeys != nil + return s.getOpts().AlwaysEnableNonce || len(s.nkeys) > 0 || s.trustedKeys != nil } // Generate a nonce for INFO challenge. diff --git a/server/norace_test.go b/server/norace_test.go index 787a6e5f8..473576bd1 100644 --- a/server/norace_test.go +++ b/server/norace_test.go @@ -26,6 +26,7 @@ import ( "errors" "fmt" "io" + "math" "math/rand" "net" "net/http" @@ -49,6 +50,7 @@ import ( "github.com/nats-io/jwt/v2" "github.com/nats-io/nats.go" "github.com/nats-io/nkeys" + "github.com/nats-io/nuid" ) // IMPORTANT: Tests in this file are not executed when running with the -race flag. @@ -2422,7 +2424,7 @@ func TestNoRaceJetStreamSlowFilteredInititalPendingAndFirstMsg(t *testing.T) { }) // Threshold for taking too long. 
- const thresh = 50 * time.Millisecond + const thresh = 150 * time.Millisecond var dindex int testConsumerCreate := func(subj string, startSeq, expectedNumPending uint64) { @@ -3605,7 +3607,7 @@ func TestNoRaceJetStreamClusterCorruptWAL(t *testing.T) { fs = o.raftNode().(*raft).wal.(*fileStore) state = fs.State() err = fs.Truncate(state.FirstSeq) - require_NoError(t, err) + require_True(t, err == nil || err == ErrInvalidSequence) state = fs.State() sub, err = js.PullSubscribe("foo", "dlc") @@ -3798,13 +3800,16 @@ func TestNoRaceJetStreamClusterStreamReset(t *testing.T) { return err }) - // Grab number go routines. - if after := runtime.NumGoroutine(); base > after { - t.Fatalf("Expected %d go routines, got %d", base, after) - } + checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { + if after := runtime.NumGoroutine(); base > after { + return fmt.Errorf("Expected %d go routines, got %d", base, after) + } + return nil + }) // Simulate a low level write error on our consumer and make sure we can recover etc. cl = c.consumerLeader("$G", "TEST", "d1") + require_True(t, cl != nil) mset, err = cl.GlobalAccount().lookupStream("TEST") if err != nil { t.Fatalf("Unexpected error: %v", err) @@ -5200,16 +5205,15 @@ func TestNoRaceJetStreamClusterDirectAccessAllPeersSubs(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - nc, _ := jsClientConnect(t, c.randomServer()) + nc, js := jsClientConnect(t, c.randomServer()) defer nc.Close() - js, _ := nc.JetStream(nats.MaxWait(500 * time.Millisecond)) for { select { case <-qch: return default: // Send as fast as we can. - js.PublishAsync(fmt.Sprintf("kv.%d", rand.Intn(1000)), msg) + js.Publish(fmt.Sprintf("kv.%d", rand.Intn(1000)), msg) } } }() @@ -5265,7 +5269,7 @@ func TestNoRaceJetStreamClusterDirectAccessAllPeersSubs(t *testing.T) { t.Fatalf("Expected to see messages increase, got %d", si.State.Msgs) } - checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { + checkFor(t, 10*time.Second, 100*time.Millisecond, func() error { // Make sure they are all the same from a state perspective. // Leader will have the expected state. lmset, err := c.streamLeader("$G", "TEST").GlobalAccount().lookupStream("TEST") @@ -5383,6 +5387,9 @@ func TestNoRaceJetStreamClusterConsumerListPaging(t *testing.T) { if resp.Limit < len(resp.Consumers) { t.Fatalf("Expected total limited to %d but got %d", resp.Limit, len(resp.Consumers)) } + if resp.Total != numConsumers { + t.Fatalf("Invalid total response: expected %d got %d", numConsumers, resp.Total) + } return resp.Consumers } @@ -5412,6 +5419,9 @@ func TestNoRaceJetStreamClusterConsumerListPaging(t *testing.T) { if resp.Limit < len(resp.Consumers) { t.Fatalf("Expected total limited to %d but got %d", resp.Limit, len(resp.Consumers)) } + if resp.Total != numConsumers { + t.Fatalf("Invalid total response: expected %d got %d", numConsumers, resp.Total) + } return resp.Consumers } @@ -5428,6 +5438,10 @@ func TestNoRaceJetStreamClusterConsumerListPaging(t *testing.T) { results[name] = true } } + + if len(results) != numConsumers { + t.Fatalf("Received %d / %d consumers", len(results), numConsumers) + } } func TestNoRaceJetStreamFileStoreLargeKVAccessTiming(t *testing.T) { @@ -5564,7 +5578,7 @@ func TestNoRaceJetStreamSuperClusterStreamMoveLongRTT(t *testing.T) { // Make C2 far away. 
 	gwm := gwProxyMap{
 		"C2": &gwProxy{
-			rtt:  400 * time.Millisecond,
+			rtt:  20 * time.Millisecond,
 			up:   1 * 1024 * 1024 * 1024, // 1gbit
 			down: 1 * 1024 * 1024 * 1024, // 1gbit
 		},
@@ -5583,7 +5597,7 @@
 	}
 
 	// Place a stream in C1.
-	_, err := js.AddStream(cfg)
+	_, err := js.AddStream(cfg, nats.MaxWait(10*time.Second))
 	require_NoError(t, err)
 
 	chunk := bytes.Repeat([]byte("Z"), 1000*1024) // ~1MB
@@ -5594,7 +5608,7 @@
 	}
 	select {
 	case <-js.PublishAsyncComplete():
-	case <-time.After(5 * time.Second):
+	case <-time.After(10 * time.Second):
 		t.Fatalf("Did not receive completion signal")
 	}
@@ -5603,7 +5617,7 @@
 	_, err = js.UpdateStream(cfg)
 	require_NoError(t, err)
 
-	checkFor(t, 10*time.Second, time.Second, func() error {
+	checkFor(t, 20*time.Second, time.Second, func() error {
 		si, err := js.StreamInfo("TEST", nats.MaxWait(time.Second))
 		if err != nil {
 			return err
@@ -5835,7 +5849,7 @@ func TestNoRaceEncodeConsumerStateBug(t *testing.T) {
 }
 
 // Performance impact on stream ingress with large number of consumers.
-func TestJetStreamLargeNumConsumersPerfImpact(t *testing.T) {
+func TestNoRaceJetStreamLargeNumConsumersPerfImpact(t *testing.T) {
 	skip(t)
 
 	s := RunBasicJetStreamServer(t)
@@ -5927,7 +5941,7 @@
 }
 
 // Performance impact on large number of consumers but sparse delivery.
-func TestJetStreamLargeNumConsumersSparseDelivery(t *testing.T) {
+func TestNoRaceJetStreamLargeNumConsumersSparseDelivery(t *testing.T) {
 	skip(t)
 
 	s := RunBasicJetStreamServer(t)
@@ -6120,12 +6134,15 @@ func TestNoRaceJetStreamClusterEnsureWALCompact(t *testing.T) {
 	err = node.InstallSnapshot(snap)
 	require_NoError(t, err)
 
-	received, done := 0, make(chan bool)
+	received, done := 0, make(chan bool, 1)
 	nc.Subscribe("zz", func(m *nats.Msg) {
 		received++
 		if received >= ns {
-			done <- true
+			select {
+			case done <- true:
+			default:
+			}
 		}
 		m.Ack()
 	})
@@ -6390,8 +6407,8 @@ func TestNoRaceJetStreamConsumerCreateTimeNumPending(t *testing.T) {
 	case <-time.After(5 * time.Second):
 	}
 
-	// Should stay under 5ms now, but for Travis variability say 25ms.
-	threshold := 25 * time.Millisecond
+	// Should stay under 5ms now, but for Travis variability say 50ms.
+	threshold := 50 * time.Millisecond
 
 	start := time.Now()
 	_, err = js.PullSubscribe("events.*", "dlc")
@@ -6527,3 +6544,1667 @@ func TestNoRaceJetStreamClusterGhostConsumers(t *testing.T) {
 		return fmt.Errorf("Still have missing: %+v", missing)
 	})
 }
+
+// This is to test a publish slowdown and general instability experienced in a setup similar to this.
+// We have feeder streams that are all sourced to an aggregate stream. All streams are interest retention.
+// We want to monitor the avg publish time for the sync publishers to the feeder streams, the ingest rate to
+// the aggregate stream, and general health of the consumers on the aggregate stream.
+// Target publish rate is ~2k/s with publish time being ~40-60ms but remaining stable.
+// We can also simulate max redeliveries that create interior deletes in streams.
+func TestNoRaceJetStreamClusterF3Setup(t *testing.T) {
+	// Uncomment to run. Needs to be on a pretty big machine. Do not want as part of Travis tests atm.
+	skip(t)
+
+	// These and the settings below achieve ~60ms pub time on avg and ~2k msgs per sec inbound to the aggregate stream.
+	// On my machine though.
+ np := clusterProxy{ + rtt: 2 * time.Millisecond, + up: 1 * 1024 * 1024 * 1024, // 1gbit + down: 1 * 1024 * 1024 * 1024, // 1gbit + } + + // Test params. + numSourceStreams := 20 + numConsumersPerSource := 1 + numPullersPerConsumer := 50 + numPublishers := 100 + setHighStartSequence := false + simulateMaxRedeliveries := false + maxBadPubTimes := uint32(20) + badPubThresh := 500 * time.Millisecond + testTime := 5 * time.Minute // make sure to do --timeout=65m + + t.Logf("Starting Test: Total Test Time %v", testTime) + + c := createJetStreamClusterWithNetProxy(t, "R3S", 3, &np) + defer c.shutdown() + + // Do some quick sanity checking for latency stuff. + { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 3, + Subjects: []string{"foo"}, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + defer js.DeleteStream("TEST") + + sl := c.streamLeader(globalAccountName, "TEST") + nc, js = jsClientConnect(t, sl) + defer nc.Close() + start := time.Now() + _, err = js.Publish("foo", []byte("hello")) + require_NoError(t, err) + // This is best case, and with client connection being close to free, this should be at least > rtt + if elapsed := time.Since(start); elapsed < np.rtt { + t.Fatalf("Expected publish time to be > %v, got %v", np.rtt, elapsed) + } + + nl := c.randomNonStreamLeader(globalAccountName, "TEST") + nc, js = jsClientConnect(t, nl) + defer nc.Close() + start = time.Now() + _, err = js.Publish("foo", []byte("hello")) + require_NoError(t, err) + // This is worst case, meaning message has to travel to leader, then to fastest replica, then back. + // So should be at 3x rtt, so check at least > 2x rtt. + if elapsed := time.Since(start); elapsed < 2*np.rtt { + t.Fatalf("Expected publish time to be > %v, got %v", 2*np.rtt, elapsed) + } + } + + // Setup source streams. + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + t.Logf("Creating %d Source Streams", numSourceStreams) + + var sources []string + wg := sync.WaitGroup{} + for i := 0; i < numSourceStreams; i++ { + sname := fmt.Sprintf("EVENT-%s", nuid.Next()) + sources = append(sources, sname) + wg.Add(1) + go func(stream string) { + defer wg.Done() + t.Logf(" %q", stream) + subj := fmt.Sprintf("%s.>", stream) + _, err := js.AddStream(&nats.StreamConfig{ + Name: stream, + Subjects: []string{subj}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + for j := 0; j < numConsumersPerSource; j++ { + consumer := fmt.Sprintf("C%d", j) + _, err := js.Subscribe(_EMPTY_, func(msg *nats.Msg) { + msg.Ack() + }, nats.BindStream(stream), nats.Durable(consumer), nats.ManualAck()) + require_NoError(t, err) + } + }(sname) + } + wg.Wait() + + var streamSources []*nats.StreamSource + for _, src := range sources { + streamSources = append(streamSources, &nats.StreamSource{Name: src}) + + } + + t.Log("Creating Aggregate Stream") + + // Now create the aggregate stream. + _, err := js.AddStream(&nats.StreamConfig{ + Name: "EVENTS", + Replicas: 3, + Retention: nats.InterestPolicy, + Sources: streamSources, + }) + require_NoError(t, err) + + // Set first sequence to a high number. + if setHighStartSequence { + require_NoError(t, js.PurgeStream("EVENTS", &nats.StreamPurgeRequest{Sequence: 32_000_001})) + } + + // Now create 2 pull consumers. 
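+	// With MaxDeliver(1) and interest retention a message whose only delivery is not
+	// acked cannot be redelivered, so it is removed and leaves an interior delete.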
+ _, err = js.PullSubscribe(_EMPTY_, "C1", + nats.BindStream("EVENTS"), + nats.MaxDeliver(1), + nats.AckWait(10*time.Second), + nats.ManualAck(), + ) + require_NoError(t, err) + + _, err = js.PullSubscribe(_EMPTY_, "C2", + nats.BindStream("EVENTS"), + nats.MaxDeliver(1), + nats.AckWait(10*time.Second), + nats.ManualAck(), + ) + require_NoError(t, err) + + t.Logf("Creating %d x 2 Pull Subscribers", numPullersPerConsumer) + + // Now create the pullers. + for _, subName := range []string{"C1", "C2"} { + for i := 0; i < numPullersPerConsumer; i++ { + go func(subName string) { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + sub, err := js.PullSubscribe(_EMPTY_, subName, + nats.BindStream("EVENTS"), + nats.MaxDeliver(1), + nats.AckWait(10*time.Second), + nats.ManualAck(), + ) + require_NoError(t, err) + + for { + msgs, err := sub.Fetch(25, nats.MaxWait(2*time.Second)) + if err != nil && err != nats.ErrTimeout { + t.Logf("Exiting pull subscriber %q: %v", subName, err) + return + } + // Shuffle + rand.Shuffle(len(msgs), func(i, j int) { msgs[i], msgs[j] = msgs[j], msgs[i] }) + + // Wait for a random interval up to 100ms. + time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) + + for _, m := range msgs { + // If we want to simulate max redeliveries being hit, since not acking + // once will cause it due to subscriber setup. + // 100_000 == 0.01% + if simulateMaxRedeliveries && rand.Intn(100_000) == 0 { + md, err := m.Metadata() + require_NoError(t, err) + t.Logf("** Skipping Ack: %d **", md.Sequence.Stream) + } else { + m.Ack() + } + } + } + }(subName) + } + } + + // Now create feeder publishers. + eventTypes := []string{"PAYMENT", "SUBMISSION", "CANCEL"} + + msg := make([]byte, 2*1024) // 2k payload + rand.Read(msg) + + // For tracking pub times. + var pubs int + var totalPubTime time.Duration + var pmu sync.Mutex + last := time.Now() + + updatePubStats := func(elapsed time.Duration) { + pmu.Lock() + defer pmu.Unlock() + // Reset every 5s + if time.Since(last) > 5*time.Second { + pubs = 0 + totalPubTime = 0 + last = time.Now() + } + pubs++ + totalPubTime += elapsed + } + avgPubTime := func() time.Duration { + pmu.Lock() + np := pubs + tpt := totalPubTime + pmu.Unlock() + return tpt / time.Duration(np) + } + + t.Logf("Creating %d Publishers", numPublishers) + + var numLimitsExceeded atomic.Uint32 + errCh := make(chan error, 100) + + for i := 0; i < numPublishers; i++ { + go func() { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + for { + // Grab a random source stream + stream := sources[rand.Intn(len(sources))] + // Grab random event type. + evt := eventTypes[rand.Intn(len(eventTypes))] + subj := fmt.Sprintf("%s.%s", stream, evt) + start := time.Now() + _, err := js.Publish(subj, msg) + if err != nil { + t.Logf("Exiting publisher: %v", err) + return + } + elapsed := time.Since(start) + if elapsed > badPubThresh { + t.Logf("Publish time took more than expected: %v", elapsed) + numLimitsExceeded.Add(1) + if ne := numLimitsExceeded.Load(); ne > maxBadPubTimes { + errCh <- fmt.Errorf("Too many exceeded times on publish: %d", ne) + return + } + } + updatePubStats(elapsed) + } + }() + } + + t.Log("Creating Monitoring Routine - Data in ~10s") + + // Create monitoring routine. 
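+ // The monitor below samples ConsumerInfo and StreamInfo every 5 seconds and
+ // derives the ingest rate from successive LastSeq readings, while avgPubTime
+ // above supplies the rolling publish latency. Rate calculation, for reference:
+ ratePerSec := func(prevLast, curLast uint64, interval time.Duration) float64 {
+     return float64(curLast-prevLast) / interval.Seconds()
+ }
+ _ = ratePerSec // e.g. ratePerSec(lseq, si.State.LastSeq, 5*time.Second)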
+ go func() { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + fseq, lseq := uint64(0), uint64(0) + for { + // Grab consumers + var minAckFloor uint64 = math.MaxUint64 + for _, consumer := range []string{"C1", "C2"} { + ci, err := js.ConsumerInfo("EVENTS", consumer) + if err != nil { + t.Logf("Exiting Monitor: %v", err) + return + } + if lseq > 0 { + t.Logf("%s:\n Delivered:\t%d\n AckFloor:\t%d\n AckPending:\t%d\n NumPending:\t%d", + consumer, ci.Delivered.Stream, ci.AckFloor.Stream, ci.NumAckPending, ci.NumPending) + } + if ci.AckFloor.Stream < minAckFloor { + minAckFloor = ci.AckFloor.Stream + } + } + // Now grab aggregate stream state. + si, err := js.StreamInfo("EVENTS") + if err != nil { + t.Logf("Exiting Monitor: %v", err) + return + } + state := si.State + if lseq != 0 { + t.Logf("Stream:\n Msgs: \t%d\n First:\t%d\n Last: \t%d\n Deletes:\t%d\n", + state.Msgs, state.FirstSeq, state.LastSeq, state.NumDeleted) + t.Logf("Publish Stats:\n Msgs/s:\t%0.2f\n Avg Pub:\t%v\n\n", float64(si.State.LastSeq-lseq)/5.0, avgPubTime()) + if si.State.FirstSeq < minAckFloor && si.State.FirstSeq == fseq { + t.Log("Stream first seq < minimum ack floor") + } + } + fseq, lseq = si.State.FirstSeq, si.State.LastSeq + time.Sleep(5 * time.Second) + } + + }() + + select { + case e := <-errCh: + t.Fatal(e) + case <-time.After(testTime): + t.Fatalf("Did not receive completion signal") + } +} + +// Unbalanced stretch cluster. +// S2 (stream leader) will have a slow path to S1 (via proxy) and S3 (consumer leader) will have a fast path. +// +// Route Ports +// "S1": 14622 +// "S2": 15622 +// "S3": 16622 +func createStretchUnbalancedCluster(t testing.TB) (c *cluster, np *netProxy) { + t.Helper() + + tmpl := ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + cluster { + name: "F3" + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + } + ` + // Do these in order, S1, S2 (proxy) then S3. + c = &cluster{t: t, servers: make([]*Server, 3), opts: make([]*Options, 3), name: "F3"} + + // S1 + conf := fmt.Sprintf(tmpl, "S1", t.TempDir(), 14622, "route://127.0.0.1:15622, route://127.0.0.1:16622") + c.servers[0], c.opts[0] = RunServerWithConfig(createConfFile(t, []byte(conf))) + + // S2 + // Create the proxy first. Connect this to S1. Make it slow, e.g. 5ms RTT. + np = createNetProxy(1*time.Millisecond, 1024*1024*1024, 1024*1024*1024, "route://127.0.0.1:14622", true) + routes := fmt.Sprintf("%s, route://127.0.0.1:16622", np.routeURL()) + conf = fmt.Sprintf(tmpl, "S2", t.TempDir(), 15622, routes) + c.servers[1], c.opts[1] = RunServerWithConfig(createConfFile(t, []byte(conf))) + + // S3 + conf = fmt.Sprintf(tmpl, "S3", t.TempDir(), 16622, "route://127.0.0.1:14622, route://127.0.0.1:15622") + c.servers[2], c.opts[2] = RunServerWithConfig(createConfFile(t, []byte(conf))) + + c.checkClusterFormed() + c.waitOnClusterReady() + + return c, np +} + +// We test an interest based stream that has a cluster with a node with asymmetric paths from +// the stream leader and the consumer leader such that the consumer leader path is fast and +// replicated acks arrive sooner then the actual message. This path was considered, but also +// categorized as very rare and was expensive as it tried to forward a new stream msg delete +// proposal to the original stream leader. It now will deal with the issue locally and not +// slow down the ingest rate to the stream's publishers. 
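+// In an interest stream the first sequence should track the minimum ack floor
+// across all consumers. An ack replicated ahead of its message is remembered
+// locally as a "preAck" and applied once the message arrives, so the invariant
+// below holds without an extra round trip to the stream leader:
+//
+//     minAckFloor := min over consumers of ci.AckFloor.Stream
+//     // once acks settle: state.FirstSeq == minAckFloor+1 and preAcks drain to empty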
+func TestNoRaceJetStreamClusterDifferentRTTInterestBasedStreamSetup(t *testing.T) { + // Uncomment to run. Do not want as part of Travis tests atm. + skip(t) + + c, np := createStretchUnbalancedCluster(t) + defer c.shutdown() + defer np.stop() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // Now create the stream. + _, err := js.AddStream(&nats.StreamConfig{ + Name: "EVENTS", + Subjects: []string{"EV.>"}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + + // Make sure it's leader is on S2. + sl := c.servers[1] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnStreamLeader(globalAccountName, "EVENTS") + if s := c.streamLeader(globalAccountName, "EVENTS"); s != sl { + s.JetStreamStepdownStream(globalAccountName, "EVENTS") + return fmt.Errorf("Server %s is not stream leader yet", sl) + } + return nil + }) + + // Now create the consumer. + _, err = js.PullSubscribe(_EMPTY_, "C", nats.BindStream("EVENTS"), nats.ManualAck()) + require_NoError(t, err) + + // Make sure the consumer leader is on S3. + cl := c.servers[2] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnConsumerLeader(globalAccountName, "EVENTS", "C") + if s := c.consumerLeader(globalAccountName, "EVENTS", "C"); s != cl { + s.JetStreamStepdownConsumer(globalAccountName, "EVENTS", "C") + return fmt.Errorf("Server %s is not consumer leader yet", cl) + } + return nil + }) + + go func(js nats.JetStream) { + sub, err := js.PullSubscribe(_EMPTY_, "C", nats.BindStream("EVENTS"), nats.ManualAck()) + require_NoError(t, err) + + for { + msgs, err := sub.Fetch(100, nats.MaxWait(2*time.Second)) + if err != nil && err != nats.ErrTimeout { + return + } + // Shuffle + rand.Shuffle(len(msgs), func(i, j int) { msgs[i], msgs[j] = msgs[j], msgs[i] }) + for _, m := range msgs { + m.Ack() + } + } + }(js) + + numPublishers := 25 + pubThresh := 2 * time.Second + var maxExceeded atomic.Int64 + errCh := make(chan error, numPublishers) + wg := sync.WaitGroup{} + + msg := make([]byte, 2*1024) // 2k payload + rand.Read(msg) + + // Publishers. + for i := 0; i < numPublishers; i++ { + wg.Add(1) + go func(iter int) { + defer wg.Done() + + // Connect to random, the slow ones will be connected to the slow node. + // But if you connect them all there it will pass. 
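+ // Publish latency decomposes as the client-to-leader hop plus the
+ // leader-to-fastest-replica round trip needed for quorum: roughly one proxy
+ // rtt from a well-placed client, approaching three through the slow node,
+ // which is what pushes the slowest publishers toward pubThresh.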
+ s := c.randomServer() + nc, js := jsClientConnect(t, s) + defer nc.Close() + + for i := 0; i < 1_000; i++ { + start := time.Now() + _, err := js.Publish("EV.PAID", msg) + if err != nil { + errCh <- fmt.Errorf("Publish error: %v", err) + return + } + if elapsed := time.Since(start); elapsed > pubThresh { + errCh <- fmt.Errorf("Publish time exceeded") + if int64(elapsed) > maxExceeded.Load() { + maxExceeded.Store(int64(elapsed)) + } + return + } + } + }(i) + } + + wg.Wait() + + select { + case e := <-errCh: + t.Fatalf("%v: threshold is %v, maximum seen: %v", e, pubThresh, time.Duration(maxExceeded.Load())) + default: + } +} + +func TestNoRaceJetStreamInterestStreamCheckInterestRaceBug(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + + numConsumers := 10 + for i := 0; i < numConsumers; i++ { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err = js.Subscribe("foo", func(m *nats.Msg) { + m.Ack() + }, nats.Durable(fmt.Sprintf("C%d", i)), nats.ManualAck()) + require_NoError(t, err) + } + + numToSend := 10_000 + for i := 0; i < numToSend; i++ { + _, err := js.PublishAsync("foo", nil) + require_NoError(t, err) + } + select { + case <-js.PublishAsyncComplete(): + case <-time.After(20 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + // Wait til ackfloor is correct for all consumers. + checkFor(t, 20*time.Second, 100*time.Millisecond, func() error { + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + + mset.mu.RLock() + defer mset.mu.RUnlock() + + require_True(t, len(mset.consumers) == numConsumers) + + for _, o := range mset.consumers { + state, err := o.store.State() + require_NoError(t, err) + if state.AckFloor.Stream != uint64(numToSend) { + return fmt.Errorf("Ackfloor not correct yet") + } + } + } + return nil + }) + + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + + mset.mu.RLock() + defer mset.mu.RUnlock() + + state := mset.state() + require_True(t, state.Msgs == 0) + require_True(t, state.FirstSeq == uint64(numToSend+1)) + } +} + +func TestNoRaceJetStreamClusterInterestStreamConsistencyAfterRollingRestart(t *testing.T) { + // Uncomment to run. Needs to be on a big machine. Do not want as part of Travis tests atm. 
+ skip(t) + + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + numStreams := 200 + numConsumersPer := 5 + numPublishers := 10 + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + qch := make(chan bool) + + var mm sync.Mutex + ackMap := make(map[string]map[uint64][]string) + + addAckTracking := func(seq uint64, stream, consumer string) { + mm.Lock() + defer mm.Unlock() + sam := ackMap[stream] + if sam == nil { + sam = make(map[uint64][]string) + ackMap[stream] = sam + } + sam[seq] = append(sam[seq], consumer) + } + + doPullSubscriber := func(stream, consumer, filter string) { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + var err error + var sub *nats.Subscription + timeout := time.Now().Add(5 * time.Second) + for time.Now().Before(timeout) { + sub, err = js.PullSubscribe(filter, consumer, nats.BindStream(stream), nats.ManualAck()) + if err == nil { + break + } + } + if err != nil { + t.Logf("Error on pull subscriber: %v", err) + return + } + + for { + select { + case <-time.After(500 * time.Millisecond): + msgs, err := sub.Fetch(100, nats.MaxWait(time.Second)) + if err != nil { + continue + } + // Shuffle + rand.Shuffle(len(msgs), func(i, j int) { msgs[i], msgs[j] = msgs[j], msgs[i] }) + for _, m := range msgs { + meta, err := m.Metadata() + require_NoError(t, err) + m.Ack() + addAckTracking(meta.Sequence.Stream, stream, consumer) + if meta.NumDelivered > 1 { + t.Logf("Got a msg redelivered %d for sequence %d on %q %q\n", meta.NumDelivered, meta.Sequence.Stream, stream, consumer) + } + } + case <-qch: + nc.Flush() + return + } + } + } + + // Setup + wg := sync.WaitGroup{} + for i := 0; i < numStreams; i++ { + wg.Add(1) + go func(stream string) { + defer wg.Done() + subj := fmt.Sprintf("%s.>", stream) + _, err := js.AddStream(&nats.StreamConfig{ + Name: stream, + Subjects: []string{subj}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + for i := 0; i < numConsumersPer; i++ { + consumer := fmt.Sprintf("C%d", i) + filter := fmt.Sprintf("%s.%d", stream, i) + _, err = js.AddConsumer(stream, &nats.ConsumerConfig{ + Durable: consumer, + FilterSubject: filter, + AckPolicy: nats.AckExplicitPolicy, + AckWait: 2 * time.Second, + }) + require_NoError(t, err) + c.waitOnConsumerLeader(globalAccountName, stream, consumer) + go doPullSubscriber(stream, consumer, filter) + } + }(fmt.Sprintf("A-%d", i)) + } + wg.Wait() + + msg := make([]byte, 2*1024) // 2k payload + rand.Read(msg) + + // Controls if publishing is on or off. + var pubActive atomic.Bool + + doPublish := func() { + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + for { + select { + case <-time.After(100 * time.Millisecond): + if pubActive.Load() { + for i := 0; i < numStreams; i++ { + for j := 0; j < numConsumersPer; j++ { + subj := fmt.Sprintf("A-%d.%d", i, j) + // Don't care about errors here for this test. + js.Publish(subj, msg) + } + } + } + case <-qch: + return + } + } + } + + pubActive.Store(true) + + for i := 0; i < numPublishers; i++ { + go doPublish() + } + + // Let run for a bit. + time.Sleep(20 * time.Second) + + // Do a rolling restart. + for _, s := range c.servers { + t.Logf("Shutdown %v\n", s) + s.Shutdown() + s.WaitForShutdown() + time.Sleep(20 * time.Second) + t.Logf("Restarting %v\n", s) + s = c.restartServer(s) + c.waitOnServerHealthz(s) + } + + // Let run for a bit longer. + time.Sleep(10 * time.Second) + + // Stop pubs. + pubActive.Store(false) + + // Let settle. 
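+ // Settling means letting in-flight deliveries expire their 2s AckWait and
+ // letting replicated acks converge, so the audit below can compare each
+ // stream's FirstSeq against the minimum consumer ack floor.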
+ time.Sleep(10 * time.Second) + close(qch) + time.Sleep(20 * time.Second) + + nc, js = jsClientConnect(t, c.randomServer()) + defer nc.Close() + + minAckFloor := func(stream string) (uint64, string) { + var maf uint64 = math.MaxUint64 + var consumer string + for i := 0; i < numConsumersPer; i++ { + cname := fmt.Sprintf("C%d", i) + ci, err := js.ConsumerInfo(stream, cname) + require_NoError(t, err) + if ci.AckFloor.Stream < maf { + maf = ci.AckFloor.Stream + consumer = cname + } + } + return maf, consumer + } + + checkStreamAcks := func(stream string) { + mm.Lock() + defer mm.Unlock() + if sam := ackMap[stream]; sam != nil { + for seq := 1; ; seq++ { + acks := sam[uint64(seq)] + if acks == nil { + if sam[uint64(seq+1)] != nil { + t.Logf("Missing an ack on stream %q for sequence %d\n", stream, seq) + } else { + break + } + } + if len(acks) > 1 { + t.Logf("Multiple acks for %d which is not expected: %+v", seq, acks) + } + } + } + } + + // Now check all streams such that their first sequence is equal to the minimum of all consumers. + for i := 0; i < numStreams; i++ { + stream := fmt.Sprintf("A-%d", i) + si, err := js.StreamInfo(stream) + require_NoError(t, err) + + if maf, consumer := minAckFloor(stream); maf > si.State.FirstSeq { + t.Logf("\nBAD STATE DETECTED FOR %q, CHECKING OTHER SERVERS! ACK %d vs %+v LEADER %v, CL FOR %q %v\n", + stream, maf, si.State, c.streamLeader(globalAccountName, stream), consumer, c.consumerLeader(globalAccountName, stream, consumer)) + + t.Logf("TEST ACKS %+v\n", ackMap) + + checkStreamAcks(stream) + + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream(stream) + require_NoError(t, err) + state := mset.state() + t.Logf("Server %v Stream STATE %+v\n", s, state) + + var smv StoreMsg + if sm, err := mset.store.LoadMsg(state.FirstSeq, &smv); err == nil { + t.Logf("Subject for msg %d is %q", state.FirstSeq, sm.subj) + } else { + t.Logf("Could not retrieve msg for %d: %v", state.FirstSeq, err) + } + + if len(mset.preAcks) > 0 { + t.Logf("%v preAcks %+v\n", s, mset.preAcks) + } + + for _, o := range mset.consumers { + ostate, err := o.store.State() + require_NoError(t, err) + t.Logf("Consumer STATE for %q is %+v\n", o.name, ostate) + } + } + t.Fatalf("BAD STATE: ACKFLOOR > FIRST %d vs %d\n", maf, si.State.FirstSeq) + } + } +} + +func TestNoRaceFileStoreNumPending(t *testing.T) { + // No need for all permutations here. + storeDir := t.TempDir() + fcfg := FileStoreConfig{ + StoreDir: storeDir, + BlockSize: 2 * 1024, // Create many blocks on purpose. + } + fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"*.*.*.*"}, Storage: FileStorage}) + require_NoError(t, err) + defer fs.Stop() + + tokens := []string{"foo", "bar", "baz"} + genSubj := func() string { + return fmt.Sprintf("%s.%s.%s.%s", + tokens[rand.Intn(len(tokens))], + tokens[rand.Intn(len(tokens))], + tokens[rand.Intn(len(tokens))], + tokens[rand.Intn(len(tokens))], + ) + } + + for i := 0; i < 50_000; i++ { + subj := genSubj() + _, _, err := fs.StoreMsg(subj, nil, []byte("Hello World")) + require_NoError(t, err) + } + + state := fs.State() + + // Scan one by one for sanity check against other calculations. + sanityCheck := func(sseq uint64, filter string) SimpleState { + t.Helper() + var ss SimpleState + var smv StoreMsg + // For here we know 0 is invalid, set to 1. 
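+         // Filter matching below uses NATS wildcard semantics, e.g.:
+         //     subjectIsSubsetMatch("foo.bar.baz.qux", "foo.>")       // true: '>' matches the remainder
+         //     subjectIsSubsetMatch("foo.bar.baz.qux", "*.bar.*.qux") // true: '*' matches one token
+         //     subjectIsSubsetMatch("foo.bar", "foo.*.baz")           // false: token counts differ
+         // (The clamp below only normalizes the 1-based start sequence.)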
+ if sseq == 0 { + sseq = 1 + } + for seq := sseq; seq <= state.LastSeq; seq++ { + sm, err := fs.LoadMsg(seq, &smv) + if err != nil { + t.Logf("Encountered error %v loading sequence: %d", err, seq) + continue + } + if subjectIsSubsetMatch(sm.subj, filter) { + ss.Msgs++ + ss.Last = seq + if ss.First == 0 || seq < ss.First { + ss.First = seq + } + } + } + return ss + } + + check := func(sseq uint64, filter string) { + t.Helper() + np, lvs := fs.NumPending(sseq, filter, false) + ss := fs.FilteredState(sseq, filter) + sss := sanityCheck(sseq, filter) + if lvs != state.LastSeq { + t.Fatalf("Expected NumPending to return valid through last of %d but got %d", state.LastSeq, lvs) + } + if ss.Msgs != np { + t.Fatalf("NumPending of %d did not match ss.Msgs of %d", np, ss.Msgs) + } + if ss != sss { + t.Fatalf("Failed sanity check, expected %+v got %+v", sss, ss) + } + } + + sanityCheckLastOnly := func(sseq uint64, filter string) SimpleState { + t.Helper() + var ss SimpleState + var smv StoreMsg + // For here we know 0 is invalid, set to 1. + if sseq == 0 { + sseq = 1 + } + seen := make(map[string]bool) + for seq := state.LastSeq; seq >= sseq; seq-- { + sm, err := fs.LoadMsg(seq, &smv) + if err != nil { + t.Logf("Encountered error %v loading sequence: %d", err, seq) + continue + } + if !seen[sm.subj] && subjectIsSubsetMatch(sm.subj, filter) { + ss.Msgs++ + if ss.Last == 0 { + ss.Last = seq + } + if ss.First == 0 || seq < ss.First { + ss.First = seq + } + seen[sm.subj] = true + } + } + return ss + } + + checkLastOnly := func(sseq uint64, filter string) { + t.Helper() + np, lvs := fs.NumPending(sseq, filter, true) + ss := sanityCheckLastOnly(sseq, filter) + if lvs != state.LastSeq { + t.Fatalf("Expected NumPending to return valid through last of %d but got %d", state.LastSeq, lvs) + } + if ss.Msgs != np { + t.Fatalf("NumPending of %d did not match ss.Msgs of %d", np, ss.Msgs) + } + } + + startSeqs := []uint64{0, 1, 2, 200, 444, 555, 2222, 8888, 12_345, 28_222, 33_456, 44_400, 49_999} + checkSubs := []string{"foo.>", "*.bar.>", "foo.bar.*.baz", "*.bar.>", "*.foo.bar.*", "foo.foo.bar.baz"} + + for _, filter := range checkSubs { + for _, start := range startSeqs { + check(start, filter) + checkLastOnly(start, filter) + } + } +} + +func TestNoRaceJetStreamClusterUnbalancedInterestMultipleConsumers(t *testing.T) { + c, np := createStretchUnbalancedCluster(t) + defer c.shutdown() + defer np.stop() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // Now create the stream. + _, err := js.AddStream(&nats.StreamConfig{ + Name: "EVENTS", + Subjects: []string{"EV.>"}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + + // Make sure it's leader is on S2. + sl := c.servers[1] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnStreamLeader(globalAccountName, "EVENTS") + if s := c.streamLeader(globalAccountName, "EVENTS"); s != sl { + s.JetStreamStepdownStream(globalAccountName, "EVENTS") + return fmt.Errorf("Server %s is not stream leader yet", sl) + } + return nil + }) + + // Create a fast ack consumer. + _, err = js.Subscribe("EV.NEW", func(m *nats.Msg) { + m.Ack() + }, nats.Durable("C"), nats.ManualAck()) + require_NoError(t, err) + + // Make sure the consumer leader is on S3. 
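+ // Leader pinning here (as for the stream leader above) is the
+ // stepdown-until-elected pattern: keep stepping the current leader down until
+ // raft happens to elect the desired server, then the checkFor loop returns nil.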
+ cl := c.servers[2] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnConsumerLeader(globalAccountName, "EVENTS", "C") + if s := c.consumerLeader(globalAccountName, "EVENTS", "C"); s != cl { + s.JetStreamStepdownConsumer(globalAccountName, "EVENTS", "C") + return fmt.Errorf("Server %s is not consumer leader yet", cl) + } + return nil + }) + + // Connect a client directly to the stream leader. + nc, js = jsClientConnect(t, sl) + defer nc.Close() + + // Now create a pull subscriber. + sub, err := js.PullSubscribe("EV.NEW", "D", nats.ManualAck()) + require_NoError(t, err) + + // Make sure this consumer leader is on S1. + cl = c.servers[0] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnConsumerLeader(globalAccountName, "EVENTS", "D") + if s := c.consumerLeader(globalAccountName, "EVENTS", "D"); s != cl { + s.JetStreamStepdownConsumer(globalAccountName, "EVENTS", "D") + return fmt.Errorf("Server %s is not consumer leader yet", cl) + } + return nil + }) + + numToSend := 1000 + for i := 0; i < numToSend; i++ { + _, err := js.PublishAsync("EV.NEW", nil) + require_NoError(t, err) + } + select { + case <-js.PublishAsyncComplete(): + case <-time.After(20 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + // Now make sure we can pull messages since we have not acked. + // The bug is that the acks arrive on S1 faster then the messages but we want to + // make sure we do not remove prematurely. + msgs, err := sub.Fetch(100, nats.MaxWait(time.Second)) + require_NoError(t, err) + require_True(t, len(msgs) == 100) + for _, m := range msgs { + m.AckSync() + } + + ci, err := js.ConsumerInfo("EVENTS", "D") + require_NoError(t, err) + require_True(t, ci.NumPending == uint64(numToSend-100)) + require_True(t, ci.NumAckPending == 0) + require_True(t, ci.Delivered.Stream == 100) + require_True(t, ci.AckFloor.Stream == 100) + + // Check stream state on all servers. + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream("EVENTS") + require_NoError(t, err) + state := mset.state() + require_True(t, state.Msgs == 900) + require_True(t, state.FirstSeq == 101) + require_True(t, state.LastSeq == 1000) + require_True(t, state.Consumers == 2) + } + + msgs, err = sub.Fetch(900, nats.MaxWait(time.Second)) + require_NoError(t, err) + require_True(t, len(msgs) == 900) + for _, m := range msgs { + m.AckSync() + } + + // Let acks propagate. + time.Sleep(250 * time.Millisecond) + + // Check final stream state on all servers. + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream("EVENTS") + require_NoError(t, err) + state := mset.state() + require_True(t, state.Msgs == 0) + require_True(t, state.FirstSeq == 1001) + require_True(t, state.LastSeq == 1000) + require_True(t, state.Consumers == 2) + // Now check preAcks + mset.mu.RLock() + numPreAcks := len(mset.preAcks) + mset.mu.RUnlock() + require_True(t, numPreAcks == 0) + } +} + +func TestNoRaceJetStreamClusterUnbalancedInterestMultipleFilteredConsumers(t *testing.T) { + c, np := createStretchUnbalancedCluster(t) + defer c.shutdown() + defer np.stop() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // Now create the stream. + _, err := js.AddStream(&nats.StreamConfig{ + Name: "EVENTS", + Subjects: []string{"EV.>"}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + + // Make sure it's leader is on S2. 
+ sl := c.servers[1] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnStreamLeader(globalAccountName, "EVENTS") + if s := c.streamLeader(globalAccountName, "EVENTS"); s != sl { + s.JetStreamStepdownStream(globalAccountName, "EVENTS") + return fmt.Errorf("Server %s is not stream leader yet", sl) + } + return nil + }) + + // Create a fast ack consumer. + _, err = js.Subscribe("EV.NEW", func(m *nats.Msg) { + m.Ack() + }, nats.Durable("C"), nats.ManualAck()) + require_NoError(t, err) + + // Make sure the consumer leader is on S3. + cl := c.servers[2] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnConsumerLeader(globalAccountName, "EVENTS", "C") + if s := c.consumerLeader(globalAccountName, "EVENTS", "C"); s != cl { + s.JetStreamStepdownConsumer(globalAccountName, "EVENTS", "C") + return fmt.Errorf("Server %s is not consumer leader yet", cl) + } + return nil + }) + + // Connect a client directly to the stream leader. + nc, js = jsClientConnect(t, sl) + defer nc.Close() + + // Now create another fast ack consumer. + _, err = js.Subscribe("EV.UPDATED", func(m *nats.Msg) { + m.Ack() + }, nats.Durable("D"), nats.ManualAck()) + require_NoError(t, err) + + // Make sure this consumer leader is on S1. + cl = c.servers[0] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnConsumerLeader(globalAccountName, "EVENTS", "D") + if s := c.consumerLeader(globalAccountName, "EVENTS", "D"); s != cl { + s.JetStreamStepdownConsumer(globalAccountName, "EVENTS", "D") + return fmt.Errorf("Server %s is not consumer leader yet", cl) + } + return nil + }) + + numToSend := 500 + for i := 0; i < numToSend; i++ { + _, err := js.PublishAsync("EV.NEW", nil) + require_NoError(t, err) + _, err = js.PublishAsync("EV.UPDATED", nil) + require_NoError(t, err) + } + select { + case <-js.PublishAsyncComplete(): + case <-time.After(20 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + // Let acks propagate. + time.Sleep(250 * time.Millisecond) + + ci, err := js.ConsumerInfo("EVENTS", "D") + require_NoError(t, err) + require_True(t, ci.NumPending == 0) + require_True(t, ci.NumAckPending == 0) + require_True(t, ci.Delivered.Consumer == 500) + require_True(t, ci.Delivered.Stream == 1000) + require_True(t, ci.AckFloor.Consumer == 500) + require_True(t, ci.AckFloor.Stream == 1000) + + // Check final stream state on all servers. + for _, s := range c.servers { + mset, err := s.GlobalAccount().lookupStream("EVENTS") + require_NoError(t, err) + state := mset.state() + require_True(t, state.Msgs == 0) + require_True(t, state.FirstSeq == 1001) + require_True(t, state.LastSeq == 1000) + require_True(t, state.Consumers == 2) + // Now check preAcks + mset.mu.RLock() + numPreAcks := len(mset.preAcks) + mset.mu.RUnlock() + require_True(t, numPreAcks == 0) + } +} + +func TestNoRaceParallelStreamAndConsumerCreation(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + // stream config. + scfg := &StreamConfig{ + Name: "TEST", + Subjects: []string{"foo", "bar"}, + MaxMsgs: 10, + Storage: FileStorage, + Replicas: 1, + } + + // Will do these direct against the low level API to really make + // sure parallel creation ok. + np := 1000 + startCh := make(chan bool) + errCh := make(chan error, np) + wg := sync.WaitGroup{} + wg.Add(np) + + var streams sync.Map + + for i := 0; i < np; i++ { + go func() { + defer wg.Done() + + // Make them all fire at once. 
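+             // A single close(startCh) releases every goroutine blocked on the
+             // receive below at the same instant, maximizing contention on the
+             // addStream path: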
+ <-startCh + + if mset, err := s.GlobalAccount().addStream(scfg); err != nil { + t.Logf("Stream create got an error: %v", err) + errCh <- err + } else { + streams.Store(mset, true) + } + }() + } + time.Sleep(100 * time.Millisecond) + close(startCh) + wg.Wait() + + // Check for no errors. + if len(errCh) > 0 { + t.Fatalf("Expected no errors, got %d", len(errCh)) + } + + // Now make sure we really only created one stream. + var numStreams int + streams.Range(func(k, v any) bool { + numStreams++ + return true + }) + if numStreams > 1 { + t.Fatalf("Expected only one stream to be really created, got %d out of %d attempts", numStreams, np) + } + + // Also make sure we cleanup the inflight entries for streams. + gacc := s.GlobalAccount() + _, jsa, err := gacc.checkForJetStream() + require_NoError(t, err) + var numEntries int + jsa.inflight.Range(func(k, v any) bool { + numEntries++ + return true + }) + if numEntries > 0 { + t.Fatalf("Expected no inflight entries to be left over, got %d", numEntries) + } + + // Now do consumers. + mset, err := gacc.lookupStream("TEST") + require_NoError(t, err) + + cfg := &ConsumerConfig{ + DeliverSubject: "to", + Name: "DLC", + AckPolicy: AckExplicit, + } + + startCh = make(chan bool) + errCh = make(chan error, np) + wg.Add(np) + + var consumers sync.Map + + for i := 0; i < np; i++ { + go func() { + defer wg.Done() + + // Make them all fire at once. + <-startCh + + if _, err = mset.addConsumer(cfg); err != nil { + t.Logf("Consumer create got an error: %v", err) + errCh <- err + } else { + consumers.Store(mset, true) + } + }() + } + time.Sleep(100 * time.Millisecond) + close(startCh) + wg.Wait() + + // Check for no errors. + if len(errCh) > 0 { + t.Fatalf("Expected no errors, got %d", len(errCh)) + } + + // Now make sure we really only created one stream. + var numConsumers int + consumers.Range(func(k, v any) bool { + numConsumers++ + return true + }) + if numConsumers > 1 { + t.Fatalf("Expected only one consumer to be really created, got %d out of %d attempts", numConsumers, np) + } +} + +func TestNoRaceJetStreamClusterLeafnodeConnectPerf(t *testing.T) { + // Uncomment to run. Needs to be on a big machine. Do not want as part of Travis tests atm. + skip(t) + + tmpl := strings.Replace(jsClusterAccountsTempl, "store_dir:", "domain: cloud, store_dir:", 1) + c := createJetStreamCluster(t, tmpl, "CLOUD", _EMPTY_, 3, 18033, true) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "STATE", + Subjects: []string{"STATE.GLOBAL.CELL1.*.>"}, + Replicas: 3, + }) + require_NoError(t, err) + + tmpl = strings.Replace(jsClusterTemplWithSingleFleetLeafNode, "store_dir:", "domain: vehicle, store_dir:", 1) + + var vinSerial int + genVIN := func() string { + vinSerial++ + return fmt.Sprintf("7PDSGAALXNN%06d", vinSerial) + } + + numVehicles := 500 + for i := 0; i < numVehicles; i++ { + start := time.Now() + vin := genVIN() + ln := c.createLeafNodeWithTemplateNoSystemWithProto(vin, tmpl, "ws") + nc, js := jsClientConnect(t, ln) + _, err := js.AddStream(&nats.StreamConfig{ + Name: "VEHICLE", + Subjects: []string{"STATE.GLOBAL.LOCAL.>"}, + Sources: []*nats.StreamSource{{ + Name: "STATE", + FilterSubject: fmt.Sprintf("STATE.GLOBAL.CELL1.%s.>", vin), + External: &nats.ExternalStream{ + APIPrefix: "$JS.cloud.API", + DeliverPrefix: fmt.Sprintf("DELIVER.STATE.GLOBAL.CELL1.%s", vin), + }, + }}, + }) + require_NoError(t, err) + // Create the sourced stream. 
+ checkLeafNodeConnectedCount(t, ln, 1) + if elapsed := time.Since(start); elapsed > 2*time.Second { + t.Fatalf("Took too long to create leafnode %d connection: %v", i+1, elapsed) + } + nc.Close() + } +} + +// This test ensures that outbound queues don't cause a run on +// memory when sending something to lots of clients. +func TestNoRaceClientOutboundQueueMemory(t *testing.T) { + opts := DefaultOptions() + s := RunServer(opts) + defer s.Shutdown() + + var before runtime.MemStats + var after runtime.MemStats + + var err error + clients := make([]*nats.Conn, 50000) + wait := &sync.WaitGroup{} + wait.Add(len(clients)) + + for i := 0; i < len(clients); i++ { + clients[i], err = nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port), nats.InProcessServer(s)) + if err != nil { + t.Fatalf("Error on connect: %v", err) + } + defer clients[i].Close() + + clients[i].Subscribe("test", func(m *nats.Msg) { + wait.Done() + }) + } + + runtime.GC() + runtime.ReadMemStats(&before) + + nc, err := nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port), nats.InProcessServer(s)) + if err != nil { + t.Fatalf("Error on connect: %v", err) + } + defer nc.Close() + + var m [48000]byte + if err = nc.Publish("test", m[:]); err != nil { + t.Fatal(err) + } + + wait.Wait() + + runtime.GC() + runtime.ReadMemStats(&after) + + hb, ha := float64(before.HeapAlloc), float64(after.HeapAlloc) + ms := float64(len(m)) + diff := float64(ha) - float64(hb) + inc := (diff / float64(hb)) * 100 + + if inc > 10 { + t.Logf("Message size: %.1fKB\n", ms/1024) + t.Logf("Subscribed clients: %d\n", len(clients)) + t.Logf("Heap allocs before: %.1fMB\n", hb/1024/1024) + t.Logf("Heap allocs after: %.1fMB\n", ha/1024/1024) + t.Logf("Heap allocs delta: %.1f%%\n", inc) + + t.Fatalf("memory increase was %.1f%% (should be <= 10%%)", inc) + } +} + +func TestNoRaceJetStreamClusterDifferentRTTInterestBasedStreamPreAck(t *testing.T) { + tmpl := ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + cluster { + name: "F3" + listen: 127.0.0.1:%d + routes = [%s] + } + + accounts { + $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } + } + ` + + // Route Ports + // "S1": 14622, + // "S2": 15622, + // "S3": 16622, + + // S2 (stream leader) will have a slow path to S1 (via proxy) and S3 (consumer leader) will have a fast path. + + // Do these in order, S1, S2 (proxy) then S3. + c := &cluster{t: t, servers: make([]*Server, 3), opts: make([]*Options, 3), name: "F3"} + + // S1 + conf := fmt.Sprintf(tmpl, "S1", t.TempDir(), 14622, "route://127.0.0.1:15622, route://127.0.0.1:16622") + c.servers[0], c.opts[0] = RunServerWithConfig(createConfFile(t, []byte(conf))) + + // S2 + // Create the proxy first. Connect this to S1. Make it slow, e.g. 5ms RTT. + np := createNetProxy(1*time.Millisecond, 1024*1024*1024, 1024*1024*1024, "route://127.0.0.1:14622", true) + routes := fmt.Sprintf("%s, route://127.0.0.1:16622", np.routeURL()) + conf = fmt.Sprintf(tmpl, "S2", t.TempDir(), 15622, routes) + c.servers[1], c.opts[1] = RunServerWithConfig(createConfFile(t, []byte(conf))) + + // S3 + conf = fmt.Sprintf(tmpl, "S3", t.TempDir(), 16622, "route://127.0.0.1:14622, route://127.0.0.1:15622") + c.servers[2], c.opts[2] = RunServerWithConfig(createConfFile(t, []byte(conf))) + + c.checkClusterFormed() + c.waitOnClusterReady() + defer c.shutdown() + defer np.stop() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // Now create the stream. 
+ _, err := js.AddStream(&nats.StreamConfig{ + Name: "EVENTS", + Subjects: []string{"EV.>"}, + Replicas: 3, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + + // Make sure it's leader is on S2. + sl := c.servers[1] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnStreamLeader(globalAccountName, "EVENTS") + if s := c.streamLeader(globalAccountName, "EVENTS"); s != sl { + s.JetStreamStepdownStream(globalAccountName, "EVENTS") + return fmt.Errorf("Server %s is not stream leader yet", sl) + } + return nil + }) + + // Now create the consumer. + _, err = js.AddConsumer("EVENTS", &nats.ConsumerConfig{ + Durable: "C", + AckPolicy: nats.AckExplicitPolicy, + DeliverSubject: "dx", + }) + require_NoError(t, err) + + // Make sure the consumer leader is on S3. + cl := c.servers[2] + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + c.waitOnConsumerLeader(globalAccountName, "EVENTS", "C") + if s := c.consumerLeader(globalAccountName, "EVENTS", "C"); s != cl { + s.JetStreamStepdownConsumer(globalAccountName, "EVENTS", "C") + return fmt.Errorf("Server %s is not consumer leader yet", sl) + } + return nil + }) + + // Create the real consumer on the consumer leader to make it efficient. + nc, js = jsClientConnect(t, cl) + defer nc.Close() + + _, err = js.Subscribe(_EMPTY_, func(msg *nats.Msg) { + msg.Ack() + }, nats.BindStream("EVENTS"), nats.Durable("C"), nats.ManualAck()) + require_NoError(t, err) + + for i := 0; i < 1_000; i++ { + _, err := js.PublishAsync("EVENTS.PAID", []byte("ok")) + require_NoError(t, err) + } + select { + case <-js.PublishAsyncComplete(): + case <-time.After(5 * time.Second): + t.Fatalf("Did not receive completion signal") + } + + slow := c.servers[0] + mset, err := slow.GlobalAccount().lookupStream("EVENTS") + require_NoError(t, err) + + // Make sure preAck is non-nil, so we know the logic has kicked in. + mset.mu.RLock() + preAcks := mset.preAcks + mset.mu.RUnlock() + require_NotNil(t, preAcks) + + checkFor(t, 5*time.Second, 200*time.Millisecond, func() error { + state := mset.state() + if state.Msgs == 0 { + mset.mu.RLock() + lp := len(mset.preAcks) + mset.mu.RUnlock() + if lp == 0 { + return nil + } else { + t.Fatalf("Expected no preAcks with no msgs, but got %d", lp) + } + } + return fmt.Errorf("Still have %d msgs left", state.Msgs) + }) + +} + +func TestNoRaceCheckAckFloorWithVeryLargeFirstSeqAndNewConsumers(t *testing.T) { + s := RunBasicJetStreamServer(t) + defer s.Shutdown() + + nc, _ := jsClientConnect(t, s) + defer nc.Close() + + // Make sure to time bound here for the acksync call below. + js, err := nc.JetStream(nats.MaxWait(200 * time.Millisecond)) + require_NoError(t, err) + + _, err = js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"wq-req"}, + Retention: nats.WorkQueuePolicy, + }) + require_NoError(t, err) + + largeFirstSeq := uint64(1_200_000_000) + err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Sequence: largeFirstSeq}) + require_NoError(t, err) + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, si.State.FirstSeq == largeFirstSeq) + + // Add a simple request to the stream. + sendStreamMsg(t, nc, "wq-req", "HELP") + + sub, err := js.PullSubscribe("wq-req", "dlc") + require_NoError(t, err) + + msgs, err := sub.Fetch(1) + require_NoError(t, err) + require_True(t, len(msgs) == 1) + + // The bug is around the checkAckFloor walking the sequences from current ackfloor + // to the first sequence of the stream. 
We time bound the max wait with the js context + // to 200ms. Since checkAckFloor is spinning and holding up processing of acks this will fail. + // We will short circuit new consumers to fix this one. + require_NoError(t, msgs[0].AckSync()) + + // Now do again so we move past the new consumer with no ack floor situation. + err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Sequence: 2 * largeFirstSeq}) + require_NoError(t, err) + si, err = js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, si.State.FirstSeq == 2*largeFirstSeq) + + sendStreamMsg(t, nc, "wq-req", "MORE HELP") + + // We check this one directly for this use case. + mset, err := s.GlobalAccount().lookupStream("TEST") + require_NoError(t, err) + o := mset.lookupConsumer("dlc") + require_True(t, o != nil) + + // Purge will move the stream floor by default, so force into the situation where it is back to largeFirstSeq. + // This will not trigger the new consumer logic, but will trigger a walk of the sequence space. + // Fix will be to walk the lesser of the two linear spaces. + o.mu.Lock() + o.asflr = largeFirstSeq + o.mu.Unlock() + + done := make(chan bool) + go func() { + o.checkAckFloor() + done <- true + }() + + select { + case <-done: + return + case <-time.After(time.Second): + t.Fatalf("Check ack floor taking too long!") + } +} + +func TestNoRaceReplicatedMirrorWithLargeStartingSequenceOverLeafnode(t *testing.T) { + // Cluster B + tmpl := strings.Replace(jsClusterTempl, "store_dir:", "domain: B, store_dir:", 1) + c := createJetStreamCluster(t, tmpl, "B", _EMPTY_, 3, 22020, true) + defer c.shutdown() + + // Cluster A + // Domain is "A' + lc := c.createLeafNodesWithStartPortAndDomain("A", 3, 22110, "A") + defer lc.shutdown() + + lc.waitOnClusterReady() + + // Create a stream on B (HUB/CLOUD) and set its starting sequence very high. + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Subjects: []string{"foo"}, + Replicas: 3, + }) + require_NoError(t, err) + + err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Sequence: 1_000_000_000}) + require_NoError(t, err) + + // Send in a small amount of messages. + for i := 0; i < 1000; i++ { + sendStreamMsg(t, nc, "foo", "Hello") + } + + si, err := js.StreamInfo("TEST") + require_NoError(t, err) + require_True(t, si.State.FirstSeq == 1_000_000_000) + + // Now try to create a replicated mirror on the leaf cluster. + lnc, ljs := jsClientConnect(t, lc.randomServer()) + defer lnc.Close() + + _, err = ljs.AddStream(&nats.StreamConfig{ + Name: "TEST", + Mirror: &nats.StreamSource{ + Name: "TEST", + Domain: "B", + }, + }) + require_NoError(t, err) + + // Make sure we sync quickly. + checkFor(t, time.Second, 200*time.Millisecond, func() error { + si, err = ljs.StreamInfo("TEST") + require_NoError(t, err) + if si.State.Msgs == 1000 && si.State.FirstSeq == 1_000_000_000 { + return nil + } + return fmt.Errorf("Mirror state not correct: %+v", si.State) + }) +} diff --git a/server/ocsp.go b/server/ocsp.go index fa8b21134..1c5e3a81d 100644 --- a/server/ocsp.go +++ b/server/ocsp.go @@ -30,6 +30,9 @@ import ( "time" "golang.org/x/crypto/ocsp" + + "github.com/memphisdev/memphis/server/certidp" + "github.com/memphisdev/memphis/server/certstore" ) const ( @@ -389,7 +392,7 @@ func (srv *Server) NewOCSPMonitor(config *tlsConfigKind) (*tls.Config, *OCSPMoni } // TODO: Add OCSP 'responder_cert' option in case CA cert not available. 
- issuers, err := getOCSPIssuer(caFile, cert.Certificate) + issuer, err := getOCSPIssuer(caFile, cert.Certificate) if err != nil { return nil, nil, err } @@ -402,7 +405,7 @@ func (srv *Server) NewOCSPMonitor(config *tlsConfigKind) (*tls.Config, *OCSPMoni certFile: certFile, stopCh: make(chan struct{}, 1), Leaf: cert.Leaf, - Issuer: issuers[len(issuers)-1], + Issuer: issuer, } // Get the certificate status from the memory, then remote OCSP responder. @@ -433,36 +436,35 @@ func (srv *Server) NewOCSPMonitor(config *tlsConfigKind) (*tls.Config, *OCSPMoni }, nil } - // Check whether need to verify staples from a client connection depending on the type. + // Check whether need to verify staples from a peer router or gateway connection. switch kind { - case kindStringMap[ROUTER], kindStringMap[GATEWAY], kindStringMap[LEAF]: + case kindStringMap[ROUTER], kindStringMap[GATEWAY]: tc.VerifyConnection = func(s tls.ConnectionState) error { oresp := s.OCSPResponse if oresp == nil { - return fmt.Errorf("%s client missing OCSP Staple", kind) + return fmt.Errorf("%s peer missing OCSP Staple", kind) } - // Client route connections will verify the response of the staple. + // Peer connections will verify the response of the staple. if len(s.VerifiedChains) == 0 { - return fmt.Errorf("%s client missing TLS verified chains", kind) + return fmt.Errorf("%s peer missing TLS verified chains", kind) } chain := s.VerifiedChains[0] - leaf := chain[0] - parent := issuers[len(issuers)-1] + peerLeaf := chain[0] + peerIssuer := certidp.GetLeafIssuerCert(chain, 0) + if peerIssuer == nil { + return fmt.Errorf("failed to get issuer certificate for %s peer", kind) + } - resp, err := ocsp.ParseResponseForCert(oresp, leaf, parent) + // Response signature of issuer or issuer delegate is checked in the library parse + resp, err := ocsp.ParseResponseForCert(oresp, peerLeaf, peerIssuer) if err != nil { - return fmt.Errorf("failed to parse OCSP response from %s client: %w", kind, err) + return fmt.Errorf("failed to parse OCSP response from %s peer: %w", kind, err) } - if resp.Certificate == nil { - if err := resp.CheckSignatureFrom(parent); err != nil { - return fmt.Errorf("OCSP staple not issued by issuer: %w", err) - } - } else { - if err := resp.Certificate.CheckSignatureFrom(parent); err != nil { - return fmt.Errorf("OCSP staple's signer not signed by issuer: %w", err) - } + + // If signer was issuer delegate double-check issuer delegate authorization + if resp.Certificate != nil { ok := false for _, eku := range resp.Certificate.ExtKeyUsage { if eku == x509.ExtKeyUsageOCSPSigning { @@ -474,14 +476,22 @@ func (srv *Server) NewOCSPMonitor(config *tlsConfigKind) (*tls.Config, *OCSPMoni return fmt.Errorf("OCSP staple's signer missing authorization by CA to act as OCSP signer") } } + + // Check that the OCSP response is effective, take defaults for clockskew and default validity + peerOpts := certidp.OCSPPeerConfig{ClockSkew: -1, TTLUnsetNextUpdate: -1} + sLog := certidp.Log{Debugf: srv.Debugf} + if !certidp.OCSPResponseCurrent(resp, &peerOpts, &sLog) { + return fmt.Errorf("OCSP staple from %s peer not current", kind) + } + if resp.Status != ocsp.Good { - return fmt.Errorf("bad status for OCSP Staple from %s client: %s", kind, ocspStatusString(resp.Status)) + return fmt.Errorf("bad status for OCSP Staple from %s peer: %s", kind, ocspStatusString(resp.Status)) } return nil } - // When server makes a client connection, need to also present an OCSP Staple. + // When server makes a peer connection, need to also present an OCSP Staple. 
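+ // (The verification above, distilled; sketch only, error handling elided:
+ //
+ //     resp, err := ocsp.ParseResponseForCert(stapleDER, peerLeaf, peerIssuer)
+ //     if err != nil || resp.Status != ocsp.Good {
+ //         return errors.New("staple missing, unparsable or not Good")
+ //     }
+ //     // freshness, with clock-skew tolerance, is certidp.OCSPResponseCurrent
+ //
+ // where ParseResponseForCert also validates the response signature against
+ // the issuer or an authorized delegate.)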
tc.GetClientCertificate = func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { raw, _, err := mon.getStatus() if err != nil { @@ -520,10 +530,11 @@ func (s *Server) setupOCSPStapleStoreDir() error { } type tlsConfigKind struct { - tlsConfig *tls.Config - tlsOpts *TLSConfigOpts - kind string - apply func(*tls.Config) + tlsConfig *tls.Config + tlsOpts *TLSConfigOpts + kind string + isLeafSpoke bool + apply func(*tls.Config) } func (s *Server) configureOCSP() []*tlsConfigKind { @@ -541,6 +552,26 @@ func (s *Server) configureOCSP() []*tlsConfigKind { } configs = append(configs, o) } + if config := sopts.Websocket.TLSConfig; config != nil { + opts := sopts.Websocket.tlsConfigOpts + o := &tlsConfigKind{ + kind: kindStringMap[CLIENT], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { sopts.Websocket.TLSConfig = tc }, + } + configs = append(configs, o) + } + if config := sopts.MQTT.TLSConfig; config != nil { + opts := sopts.tlsConfigOpts + o := &tlsConfigKind{ + kind: kindStringMap[CLIENT], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { sopts.MQTT.TLSConfig = tc }, + } + configs = append(configs, o) + } if config := sopts.Cluster.TLSConfig; config != nil { opts := sopts.Cluster.tlsConfigOpts o := &tlsConfigKind{ @@ -557,16 +588,7 @@ func (s *Server) configureOCSP() []*tlsConfigKind { kind: kindStringMap[LEAF], tlsConfig: config, tlsOpts: opts, - apply: func(tc *tls.Config) { - - // RequireAndVerifyClientCert is used to tell a client that it - // should send the client cert to the server. - tc.ClientAuth = tls.RequireAndVerifyClientCert - // GetClientCertificate is used by a client to send the client cert - // to a server. We're a server, so we must not set this. - tc.GetClientCertificate = nil - sopts.LeafNode.TLSConfig = tc - }, + apply: func(tc *tls.Config) { sopts.LeafNode.TLSConfig = tc }, } configs = append(configs, o) } @@ -576,15 +598,11 @@ func (s *Server) configureOCSP() []*tlsConfigKind { // in the apply func callback below. r, opts := remote, remote.tlsConfigOpts o := &tlsConfigKind{ - kind: kindStringMap[LEAF], - tlsConfig: config, - tlsOpts: opts, - apply: func(tc *tls.Config) { - // GetCertificate is used by a server to send the server cert to a - // client. We're a client, so we must not set this. - tc.GetCertificate = nil - r.TLSConfig = tc - }, + kind: kindStringMap[LEAF], + tlsConfig: config, + tlsOpts: opts, + isLeafSpoke: true, + apply: func(tc *tls.Config) { r.TLSConfig = tc }, } configs = append(configs, o) } @@ -606,9 +624,7 @@ func (s *Server) configureOCSP() []*tlsConfigKind { kind: kindStringMap[GATEWAY], tlsConfig: config, tlsOpts: opts, - apply: func(tc *tls.Config) { - gw.TLSConfig = tc - }, + apply: func(tc *tls.Config) { gw.TLSConfig = tc }, } configs = append(configs, o) } @@ -620,16 +636,33 @@ func (s *Server) enableOCSP() error { configs := s.configureOCSP() for _, config := range configs { - tc, mon, err := s.NewOCSPMonitor(config) - if err != nil { - return err + + // We do not staple Leaf Hub and Leaf Spokes, use ocsp_peer + if config.kind != kindStringMap[LEAF] { + // OCSP Stapling feature, will also enable tls server peer check for gateway and route peers + tc, mon, err := s.NewOCSPMonitor(config) + if err != nil { + return err + } + // Check if an OCSP stapling monitor is required for this certificate. 
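+ // For reference, the ocsp_peer option mentioned above is parsed per TLS block
+ // by parseOCSPPeer (ocsp_peer.go, below); an illustrative leafnode config,
+ // assuming the upstream option key and second-resolution durations:
+ //
+ //     leafnodes {
+ //         tls {
+ //             cert_file: "hub-cert.pem"
+ //             key_file: "hub-key.pem"
+ //             ca_file: "ca.pem"
+ //             verify: true
+ //             ocsp_peer: {
+ //                 verify: true
+ //                 ca_timeout: 2          # seconds
+ //                 allowed_clockskew: 30  # seconds
+ //             }
+ //         }
+ //     }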
+ if mon != nil { + s.ocsps = append(s.ocsps, mon) + + // Override the TLS config with one that follows OCSP stapling + config.apply(tc) + } } - // Check if an OCSP stapling monitor is required for this certificate. - if mon != nil { - s.ocsps = append(s.ocsps, mon) - // Override the TLS config with one that follows OCSP. - config.apply(tc) + // OCSP peer check (client mTLS, leaf mTLS, leaf remote TLS) + if config.kind == kindStringMap[CLIENT] || config.kind == kindStringMap[LEAF] { + tc, plugged, err := s.plugTLSOCSPPeer(config) + if err != nil { + return err + } + if plugged && tc != nil { + s.ocspPeerVerify = true + config.apply(tc) + } } } @@ -671,17 +704,39 @@ func (s *Server) reloadOCSP() error { // Restart the monitors under the new configuration. ocspm := make([]*OCSPMonitor, 0) + + // Reset server's ocspPeerVerify flag to re-detect at least one plugged OCSP peer + s.mu.Lock() + s.ocspPeerVerify = false + s.mu.Unlock() + s.stopOCSPResponseCache() + for _, config := range configs { - tc, mon, err := s.NewOCSPMonitor(config) - if err != nil { - return err + // We do not staple Leaf Hub and Leaf Spokes, use ocsp_peer + if config.kind != kindStringMap[LEAF] { + tc, mon, err := s.NewOCSPMonitor(config) + if err != nil { + return err + } + // Check if an OCSP stapling monitor is required for this certificate. + if mon != nil { + ocspm = append(ocspm, mon) + + // Apply latest TLS configuration. + config.apply(tc) + } } - // Check if an OCSP stapling monitor is required for this certificate. - if mon != nil { - ocspm = append(ocspm, mon) - // Apply latest TLS configuration. - config.apply(tc) + // OCSP peer check (client mTLS, leaf mTLS, leaf remote TLS) + if config.kind == kindStringMap[CLIENT] || config.kind == kindStringMap[LEAF] { + tc, plugged, err := s.plugTLSOCSPPeer(config) + if err != nil { + return err + } + if plugged && tc != nil { + s.ocspPeerVerify = true + config.apply(tc) + } } } @@ -693,6 +748,11 @@ func (s *Server) reloadOCSP() error { // Dispatch all goroutines once again. s.startOCSPMonitoring() + // Init and restart OCSP responder cache + s.stopOCSPResponseCache() + s.initOCSPResponseCache() + s.startOCSPResponseCache() + return nil } @@ -783,37 +843,81 @@ func parseCertPEM(name string) ([]*x509.Certificate, error) { return x509.ParseCertificates(pemBytes) } -// getOCSPIssuer returns a CA cert from the given path. If the path is empty, -// then this checks a given cert chain. If both are empty, then it returns an -// error. 
-func getOCSPIssuer(issuerCert string, chain [][]byte) ([]*x509.Certificate, error) { - var issuers []*x509.Certificate - var err error - switch { - case len(chain) == 1 && issuerCert == _EMPTY_: - err = fmt.Errorf("ocsp ca required in chain or configuration") - case issuerCert != _EMPTY_: - issuers, err = parseCertPEM(issuerCert) - case len(chain) > 1 && issuerCert == _EMPTY_: - issuers, err = x509.ParseCertificates(chain[1]) - default: - err = fmt.Errorf("invalid ocsp ca configuration") +// getOCSPIssuerLocally determines a leaf's issuer from locally configured certificates +func getOCSPIssuerLocally(trustedCAs []*x509.Certificate, certBundle []*x509.Certificate) (*x509.Certificate, error) { + var vOpts x509.VerifyOptions + var leaf *x509.Certificate + trustedCAPool := x509.NewCertPool() + + // Require Leaf as first cert in bundle + if len(certBundle) > 0 { + leaf = certBundle[0] + } else { + return nil, fmt.Errorf("invalid ocsp ca configuration") } - if err != nil { - return nil, err + + // Allow Issuer to be configured as second cert in bundle + if len(certBundle) > 1 { + // The operator may have misconfigured the cert bundle + issuerCandidate := certBundle[1] + err := issuerCandidate.CheckSignature(leaf.SignatureAlgorithm, leaf.RawTBSCertificate, leaf.Signature) + if err != nil { + return nil, fmt.Errorf("invalid issuer configuration: %w", err) + } else { + return issuerCandidate, nil + } } - if len(issuers) == 0 { - return nil, fmt.Errorf("no issuers found") + // Operator did not provide the Leaf Issuer in cert bundle second position + // so we will attempt to create at least one ordered verified chain from the + // trusted CA pool. + + // Specify CA trust store to validator; if unset, system trust store used + if len(trustedCAs) > 0 { + for _, ca := range trustedCAs { + trustedCAPool.AddCert(ca) + } + vOpts.Roots = trustedCAPool + } + + return certstore.GetLeafIssuer(leaf, vOpts), nil +} + +// getOCSPIssuer determines an issuer certificate from the cert (bundle) or the file-based CA trust store +func getOCSPIssuer(caFile string, chain [][]byte) (*x509.Certificate, error) { + var issuer *x509.Certificate + var trustedCAs []*x509.Certificate + var certBundle []*x509.Certificate + var err error + + // FIXME(tgb): extend if pluggable CA store provider added to NATS (i.e. 
other than PEM file) + + // Non-system default CA trust store passed + if caFile != _EMPTY_ { + trustedCAs, err = parseCertPEM(caFile) + if err != nil { + return nil, fmt.Errorf("failed to parse ca_file: %v", err) + } } - for _, issuer := range issuers { - if !issuer.IsCA { - return nil, fmt.Errorf("%s invalid ca basic constraints: is not ca", issuer.Subject) + // Specify bundled intermediate CA store + for _, certBytes := range chain { + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse cert: %v", err) } + certBundle = append(certBundle, cert) } - return issuers, nil + issuer, err = getOCSPIssuerLocally(trustedCAs, certBundle) + if err != nil || issuer == nil { + return nil, fmt.Errorf("no issuers found") + } + + if !issuer.IsCA { + return nil, fmt.Errorf("%s invalid ca basic constraints: is not ca", issuer.Subject) + } + return issuer, nil } func ocspStatusString(n int) string { diff --git a/server/ocsp_peer.go b/server/ocsp_peer.go new file mode 100644 index 000000000..3550fc27a --- /dev/null +++ b/server/ocsp_peer.go @@ -0,0 +1,405 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package server + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/crypto/ocsp" + + "github.com/memphisdev/memphis/server/certidp" +) + +func parseOCSPPeer(v interface{}) (pcfg *certidp.OCSPPeerConfig, retError error) { + var lt token + defer convertPanicToError(<, &retError) + tk, v := unwrapValue(v, <) + cm, ok := v.(map[string]interface{}) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrIllegalPeerOptsConfig, v)} + } + pcfg = certidp.NewOCSPPeerConfig() + retError = nil + for mk, mv := range cm { + tk, mv = unwrapValue(mv, <) + switch strings.ToLower(mk) { + case "verify": + verify, ok := mv.(bool) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldGeneric, mk)} + } + pcfg.Verify = verify + case "allowed_clockskew": + at := float64(0) + switch mv := mv.(type) { + case int64: + at = float64(mv) + case float64: + at = mv + case string: + d, err := time.ParseDuration(mv) + if err != nil { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, "unexpected type")} + } + at = d.Seconds() + default: + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, "unexpected type")} + } + if at >= 0 { + pcfg.ClockSkew = at + } + case "ca_timeout": + at := float64(0) + switch mv := mv.(type) { + case int64: + at = float64(mv) + case float64: + at = mv + case string: + d, err := time.ParseDuration(mv) + if err != nil { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, err)} + } + at = d.Seconds() + default: + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, "unexpected type")} + } + if at >= 0 { + pcfg.Timeout = at + } + case "cache_ttl_when_next_update_unset": + at := float64(0) + switch mv := 
mv.(type) { + case int64: + at = float64(mv) + case float64: + at = mv + case string: + d, err := time.ParseDuration(mv) + if err != nil { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, err)} + } + at = d.Seconds() + default: + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, "unexpected type")} + } + if at >= 0 { + pcfg.TTLUnsetNextUpdate = at + } + case "warn_only": + warnOnly, ok := mv.(bool) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldGeneric, mk)} + } + pcfg.WarnOnly = warnOnly + case "unknown_is_good": + unknownIsGood, ok := mv.(bool) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldGeneric, mk)} + } + pcfg.UnknownIsGood = unknownIsGood + case "allow_when_ca_unreachable": + allowWhenCAUnreachable, ok := mv.(bool) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldGeneric, mk)} + } + pcfg.AllowWhenCAUnreachable = allowWhenCAUnreachable + default: + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldGeneric, mk)} + } + } + return pcfg, nil +} + +func peerFromVerifiedChains(chains [][]*x509.Certificate) *x509.Certificate { + if len(chains) == 0 || len(chains[0]) == 0 { + return nil + } + return chains[0][0] +} + +// plugTLSOCSPPeer will plug the TLS handshake lifecycle for client mTLS connections and Leaf connections +func (s *Server) plugTLSOCSPPeer(config *tlsConfigKind) (*tls.Config, bool, error) { + if config == nil || config.tlsConfig == nil { + return nil, false, errors.New(certidp.ErrUnableToPlugTLSEmptyConfig) + } + kind := config.kind + isSpoke := config.isLeafSpoke + tcOpts := config.tlsOpts + if tcOpts == nil || tcOpts.OCSPPeerConfig == nil || !tcOpts.OCSPPeerConfig.Verify { + return nil, false, nil + } + s.Debugf(certidp.DbgPlugTLSForKind, config.kind) + // peer is a tls client + if kind == kindStringMap[CLIENT] || (kind == kindStringMap[LEAF] && !isSpoke) { + if !tcOpts.Verify { + return nil, false, errors.New(certidp.ErrMTLSRequired) + } + return s.plugClientTLSOCSPPeer(config) + } + // peer is a tls server + if kind == kindStringMap[LEAF] && isSpoke { + return s.plugServerTLSOCSPPeer(config) + } + return nil, false, nil +} + +func (s *Server) plugClientTLSOCSPPeer(config *tlsConfigKind) (*tls.Config, bool, error) { + if config == nil || config.tlsConfig == nil || config.tlsOpts == nil { + return nil, false, errors.New(certidp.ErrUnableToPlugTLSClient) + } + tc := config.tlsConfig + tcOpts := config.tlsOpts + kind := config.kind + if tcOpts.OCSPPeerConfig == nil || !tcOpts.OCSPPeerConfig.Verify { + return tc, false, nil + } + tc.VerifyConnection = func(cs tls.ConnectionState) error { + if !s.tlsClientOCSPValid(cs.VerifiedChains, tcOpts.OCSPPeerConfig) { + s.sendOCSPPeerRejectEvent(kind, peerFromVerifiedChains(cs.VerifiedChains), certidp.MsgTLSClientRejectConnection) + return errors.New(certidp.MsgTLSClientRejectConnection) + } + return nil + } + return tc, true, nil +} + +func (s *Server) plugServerTLSOCSPPeer(config *tlsConfigKind) (*tls.Config, bool, error) { + if config == nil || config.tlsConfig == nil || config.tlsOpts == nil { + return nil, false, errors.New(certidp.ErrUnableToPlugTLSServer) + } + tc := config.tlsConfig + tcOpts := config.tlsOpts + kind := config.kind + if tcOpts.OCSPPeerConfig == nil || !tcOpts.OCSPPeerConfig.Verify { + return tc, false, nil + } + tc.VerifyConnection = func(cs tls.ConnectionState) error { + if !s.tlsServerOCSPValid(cs.VerifiedChains, 
tcOpts.OCSPPeerConfig) { + s.sendOCSPPeerRejectEvent(kind, peerFromVerifiedChains(cs.VerifiedChains), certidp.MsgTLSServerRejectConnection) + return errors.New(certidp.MsgTLSServerRejectConnection) + } + return nil + } + return tc, true, nil +} + +// tlsServerOCSPValid evaluates verified chains (post successful TLS handshake) against OCSP +// eligibility. A verified chain is considered OCSP Valid if either none of the links are +// OCSP eligible, or current "good" responses from the CA can be obtained for each eligible link. +// Upon first OCSP Valid chain found, the Server is deemed OCSP Valid. If none of the chains are +// OCSP Valid, the Server is deemed OCSP Invalid. A verified self-signed certificate (chain length 1) +// is also considered OCSP Valid. +func (s *Server) tlsServerOCSPValid(chains [][]*x509.Certificate, opts *certidp.OCSPPeerConfig) bool { + s.Debugf(certidp.DbgNumServerChains, len(chains)) + return s.peerOCSPValid(chains, opts) +} + +// tlsClientOCSPValid evaluates verified chains (post successful TLS handshake) against OCSP +// eligibility. A verified chain is considered OCSP Valid if either none of the links are +// OCSP eligible, or current "good" responses from the CA can be obtained for each eligible link. +// Upon first OCSP Valid chain found, the Client is deemed OCSP Valid. If none of the chains are +// OCSP Valid, the Client is deemed OCSP Invalid. A verified self-signed certificate (chain length 1) +// is also considered OCSP Valid. +func (s *Server) tlsClientOCSPValid(chains [][]*x509.Certificate, opts *certidp.OCSPPeerConfig) bool { + s.Debugf(certidp.DbgNumClientChains, len(chains)) + return s.peerOCSPValid(chains, opts) +} + +func (s *Server) peerOCSPValid(chains [][]*x509.Certificate, opts *certidp.OCSPPeerConfig) bool { + peer := peerFromVerifiedChains(chains) + if peer == nil { + s.Errorf(certidp.ErrPeerEmptyAutoReject) + return false + } + for ci, chain := range chains { + s.Debugf(certidp.DbgLinksInChain, ci, len(chain)) + // Self-signed certificate is Client OCSP Valid (no CA) + if len(chain) == 1 { + s.Debugf(certidp.DbgSelfSignedValid, ci) + return true + } + // Check if any of the links in the chain are OCSP eligible + chainEligible := false + var eligibleLinks []*certidp.ChainLink + // Iterate over links skipping the root cert which is not OCSP eligible (self == issuer) + for linkPos := 0; linkPos < len(chain)-1; linkPos++ { + cert := chain[linkPos] + link := &certidp.ChainLink{ + Leaf: cert, + } + if certidp.CertOCSPEligible(link) { + chainEligible = true + issuerCert := certidp.GetLeafIssuerCert(chain, linkPos) + if issuerCert == nil { + // unexpected chain condition, reject Client as OCSP Invalid + return false + } + link.Issuer = issuerCert + eligibleLinks = append(eligibleLinks, link) + } + } + // A trust-store verified chain that is not OCSP eligible is always OCSP Valid + if !chainEligible { + s.Debugf(certidp.DbgValidNonOCSPChain, ci) + return true + } + s.Debugf(certidp.DbgChainIsOCSPEligible, ci, len(eligibleLinks)) + // Chain has at least one OCSP eligible link, so check each eligible link; + // any link with a !good OCSP response chain OCSP Invalid + chainValid := true + for _, link := range eligibleLinks { + // if option selected, good could reflect either ocsp.Good or ocsp.Unknown + if badReason, good := s.certOCSPGood(link, opts); !good { + s.Debugf(badReason) + s.sendOCSPPeerChainlinkInvalidEvent(peer, link.Leaf, badReason) + chainValid = false + break + } + } + if chainValid { + s.Debugf(certidp.DbgChainIsOCSPValid, ci) + return 
true + } + } + // If we are here, all chains had OCSP eligible links, but none of the chains achieved OCSP valid + s.Debugf(certidp.DbgNoOCSPValidChains) + return false +} + +func (s *Server) certOCSPGood(link *certidp.ChainLink, opts *certidp.OCSPPeerConfig) (string, bool) { + if link == nil || link.Leaf == nil || link.Issuer == nil || link.OCSPWebEndpoints == nil || len(*link.OCSPWebEndpoints) < 1 { + return "Empty chainlink found", false + } + var err error + sLogs := &certidp.Log{ + Debugf: s.Debugf, + Noticef: s.Noticef, + Warnf: s.Warnf, + Errorf: s.Errorf, + Tracef: s.Tracef, + } + fingerprint := certidp.GenerateFingerprint(link.Leaf) + // Used for debug/operator only, not match + subj := certidp.GetSubjectDNForm(link.Leaf) + var rawResp []byte + var ocspr *ocsp.Response + var useCachedResp bool + var rc = s.ocsprc + var cachedRevocation bool + // Check our cache before calling out to the CA OCSP responder + s.Debugf(certidp.DbgCheckingCacheForCert, subj, fingerprint) + if rawResp = rc.Get(fingerprint, sLogs); len(rawResp) > 0 { + // Signature validation of CA's OCSP response occurs in ParseResponse + ocspr, err = ocsp.ParseResponse(rawResp, link.Issuer) + if err == nil && ocspr != nil { + // Check if OCSP Response delegation present and if so is valid + if !certidp.ValidDelegationCheck(link.Issuer, ocspr) { + // Invalid delegation was already in cache, purge it and don't use it + s.Debugf(certidp.MsgCachedOCSPResponseInvalid, subj) + rc.Delete(fingerprint, true, sLogs) + goto AFTERCACHE + } + if certidp.OCSPResponseCurrent(ocspr, opts, sLogs) { + s.Debugf(certidp.DbgCurrentResponseCached, certidp.GetStatusAssertionStr(ocspr.Status)) + useCachedResp = true + } else { + // Cached response is not current, delete it and tidy runtime stats to reflect a miss; + // if preserve_revoked is enabled, the cache will not delete the cached response + s.Debugf(certidp.DbgExpiredResponseCached, certidp.GetStatusAssertionStr(ocspr.Status)) + rc.Delete(fingerprint, true, sLogs) + } + // Regardless of currency, record a cached revocation found in case AllowWhenCAUnreachable is set + if ocspr.Status == ocsp.Revoked { + cachedRevocation = true + } + } else { + // Bogus cached assertion, purge it and don't use it + s.Debugf(certidp.MsgCachedOCSPResponseInvalid, subj, fingerprint) + rc.Delete(fingerprint, true, sLogs) + goto AFTERCACHE + } + } +AFTERCACHE: + if !useCachedResp { + // CA OCSP responder callout needed + rawResp, err = certidp.FetchOCSPResponse(link, opts, sLogs) + if err != nil || rawResp == nil || len(rawResp) == 0 { + s.Warnf(certidp.ErrCAResponderCalloutFail, subj, err) + if opts.WarnOnly { + s.Warnf(certidp.MsgAllowWarnOnlyOccurred, subj) + return _EMPTY_, true + } + if opts.AllowWhenCAUnreachable && !cachedRevocation { + // Link has no cached history of revocation, so allow it to pass + s.Warnf(certidp.MsgAllowWhenCAUnreachableOccurred, subj) + return _EMPTY_, true + } else if opts.AllowWhenCAUnreachable { + // Link has cached but expired revocation so reject when CA is unreachable + s.Warnf(certidp.MsgAllowWhenCAUnreachableOccurredCachedRevoke, subj) + } + return certidp.MsgFailedOCSPResponseFetch, false + } + // Signature validation of CA's OCSP response occurs in ParseResponse + ocspr, err = ocsp.ParseResponse(rawResp, link.Issuer) + if err == nil && ocspr != nil { + // Check if OCSP Response delegation present and if so is valid + if !certidp.ValidDelegationCheck(link.Issuer, ocspr) { + s.Warnf(certidp.MsgOCSPResponseDelegationInvalid, subj) + if opts.WarnOnly { + // Can't use 
bogus assertion, but warn-only set so allow link to pass + s.Warnf(certidp.MsgAllowWarnOnlyOccurred, subj) + return _EMPTY_, true + } + return fmt.Sprintf(certidp.MsgOCSPResponseDelegationInvalid, subj), false + } + if !certidp.OCSPResponseCurrent(ocspr, opts, sLogs) { + s.Warnf(certidp.ErrNewCAResponseNotCurrent, subj) + if opts.WarnOnly { + // Can't use non-effective assertion, but warn-only set so allow link to pass + s.Warnf(certidp.MsgAllowWarnOnlyOccurred, subj) + return _EMPTY_, true + } + return certidp.MsgOCSPResponseNotEffective, false + } + } else { + s.Errorf(certidp.ErrCAResponseParseFailed, subj, err) + if opts.WarnOnly { + // Can't use bogus assertion, but warn-only set so allow link to pass + s.Warnf(certidp.MsgAllowWarnOnlyOccurred, subj) + return _EMPTY_, true + } + return certidp.MsgFailedOCSPResponseParse, false + } + // cache the valid fetched CA OCSP Response + rc.Put(fingerprint, ocspr, subj, sLogs) + } + + // Whether through valid cache response available or newly fetched valid response, now check the status + if ocspr.Status == ocsp.Revoked || (ocspr.Status == ocsp.Unknown && !opts.UnknownIsGood) { + s.Warnf(certidp.ErrOCSPInvalidPeerLink, subj, certidp.GetStatusAssertionStr(ocspr.Status)) + if opts.WarnOnly { + s.Warnf(certidp.MsgAllowWarnOnlyOccurred, subj) + return _EMPTY_, true + } + return fmt.Sprintf(certidp.MsgOCSPResponseInvalidStatus, certidp.GetStatusAssertionStr(ocspr.Status)), false + } + s.Debugf(certidp.DbgOCSPValidPeerLink, subj) + return _EMPTY_, true +} diff --git a/server/ocsp_responsecache.go b/server/ocsp_responsecache.go new file mode 100644 index 000000000..eb692437f --- /dev/null +++ b/server/ocsp_responsecache.go @@ -0,0 +1,636 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
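For orientation, the peer-verification plumbing above hangs off Go's standard tls.Config.VerifyConnection callback. Below is a minimal, self-contained sketch of that pattern, not the server's actual wiring; revocationCheck and withPeerOCSPCheck are illustrative stand-ins for peerOCSPValid and the plug functions.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
)

// revocationCheck is a hypothetical placeholder for chain-by-chain OCSP
// evaluation: here a self-signed peer (single-link chain) passes; the real
// logic requires a current "good" response for every OCSP-eligible link.
func revocationCheck(chains [][]*x509.Certificate) bool {
	return len(chains) > 0 && len(chains[0]) == 1
}

func withPeerOCSPCheck(base *tls.Config) *tls.Config {
	cfg := base.Clone()
	// VerifyConnection runs after the standard certificate verification has
	// produced cs.VerifiedChains, which is what the OCSP peer checks consume.
	cfg.VerifyConnection = func(cs tls.ConnectionState) error {
		if !revocationCheck(cs.VerifiedChains) {
			return errors.New("peer failed OCSP check, rejecting connection")
		}
		return nil
	}
	return cfg
}

func main() {
	cfg := withPeerOCSPCheck(&tls.Config{MinVersion: tls.VersionTLS12})
	fmt.Println("hook installed:", cfg.VerifyConnection != nil)
}

Because the hook only fires once built-in chain verification has succeeded, the server's checks can rely on VerifiedChains being populated; that is also why the client-kind path above insists on mTLS (ErrMTLSRequired) before enabling the check.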
+ +package server + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/klauspost/compress/s2" + "golang.org/x/crypto/ocsp" + + "github.com/memphisdev/memphis/server/certidp" +) + +const ( + OCSPResponseCacheDefaultDir = "_rc_" + OCSPResponseCacheDefaultFilename = "cache.json" + OCSPResponseCacheDefaultTempFilePrefix = "ocsprc-*" + OCSPResponseCacheMinimumSaveInterval = 1 * time.Second + OCSPResponseCacheDefaultSaveInterval = 5 * time.Minute +) + +type OCSPResponseCacheType int + +const ( + NONE OCSPResponseCacheType = iota + 1 + LOCAL +) + +var OCSPResponseCacheTypeMap = map[string]OCSPResponseCacheType{ + "none": NONE, + "local": LOCAL, +} + +type OCSPResponseCacheConfig struct { + Type OCSPResponseCacheType + LocalStore string + PreserveRevoked bool + SaveInterval float64 +} + +func NewOCSPResponseCacheConfig() *OCSPResponseCacheConfig { + return &OCSPResponseCacheConfig{ + Type: LOCAL, + LocalStore: OCSPResponseCacheDefaultDir, + PreserveRevoked: false, + SaveInterval: OCSPResponseCacheDefaultSaveInterval.Seconds(), + } +} + +type OCSPResponseCacheStats struct { + Responses int64 `json:"size"` + Hits int64 `json:"hits"` + Misses int64 `json:"misses"` + Revokes int64 `json:"revokes"` + Goods int64 `json:"goods"` + Unknowns int64 `json:"unknowns"` +} + +type OCSPResponseCacheItem struct { + Subject string `json:"subject,omitempty"` + CachedAt time.Time `json:"cached_at"` + RespStatus certidp.StatusAssertion `json:"resp_status"` + RespExpires time.Time `json:"resp_expires,omitempty"` + Resp []byte `json:"resp"` +} + +type OCSPResponseCache interface { + Put(key string, resp *ocsp.Response, subj string, log *certidp.Log) + Get(key string, log *certidp.Log) []byte + Delete(key string, miss bool, log *certidp.Log) + Type() string + Start(s *Server) + Stop(s *Server) + Online() bool + Config() *OCSPResponseCacheConfig + Stats() *OCSPResponseCacheStats +} + +// NoOpCache is a no-op implementation of OCSPResponseCache +type NoOpCache struct { + config *OCSPResponseCacheConfig + stats *OCSPResponseCacheStats + online bool + mu *sync.RWMutex +} + +func (c *NoOpCache) Put(_ string, _ *ocsp.Response, _ string, _ *certidp.Log) {} + +func (c *NoOpCache) Get(_ string, _ *certidp.Log) []byte { + return nil +} + +func (c *NoOpCache) Delete(_ string, _ bool, _ *certidp.Log) {} + +func (c *NoOpCache) Start(_ *Server) { + c.mu.Lock() + defer c.mu.Unlock() + c.stats = &OCSPResponseCacheStats{} + c.online = true +} + +func (c *NoOpCache) Stop(_ *Server) { + c.mu.Lock() + defer c.mu.Unlock() + c.online = false +} + +func (c *NoOpCache) Online() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.online +} + +func (c *NoOpCache) Type() string { + c.mu.RLock() + defer c.mu.RUnlock() + return "none" +} + +func (c *NoOpCache) Config() *OCSPResponseCacheConfig { + c.mu.RLock() + defer c.mu.RUnlock() + return c.config +} + +func (c *NoOpCache) Stats() *OCSPResponseCacheStats { + c.mu.RLock() + defer c.mu.RUnlock() + return c.stats +} + +// LocalCache is a local file implementation of OCSPResponseCache +type LocalCache struct { + config *OCSPResponseCacheConfig + stats *OCSPResponseCacheStats + online bool + cache map[string]OCSPResponseCacheItem + mu *sync.RWMutex + saveInterval time.Duration + dirty bool + timer *time.Timer +} + +// Put captures a CA OCSP response to the OCSP peer cache indexed by response fingerprint (a hash) +func (c *LocalCache) Put(key string, caResp *ocsp.Response, subj string, log 
*certidp.Log) { + c.mu.RLock() + if !c.online || caResp == nil || key == "" { + c.mu.RUnlock() + return + } + c.mu.RUnlock() + log.Debugf(certidp.DbgCachingResponse, subj, key) + rawC, err := c.Compress(caResp.Raw) + if err != nil { + log.Errorf(certidp.ErrResponseCompressFail, key, err) + return + } + log.Debugf(certidp.DbgAchievedCompression, float64(len(rawC))/float64(len(caResp.Raw))) + c.mu.Lock() + defer c.mu.Unlock() + // check if we are replacing and do stats + item, ok := c.cache[key] + if ok { + c.adjustStats(-1, item.RespStatus) + } + item = OCSPResponseCacheItem{ + Subject: subj, + CachedAt: time.Now().UTC().Round(time.Second), + RespStatus: certidp.StatusAssertionIntToVal[caResp.Status], + RespExpires: caResp.NextUpdate, + Resp: rawC, + } + c.cache[key] = item + c.adjustStats(1, item.RespStatus) + c.dirty = true +} + +// Get returns a CA OCSP response from the OCSP peer cache matching the response fingerprint (a hash) +func (c *LocalCache) Get(key string, log *certidp.Log) []byte { + c.mu.RLock() + defer c.mu.RUnlock() + if !c.online || key == "" { + return nil + } + val, ok := c.cache[key] + if ok { + atomic.AddInt64(&c.stats.Hits, 1) + log.Debugf(certidp.DbgCacheHit, key) + } else { + atomic.AddInt64(&c.stats.Misses, 1) + log.Debugf(certidp.DbgCacheMiss, key) + return nil + } + resp, err := c.Decompress(val.Resp) + if err != nil { + log.Errorf(certidp.ErrResponseDecompressFail, key, err) + return nil + } + return resp +} + +func (c *LocalCache) adjustStatsHitToMiss() { + atomic.AddInt64(&c.stats.Misses, 1) + atomic.AddInt64(&c.stats.Hits, -1) +} + +func (c *LocalCache) adjustStats(delta int64, rs certidp.StatusAssertion) { + if delta == 0 { + return + } + atomic.AddInt64(&c.stats.Responses, delta) + switch rs { + case ocsp.Good: + atomic.AddInt64(&c.stats.Goods, delta) + case ocsp.Revoked: + atomic.AddInt64(&c.stats.Revokes, delta) + case ocsp.Unknown: + atomic.AddInt64(&c.stats.Unknowns, delta) + } +} + +// Delete removes a CA OCSP response from the OCSP peer cache matching the response fingerprint (a hash) +func (c *LocalCache) Delete(key string, wasMiss bool, log *certidp.Log) { + c.mu.Lock() + defer c.mu.Unlock() + if !c.online || key == "" || c.config == nil { + return + } + item, ok := c.cache[key] + if !ok { + return + } + if item.RespStatus == ocsp.Revoked && c.config.PreserveRevoked { + log.Debugf(certidp.DbgPreservedRevocation, key) + if wasMiss { + c.adjustStatsHitToMiss() + } + return + } + log.Debugf(certidp.DbgDeletingCacheResponse, key) + delete(c.cache, key) + c.adjustStats(-1, item.RespStatus) + if wasMiss { + c.adjustStatsHitToMiss() + } + c.dirty = true +} + +// Start initializes the configured OCSP peer cache, loads a saved cache from disk (if present), and initializes runtime statistics +func (c *LocalCache) Start(s *Server) { + s.Debugf(certidp.DbgStartingCache) + c.loadCache(s) + c.initStats() + c.mu.Lock() + c.online = true + c.mu.Unlock() +} + +func (c *LocalCache) Stop(s *Server) { + c.mu.Lock() + s.Debugf(certidp.DbgStoppingCache) + c.online = false + c.timer.Stop() + c.mu.Unlock() + c.saveCache(s) +} + +func (c *LocalCache) Online() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.online +} + +func (c *LocalCache) Type() string { + c.mu.RLock() + defer c.mu.RUnlock() + return "local" +} + +func (c *LocalCache) Config() *OCSPResponseCacheConfig { + c.mu.RLock() + defer c.mu.RUnlock() + return c.config +} + +func (c *LocalCache) Stats() *OCSPResponseCacheStats { + c.mu.RLock() + defer c.mu.RUnlock() + if c.stats == nil { + return nil + } + 
stats := OCSPResponseCacheStats{ + Responses: c.stats.Responses, + Hits: c.stats.Hits, + Misses: c.stats.Misses, + Revokes: c.stats.Revokes, + Goods: c.stats.Goods, + Unknowns: c.stats.Unknowns, + } + return &stats +} + +func (c *LocalCache) initStats() { + c.mu.Lock() + defer c.mu.Unlock() + c.stats = &OCSPResponseCacheStats{} + c.stats.Hits = 0 + c.stats.Misses = 0 + c.stats.Responses = int64(len(c.cache)) + for _, resp := range c.cache { + switch resp.RespStatus { + case ocsp.Good: + c.stats.Goods++ + case ocsp.Revoked: + c.stats.Revokes++ + case ocsp.Unknown: + c.stats.Unknowns++ + } + } +} + +func (c *LocalCache) Compress(buf []byte) ([]byte, error) { + bodyLen := int64(len(buf)) + var output bytes.Buffer + writer := s2.NewWriter(&output) + input := bytes.NewReader(buf[:bodyLen]) + if n, err := io.CopyN(writer, input, bodyLen); err != nil { + return nil, fmt.Errorf(certidp.ErrCannotWriteCompressed, err) + } else if n != bodyLen { + return nil, fmt.Errorf(certidp.ErrTruncatedWrite, n, bodyLen) + } + if err := writer.Close(); err != nil { + return nil, fmt.Errorf(certidp.ErrCannotCloseWriter, err) + } + return output.Bytes(), nil +} + +func (c *LocalCache) Decompress(buf []byte) ([]byte, error) { + bodyLen := int64(len(buf)) + input := bytes.NewReader(buf[:bodyLen]) + reader := io.NopCloser(s2.NewReader(input)) + output, err := io.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf(certidp.ErrCannotReadCompressed, err) + } + return output, reader.Close() +} + +func (c *LocalCache) loadCache(s *Server) { + d := s.opts.OCSPCacheConfig.LocalStore + if d == _EMPTY_ { + d = OCSPResponseCacheDefaultDir + } + f := OCSPResponseCacheDefaultFilename + store, err := filepath.Abs(path.Join(d, f)) + if err != nil { + s.Errorf(certidp.ErrLoadCacheFail, err) + return + } + s.Debugf(certidp.DbgLoadingCache, store) + c.mu.Lock() + defer c.mu.Unlock() + c.cache = make(map[string]OCSPResponseCacheItem) + dat, err := os.ReadFile(store) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + s.Debugf(certidp.DbgNoCacheFound) + } else { + s.Warnf(certidp.ErrLoadCacheFail, err) + } + return + } + err = json.Unmarshal(dat, &c.cache) + if err != nil { + // make sure clean cache + c.cache = make(map[string]OCSPResponseCacheItem) + s.Warnf(certidp.ErrLoadCacheFail, err) + c.dirty = true + return + } + c.dirty = false +} + +func (c *LocalCache) saveCache(s *Server) { + c.mu.RLock() + dirty := c.dirty + c.mu.RUnlock() + if !dirty { + return + } + s.Debugf(certidp.DbgCacheDirtySave) + var d string + if c.config.LocalStore != _EMPTY_ { + d = c.config.LocalStore + } else { + d = OCSPResponseCacheDefaultDir + } + f := OCSPResponseCacheDefaultFilename + store, err := filepath.Abs(path.Join(d, f)) + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + s.Debugf(certidp.DbgSavingCache, store) + if _, err := os.Stat(d); os.IsNotExist(err) { + err = os.Mkdir(d, defaultDirPerms) + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + } + tmp, err := os.CreateTemp(d, OCSPResponseCacheDefaultTempFilePrefix) + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + defer func() { + tmp.Close() + os.Remove(tmp.Name()) + }() // clean up any temp files + + // RW lock here because we're going to snapshot the cache to disk and mark as clean if successful + c.mu.Lock() + defer c.mu.Unlock() + dat, err := json.MarshalIndent(c.cache, "", " ") + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + cacheSize, err := tmp.Write(dat) + if err != nil { + 
s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + err = tmp.Sync() + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + err = tmp.Close() + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + // do the final swap and overwrite any old saved peer cache + err = os.Rename(tmp.Name(), store) + if err != nil { + s.Errorf(certidp.ErrSaveCacheFail, err) + return + } + c.dirty = false + s.Debugf(certidp.DbgCacheSaved, cacheSize) +} + +var OCSPResponseCacheUsage = ` +You may enable OCSP peer response caching at server configuration root level: + +(If no TLS blocks are configured with OCSP peer verification, ocsp_cache is ignored.) + + ... + # short form enables with defaults + ocsp_cache: true + + # if false or undefined and one or more TLS blocks are configured with OCSP peer verification, "none" is implied + + # long form includes settable options + ocsp_cache { + + # Cache type (default local) + type: local + + # Cache file directory for local-type cache (default _rc_ in current working directory) + local_store: "_rc_" + + # Ignore cache deletes if cached OCSP response is Revoked status (default false) + preserve_revoked: false + + # For local store, interval to save in-memory cache to disk in seconds (default 300 seconds, minimum 1 second) + save_interval: 300 + } + ... + +Note: Cache of server's own OCSP response (staple) is enabled using the 'ocsp' configuration option. +` + +func (s *Server) initOCSPResponseCache() { + // No mTLS OCSP or Leaf OCSP enablements, so no need to init cache + s.mu.RLock() + if !s.ocspPeerVerify { + s.mu.RUnlock() + return + } + s.mu.RUnlock() + so := s.getOpts() + if so.OCSPCacheConfig == nil { + so.OCSPCacheConfig = NewOCSPResponseCacheConfig() + } + var cc = so.OCSPCacheConfig + s.mu.Lock() + defer s.mu.Unlock() + switch cc.Type { + case NONE: + s.ocsprc = &NoOpCache{config: cc, online: true, mu: &sync.RWMutex{}} + case LOCAL: + c := &LocalCache{ + config: cc, + online: false, + cache: make(map[string]OCSPResponseCacheItem), + mu: &sync.RWMutex{}, + dirty: false, + } + c.saveInterval = time.Duration(cc.SaveInterval) * time.Second + c.timer = time.AfterFunc(c.saveInterval, func() { + s.Debugf(certidp.DbgCacheSaveTimerExpired) + c.saveCache(s) + c.timer.Reset(c.saveInterval) + }) + s.ocsprc = c + default: + s.Fatalf(certidp.ErrBadCacheTypeConfig, cc.Type) + } +} + +func (s *Server) startOCSPResponseCache() { + // No mTLS OCSP or Leaf OCSP enablements, so no need to start cache + s.mu.RLock() + if !s.ocspPeerVerify || s.ocsprc == nil { + s.mu.RUnlock() + return + } + s.mu.RUnlock() + + // Could be heavier operation depending on cache implementation + s.ocsprc.Start(s) + if s.ocsprc.Online() { + s.Noticef(certidp.MsgCacheOnline, s.ocsprc.Type()) + } else { + s.Noticef(certidp.MsgCacheOffline, s.ocsprc.Type()) + } +} + +func (s *Server) stopOCSPResponseCache() { + s.mu.RLock() + if s.ocsprc == nil { + s.mu.RUnlock() + return + } + s.mu.RUnlock() + s.ocsprc.Stop(s) +} + +func parseOCSPResponseCache(v interface{}) (pcfg *OCSPResponseCacheConfig, retError error) { + var lt token + defer convertPanicToError(&lt, &retError) + tk, v := unwrapValue(v, &lt) + cm, ok := v.(map[string]interface{}) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrIllegalCacheOptsConfig, v)} + } + pcfg = NewOCSPResponseCacheConfig() + retError = nil + for mk, mv := range cm { + // Again, unwrap token value if line check is required.
+ tk, mv = unwrapValue(mv, &lt) + switch strings.ToLower(mk) { + case "type": + cache, ok := mv.(string) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingCacheOptFieldGeneric, mk)} + } + cacheType, exists := OCSPResponseCacheTypeMap[strings.ToLower(cache)] + if !exists { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrUnknownCacheType, cache)} + } + pcfg.Type = cacheType + case "local_store": + store, ok := mv.(string) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingCacheOptFieldGeneric, mk)} + } + pcfg.LocalStore = store + case "preserve_revoked": + preserve, ok := mv.(bool) + if !ok { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingCacheOptFieldGeneric, mk)} + } + pcfg.PreserveRevoked = preserve + case "save_interval": + at := float64(0) + switch mv := mv.(type) { + case int64: + at = float64(mv) + case float64: + at = mv + case string: + d, err := time.ParseDuration(mv) + if err != nil { + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingPeerOptFieldTypeConversion, err)} + } + at = d.Seconds() + default: + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingCacheOptFieldTypeConversion, "unexpected type")} + } + si := time.Duration(at) * time.Second + if si < OCSPResponseCacheMinimumSaveInterval { + si = OCSPResponseCacheMinimumSaveInterval + } + pcfg.SaveInterval = si.Seconds() + default: + return nil, &configErr{tk, fmt.Sprintf(certidp.ErrParsingCacheOptFieldGeneric, mk)} + } + } + return pcfg, nil +} diff --git a/server/opts.go b/server/opts.go index 2f023daa0..09d3a482b 100644 --- a/server/opts.go +++ b/server/opts.go @@ -1,4 +1,4 @@ -// Copyright 2012-2022 The NATS Authors +// Copyright 2012-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -33,10 +33,11 @@ import ( "sync/atomic" "time" + "github.com/memphisdev/memphis/conf" + "github.com/memphisdev/memphis/server/certidp" + "github.com/memphisdev/memphis/server/certstore" "github.com/nats-io/jwt/v2" "github.com/nats-io/nkeys" - - "github.com/memphisdev/memphis/conf" ) var allowUnknownTopLevelField = int32(0) @@ -53,7 +54,7 @@ func NoErrOnUnknownFields(noError bool) { atomic.StoreInt32(&allowUnknownTopLevelField, val) } -// Set of lower case hex-encoded sha256 of DER encoded SubjectPublicKeyInfo +// PinnedCertSet is a set of lower case hex-encoded sha256 of DER encoded SubjectPublicKeyInfo type PinnedCertSet map[string]struct{} // ClusterOpts are options for clusters. @@ -221,6 +222,7 @@ type Options struct { NoHeaderSupport bool `json:"-"` DisableShortFirstPing bool `json:"-"` Logtime bool `json:"-"` + LogtimeUTC bool `json:"-"` MaxConn int `json:"max_connections"` MaxSubs int `json:"max_subscriptions,omitempty"` MaxSubTokens uint8 `json:"-"` @@ -285,7 +287,7 @@ type Options struct { LameDuckDuration time.Duration `json:"-"` LameDuckGracePeriod time.Duration `json:"-"` - // memphis options + // ** added by Memphis UiPort int `json:"-"` RestGwPort int `json:"-"` K8sNamespace string `json:"-"` @@ -296,6 +298,7 @@ type Options struct { UiHost string `json:"-"` RestGwHost string `json:"-"` BrokerHost string `json:"-"` + // ** added by Memphis // MaxTracedMsgLen is the maximum printable length for traced messages.
MaxTracedMsgLen int `json:"-"` @@ -352,6 +355,9 @@ type Options struct { // JetStream maxMemSet bool maxStoreSet bool + + // OCSP Cache config enables next-gen cache for OCSP features + OCSPCacheConfig *OCSPResponseCacheConfig } // WebsocketOpts are options for websocket @@ -415,6 +421,9 @@ type WebsocketOpts struct { // and write the response back to the client. This include the // time needed for the TLS Handshake. HandshakeTimeout time.Duration + + // Snapshot of configured TLS options. + tlsConfigOpts *TLSConfigOpts } // MQTTOpts are options for MQTT @@ -495,6 +504,9 @@ type MQTTOpts struct { // subscription ending with "#" will use 2 times the MaxAckPending value. // Note that changes to this option is applied only to new subscriptions. MaxAckPending uint16 + + // Snapshot of configured TLS options. + tlsConfigOpts *TLSConfigOpts } type netResolver interface { @@ -586,6 +598,10 @@ type TLSConfigOpts struct { Ciphers []uint16 CurvePreferences []tls.CurveID PinnedCerts PinnedCertSet + CertStore certstore.StoreType + CertMatchBy certstore.MatchByType + CertMatch string + OCSPPeerConfig *certidp.OCSPPeerConfig } // OCSPConfig represents the options of OCSP stapling options. @@ -627,7 +643,7 @@ Available cipher suites include: // FIXME(dlc): A bit hacky func ProcessConfigFile(configFile string) (*Options, error) { opts := &Options{} - if err := opts.ProcessConfigFile(configFile, true); err != nil { + if err := opts.ProcessConfigFile(configFile, true); err != nil { // ** true added by memphis // If only warnings then continue and return the options. if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 { return opts, nil @@ -729,7 +745,7 @@ func configureSystemAccount(o *Options, m map[string]interface{}) (retErr error) // achieve that with the non receiver ProcessConfigFile() version, // since one would not know after the call if "debug" was not present // or was present but set to false. -func (o *Options) ProcessConfigFile(configFile string, reload bool) error { +func (o *Options) ProcessConfigFile(configFile string, reload bool) error { // ** reload added by memphis o.ConfigFile = configFile if configFile == _EMPTY_ { return nil @@ -768,6 +784,9 @@ func (o *Options) ProcessConfigFile(configFile string, reload bool) error { // Collect all errors and warnings and report them all together. errors := make([]error, 0) warnings := make([]error, 0) + if len(m) == 0 { + warnings = append(warnings, fmt.Errorf("%s: config has no values or is empty", configFile)) + } // First check whether a system account has been defined, // as that is a condition for other features to be enabled. 
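To make the new knobs concrete, here is a hedged example of a config exercising the options parsed by parseOCSPPeer and parseOCSPResponseCache above; the certificate paths are placeholders, and the numeric values restate documented defaults and parser semantics (seconds, or a duration string) rather than recommendations.

    tls {
      cert_file: "./server-cert.pem"   # placeholder paths
      key_file:  "./server-key.pem"
      ca_file:   "./ca.pem"
      verify:    true                  # mTLS; required before client-kind ocsp_peer takes effect

      # short form is `ocsp_peer: true`; long form sets the fields parsed above
      ocsp_peer {
        verify: true
        ca_timeout: 2                            # seconds, or a duration like "2s"
        allowed_clockskew: 30                    # tolerated skew when judging response currency
        cache_ttl_when_next_update_unset: 3600   # cache TTL when the CA omits NextUpdate
        warn_only: false
        unknown_is_good: false
        allow_when_ca_unreachable: false
      }
    }

    # top-level cache for fetched OCSP responses (see OCSPResponseCacheUsage above)
    ocsp_cache {
      type: local
      local_store: "_rc_"
      preserve_revoked: false
      save_interval: 300
    }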
@@ -825,6 +844,9 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error case "logtime": o.Logtime = v.(bool) trackExplicitVal(o, &o.inConfig, "Logtime", o.Logtime) + case "logtime_utc": + o.LogtimeUTC = v.(bool) + trackExplicitVal(o, &o.inConfig, "LogtimeUTC", o.LogtimeUTC) case "mappings", "maps": gacc := NewAccount(globalAccountName) o.Accounts = append(o.Accounts, gacc) @@ -1169,8 +1191,10 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error } case map[string]interface{}: del := false - dir := "" - dirType := "" + hdel := false + hdel_set := false + dir := _EMPTY_ + dirType := _EMPTY_ limit := int64(0) ttl := time.Duration(0) sync := time.Duration(0) @@ -1188,6 +1212,11 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error _, v := unwrapValue(v, &lt) del = v.(bool) } + if v, ok := v["hard_delete"]; ok { + _, v := unwrapValue(v, &lt) + hdel_set = true + hdel = v.(bool) + } if v, ok := v["limit"]; ok { _, v := unwrapValue(v, &lt) limit = v.(int64) } @@ -1211,29 +1240,51 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error *errors = append(*errors, &configErr{tk, err.Error()}) return } - if dir == "" { - *errors = append(*errors, &configErr{tk, "dir has no value and needs to point to a directory"}) - return - } - if info, _ := os.Stat(dir); info != nil && (!info.IsDir() || info.Mode().Perm()&(1<<(uint(7))) == 0) { - *errors = append(*errors, &configErr{tk, "dir needs to point to an accessible directory"}) - return + + checkDir := func() { + if dir == _EMPTY_ { + *errors = append(*errors, &configErr{tk, "dir has no value and needs to point to a directory"}) + return + } + if info, _ := os.Stat(dir); info != nil && (!info.IsDir() || info.Mode().Perm()&(1<<(uint(7))) == 0) { + *errors = append(*errors, &configErr{tk, "dir needs to point to an accessible directory"}) + return + } } + var res AccountResolver switch strings.ToUpper(dirType) { case "CACHE": + checkDir() if sync != 0 { *errors = append(*errors, &configErr{tk, "CACHE does not accept sync"}) } if del { *errors = append(*errors, &configErr{tk, "CACHE does not accept allow_delete"}) } + if hdel_set { + *errors = append(*errors, &configErr{tk, "CACHE does not accept hard_delete"}) + } res, err = NewCacheDirAccResolver(dir, limit, ttl, opts...) case "FULL": + checkDir() if ttl != 0 { + *errors = append(*errors, &configErr{tk, "FULL does not accept ttl"}) } - res, err = NewDirAccResolver(dir, limit, sync, del, opts...) + if hdel_set && !del { + *errors = append(*errors, &configErr{tk, "hard_delete has no effect without delete"}) + } + delete := NoDelete + if del { + if hdel { + delete = HardDelete + } else { + delete = RenameDeleted + } + } + res, err = NewDirAccResolver(dir, limit, sync, delete, opts...)
+ case "MEM", "MEMORY": + res = &MemAccResolver{} } if err != nil { *errors = append(*errors, &configErr{tk, err.Error()}) @@ -1411,6 +1462,36 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error m[kk] = v.(string) } o.JsAccDefaultDomain = m + case "ocsp_cache": + var err error + switch vv := v.(type) { + case bool: + pc := NewOCSPResponseCacheConfig() + if vv { + // Set enabled + pc.Type = LOCAL + o.OCSPCacheConfig = pc + } else { + // Set disabled (none cache) + pc.Type = NONE + o.OCSPCacheConfig = pc + } + case map[string]interface{}: + pc, err := parseOCSPResponseCache(v) + if err != nil { + *errors = append(*errors, err) + return + } + o.OCSPCacheConfig = pc + default: + err = &configErr{tk, fmt.Sprintf("error parsing tags: unsupported type %T", v)} + } + if err != nil { + *errors = append(*errors, err) + return + } + + // ** added by Memphis case "ui_port": o.UiPort = int(v.(int64)) case "rest_gw_port": @@ -1473,6 +1554,7 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error return } o.BrokerHost = value + // ** added by Memphis default: if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -3927,6 +4009,11 @@ func PrintTLSHelpAndDie() { for k := range curvePreferenceMap { fmt.Printf(" %s\n", k) } + if runtime.GOOS == "windows" { + fmt.Printf("%s\n", certstore.Usage) + } + fmt.Printf("%s", certidp.OCSPPeerUsage) + fmt.Printf("%s", OCSPResponseCacheUsage) os.Exit(0) } @@ -4084,6 +4171,54 @@ func parseTLS(v interface{}, isClientCtx bool) (t *TLSConfigOpts, retErr error) } tc.PinnedCerts = wl } + case "cert_store": + certStore, ok := mv.(string) + if !ok || certStore == _EMPTY_ { + return nil, &configErr{tk, certstore.ErrBadCertStoreField.Error()} + } + certStoreType, err := certstore.ParseCertStore(certStore) + if err != nil { + return nil, &configErr{tk, err.Error()} + } + tc.CertStore = certStoreType + case "cert_match_by": + certMatchBy, ok := mv.(string) + if !ok || certMatchBy == _EMPTY_ { + return nil, &configErr{tk, certstore.ErrBadCertMatchByField.Error()} + } + certMatchByType, err := certstore.ParseCertMatchBy(certMatchBy) + if err != nil { + return nil, &configErr{tk, err.Error()} + } + tc.CertMatchBy = certMatchByType + case "cert_match": + certMatch, ok := mv.(string) + if !ok || certMatch == _EMPTY_ { + return nil, &configErr{tk, certstore.ErrBadCertMatchField.Error()} + } + tc.CertMatch = certMatch + case "ocsp_peer": + switch vv := mv.(type) { + case bool: + pc := certidp.NewOCSPPeerConfig() + if vv { + // Set enabled + pc.Verify = true + tc.OCSPPeerConfig = pc + } else { + // Set disabled + pc.Verify = false + tc.OCSPPeerConfig = pc + } + case map[string]interface{}: + pc, err := parseOCSPPeer(mv) + if err != nil { + return nil, &configErr{tk, err.Error()} + } + tc.OCSPPeerConfig = pc + default: + return nil, &configErr{tk, fmt.Sprintf("error parsing ocsp peer config: unsupported type %T", v)} + } default: return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)} } @@ -4214,6 +4349,7 @@ func parseWebsocket(v interface{}, o *Options, errors *[]error, warnings *[]erro } o.Websocket.TLSMap = tc.Map o.Websocket.TLSPinnedCerts = tc.PinnedCerts + o.Websocket.tlsConfigOpts = tc case "same_origin": o.Websocket.SameOrigin = mv.(bool) case "allowed_origins", "allowed_origin", "allow_origins", "allow_origin", "origins", "origin": @@ -4304,6 +4440,7 @@ func parseMQTT(v interface{}, o *Options, errors *[]error, warnings 
*[]error) er o.MQTT.TLSTimeout = tc.Timeout o.MQTT.TLSMap = tc.Map o.MQTT.TLSPinnedCerts = tc.PinnedCerts + o.MQTT.tlsConfigOpts = tc case "authorization", "authentication": auth := parseSimpleAuth(tk, errors, warnings) o.MQTT.Username = auth.user @@ -4370,11 +4507,13 @@ func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) { } switch { - case tc.CertFile != "" && tc.KeyFile == "": + case tc.CertFile != _EMPTY_ && tc.CertStore != certstore.STOREEMPTY: + return nil, certstore.ErrConflictCertFileAndStore + case tc.CertFile != _EMPTY_ && tc.KeyFile == _EMPTY_: return nil, fmt.Errorf("missing 'key_file' in TLS configuration") - case tc.CertFile == "" && tc.KeyFile != "": + case tc.CertFile == _EMPTY_ && tc.KeyFile != _EMPTY_: return nil, fmt.Errorf("missing 'cert_file' in TLS configuration") - case tc.CertFile != "" && tc.KeyFile != "": + case tc.CertFile != _EMPTY_ && tc.KeyFile != _EMPTY_: // Now load in cert and private key cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile) if err != nil { @@ -4385,6 +4524,11 @@ func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) { return nil, fmt.Errorf("error parsing certificate: %v", err) } config.Certificates = []tls.Certificate{cert} + case tc.CertStore != certstore.STOREEMPTY: + err := certstore.TLSConfig(tc.CertStore, tc.CertMatchBy, tc.CertMatch, &config) + if err != nil { + return nil, err + } } // Require client certificates as needed @@ -4721,7 +4865,7 @@ func setBaselineOptions(opts *Options) { opts.JetStreamMaxStore = -1 } - // Memphis + // ** added by Memphis if !opts.JetStream { // enable JS by default opts.JetStream = true } @@ -4786,6 +4930,7 @@ func setBaselineOptions(opts *Options) { opts.RestGwHost = fmt.Sprintf("http://memphis-rest-gateway.%s.svc.cluster.local:%v", opts.K8sNamespace, opts.RestGwPort) } } + // ** added by Memphis } func getDefaultAuthTimeout(tls *tls.Config, tlsTimeout float64) float64 { @@ -4836,9 +4981,10 @@ func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, fs.BoolVar(&dbgAndTrcAndVerboseTrc, "DVV", false, "Enable Debug and Verbose Trace logging. (Traces system account as well)") fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.") fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.") - fs.StringVar(&opts.Username, "user", "", "Username required for connection.") - fs.StringVar(&opts.Password, "pass", "", "Password required for connection.") - fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.") + fs.BoolVar(&opts.LogtimeUTC, "logtime_utc", false, "Timestamps in UTC instead of local timezone.") + fs.StringVar(&opts.Username, "user", _EMPTY_, "Username required for connection.") + fs.StringVar(&opts.Password, "pass", _EMPTY_, "Password required for connection.") + fs.StringVar(&opts.Authorization, "auth", _EMPTY_, "Authorization token required for connection.") fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.") fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.") fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.") @@ -4975,7 +5121,7 @@ func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, // Parse config if given if configFile != _EMPTY_ { // This will update the options with values from the config file. 
- err := opts.ProcessConfigFile(configFile, false) + err := opts.ProcessConfigFile(configFile, false) // ** false added by Memphis if err != nil { if opts.CheckConfig { return nil, err diff --git a/server/opts_test.go b/server/opts_test.go index 1a149925f..a473d7efc 100644 --- a/server/opts_test.go +++ b/server/opts_test.go @@ -1348,7 +1348,7 @@ func TestPanic(t *testing.T) { func TestPingIntervalOld(t *testing.T) { conf := createConfFile(t, []byte(`ping_interval: 5`)) opts := &Options{} - err := opts.ProcessConfigFile(conf, false) + err := opts.ProcessConfigFile(conf, false) // ** false added by Memphis if err == nil { t.Fatalf("expected an error") } @@ -1370,7 +1370,7 @@ func TestPingIntervalOld(t *testing.T) { func TestPingIntervalNew(t *testing.T) { conf := createConfFile(t, []byte(`ping_interval: "5m"`)) opts := &Options{} - if err := opts.ProcessConfigFile(conf, false); err != nil { + if err := opts.ProcessConfigFile(conf, false); err != nil { // ** false added by Memphis t.Fatalf("expected no error") } if opts.PingInterval != 5*time.Minute { @@ -1389,7 +1389,7 @@ func TestOptionsProcessConfigFile(t *testing.T) { LogFile: logFileName, } configFileName := "./configs/test.conf" - if err := opts.ProcessConfigFile(configFileName, false); err != nil { + if err := opts.ProcessConfigFile(configFileName, false); err != nil { // ** false added by Memphis t.Fatalf("Error processing config file: %v", err) } // Verify that values are as expected @@ -2533,13 +2533,9 @@ func TestParsingLeafNodeRemotes(t *testing.T) { } func TestLargeMaxControlLine(t *testing.T) { - confFileName := "big_mcl.conf" - content := ` - max_control_line = 3000000000 - ` - if err := os.WriteFile(confFileName, []byte(content), 0666); err != nil { - t.Fatalf("Error writing config file: %v", err) - } + confFileName := createConfFile(t, []byte(` + max_control_line = 3000000000 + `)) if _, err := ProcessConfigFile(confFileName); err == nil { t.Fatalf("Expected an error from too large of a max_control_line entry") } @@ -2577,7 +2573,7 @@ func TestHandleUnknownTopLevelConfigurationField(t *testing.T) { // Verify that we get an error because of unknown "streaming" field. 
opts := &Options{} - if err := opts.ProcessConfigFile(conf, false); err == nil || !strings.Contains(err.Error(), "streaming") { + if err := opts.ProcessConfigFile(conf, false); err == nil || !strings.Contains(err.Error(), "streaming") { // false added by Memphis t.Fatal("Expected error, got none") } @@ -2585,7 +2581,7 @@ func TestHandleUnknownTopLevelConfigurationField(t *testing.T) { NoErrOnUnknownFields(true) defer NoErrOnUnknownFields(false) - if err := opts.ProcessConfigFile(conf, false); err != nil { + if err := opts.ProcessConfigFile(conf, false); err != nil { // false added by Memphis t.Fatalf("Unexpected error: %v", err) } if opts.Port != 1234 { @@ -2602,7 +2598,7 @@ func TestHandleUnknownTopLevelConfigurationField(t *testing.T) { id: "me" } `)) - if err := opts.ProcessConfigFile(conf, false); err == nil || !strings.Contains(err.Error(), "non_top_level") { + if err := opts.ProcessConfigFile(conf, false); err == nil || !strings.Contains(err.Error(), "non_top_level") { // false added by Memphis t.Fatal("Expected error, got none") } } diff --git a/server/parser.go b/server/parser.go index 3c6b360a1..8501879ab 100644 --- a/server/parser.go +++ b/server/parser.go @@ -16,7 +16,6 @@ package server import ( "bufio" "bytes" - "encoding/json" "fmt" "net/http" "net/textproto" @@ -920,15 +919,7 @@ func (c *client) parse(buf []byte) error { c.argBuf = nil } else { arg = buf[c.as : i-c.drop] - - d := json.NewDecoder(strings.NewReader(string(arg))) - err := d.Decode(&c.opts) - - if err != nil { - return err - } } - if err := c.overMaxControlLineLimit(arg, mcl); err != nil { return err } @@ -959,6 +950,7 @@ func (c *client) parse(buf []byte) error { authSet = c.awaitingAuth() c.mu.Unlock() + // ** added by Memphis if c.kind == CLIENT && !strings.Contains(c.opts.Name, "NATS CLI") && !c.isWebsocket() && @@ -974,6 +966,7 @@ func (c *client) parse(buf []byte) error { goto authErr } } + // ** added by Memphis default: if c.argBuf != nil { c.argBuf = append(c.argBuf, b) @@ -1259,7 +1252,7 @@ accountIdErr: parseErr: c.sendErr("Unknown Protocol Operation") snip := protoSnippet(i, PROTO_SNIPPET_SIZE, buf) - err := fmt.Errorf("%s parser ERROR, state=%d, i=%d:, name=%s, proto='%s...'", c.kindString(), c.state, i, c.opts.Name, snip) + err := fmt.Errorf("%s parser ERROR, state=%d, i=%d:, name=%s, proto='%s...'", c.kindString(), c.state, i, c.opts.Name, snip) // ** name added by Memphis return err } diff --git a/server/raft.go b/server/raft.go index 6a4ed96b7..bee20a1fc 100644 --- a/server/raft.go +++ b/server/raft.go @@ -25,6 +25,7 @@ import ( "net" "os" "path/filepath" + "strings" "sync" "sync/atomic" "time" @@ -346,16 +347,16 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig) (RaftNode, error if cfg == nil { return nil, errNilCfg } - s.mu.Lock() + s.mu.RLock() if s.sys == nil { - s.mu.Unlock() + s.mu.RUnlock() return nil, ErrNoSysAccount } sq := s.sys.sq sacc := s.sys.account hash := s.sys.shash pub := s.info.ID - s.mu.Unlock() + s.mu.RUnlock() ps, err := readPeerState(cfg.Store) if err != nil { @@ -485,6 +486,12 @@ func (s *Server) startRaftNode(accName string, cfg *RaftConfig) (RaftNode, error n.debug("Started") + // Check if we need to start in observer mode due to lame duck status. 
+ if s.isLameDuckMode() { + n.debug("Will start in observer mode due to lame duck status") + n.SetObserver(true) + } + n.Lock() n.resetElectionTimeout() n.llqrt = time.Now() @@ -556,11 +563,10 @@ func (s *Server) lookupRaftNode(group string) RaftNode { return n } -func (s *Server) reloadDebugRaftNodes() { +func (s *Server) reloadDebugRaftNodes(debug bool) { if s == nil { return } - debug := atomic.LoadInt32(&s.logging.debug) > 0 s.rnMu.RLock() for _, ni := range s.raftNodes { n := ni.(*raft) @@ -611,6 +617,9 @@ func (s *Server) shutdownRaftNodes() { } } +// Used in lameduck mode to move off the leaders. +// We also put all nodes in observer mode so new leaders +// can not be placed on this server. func (s *Server) transferRaftLeaders() bool { if s == nil { return false @@ -631,6 +640,7 @@ func (s *Server) transferRaftLeaders() bool { node.StepDown() didTransfer = true } + node.SetObserver(true) } return didTransfer } @@ -654,7 +664,7 @@ func (n *raft) Propose(data []byte) error { prop := n.prop n.RUnlock() - prop.push(&Entry{EntryNormal, data}) + prop.push(newEntry(EntryNormal, data)) return nil } @@ -706,7 +716,7 @@ func (n *raft) ProposeAddPeer(peer string) error { prop := n.prop n.RUnlock() - prop.push(&Entry{EntryAddPeer, []byte(peer)}) + prop.push(newEntry(EntryAddPeer, []byte(peer))) return nil } @@ -739,7 +749,7 @@ func (n *raft) ProposeRemovePeer(peer string) error { } if isLeader { - prop.push(&Entry{EntryRemovePeer, []byte(peer)}) + prop.push(newEntry(EntryRemovePeer, []byte(peer))) n.doRemovePeerAsLeader(peer) return nil } @@ -845,7 +855,13 @@ func (n *raft) ResumeApply() { } } n.hcommit = 0 - n.resetElectionTimeout() + + // If we had been selected to be the next leader campaign here now that we have resumed. + if n.lxfer { + n.xferCampaign() + } else { + n.resetElectionTimeout() + } } // Applied is to be called when the FSM has applied the committed entries. @@ -855,6 +871,11 @@ func (n *raft) Applied(index uint64) (entries uint64, bytes uint64) { n.Lock() defer n.Unlock() + // Ignore if not applicable. This can happen during a reset. + if index > n.commit { + return 0, 0 + } + // Ignore if already applied. if index > n.applied { n.applied = index @@ -939,7 +960,7 @@ func (n *raft) InstallSnapshot(data []byte) error { var state StreamState n.wal.FastState(&state) - if n.applied == 0 || len(data) == 0 { + if n.applied == 0 { n.Unlock() return errNoSnapAvailable } @@ -1074,7 +1095,7 @@ func (n *raft) setupLastSnapshot() { n.pterm = snap.lastTerm n.commit = snap.lastIndex n.applied = snap.lastIndex - n.apply.push(&CommittedEntry{n.commit, []*Entry{{EntrySnapshot, snap.data}}}) + n.apply.push(newCommittedEntry(n.commit, []*Entry{{EntrySnapshot, snap.data}})) if _, err := n.wal.Compact(snap.lastIndex + 1); err != nil { n.setWriteErrLocked(err) } @@ -1151,9 +1172,16 @@ func (n *raft) isCatchingUp() bool { return n.catchup != nil } -// Lock should be held. This function may block for up to ~5ms to check +// This function may block for up to ~10ms to check // forward progress in some cases. +// Lock should be held. func (n *raft) isCurrent(includeForwardProgress bool) bool { + // Check if we are closed. + if n.state == Closed { + n.debug("Not current, node is closed") + return false + } + // Check whether we've made progress on any state, 0 is invalid so not healthy. if n.commit == 0 { n.debug("Not current, no commits") @@ -1197,9 +1225,9 @@ func (n *raft) isCurrent(includeForwardProgress bool) bool { // forward progress. 
if startDelta := n.commit - n.applied; startDelta > 0 { for i := 0; i < 10; i++ { // 10ms, in 1ms increments - n.RUnlock() - time.Sleep(time.Millisecond / 2) - n.RLock() + n.Unlock() + time.Sleep(time.Millisecond) + n.Lock() if n.commit-n.applied < startDelta { // The gap is getting smaller, so we're making forward progress. return true @@ -1216,8 +1244,8 @@ func (n *raft) Current() bool { if n == nil { return false } - n.RLock() - defer n.RUnlock() + n.Lock() + defer n.Unlock() return n.isCurrent(false) } @@ -1226,8 +1254,8 @@ func (n *raft) Healthy() bool { if n == nil { return false } - n.RLock() - defer n.RUnlock() + n.Lock() + defer n.Unlock() return n.isCurrent(true) } @@ -1324,7 +1352,12 @@ func (n *raft) StepDown(preferred ...string) error { } } + // Clear our vote state. + n.vote = noVote + n.writeTermVote() + stepdown := n.stepdown + prop := n.prop n.Unlock() if len(preferred) > 0 && maybeLeader == noLeader { @@ -1334,11 +1367,12 @@ // If we have a new leader selected, transfer over to them. if maybeLeader != noLeader { n.debug("Selected %q for new leader", maybeLeader) - n.sendAppendEntry([]*Entry{{EntryLeaderTransfer, []byte(maybeLeader)}}) + prop.push(newEntry(EntryLeaderTransfer, []byte(maybeLeader))) + } else { + // Force us to stepdown here. + n.debug("Stepping down") + stepdown.push(noLeader) } - // Force us to stepdown here. - n.debug("Stepping down") - stepdown.push(noLeader) return nil } @@ -1371,6 +1405,7 @@ func (n *raft) campaign() error { func (n *raft) xferCampaign() error { n.debug("Starting transfer campaign") if n.state == Leader { + n.lxfer = false return errAlreadyLeader } n.resetElect(10 * time.Millisecond) @@ -1439,12 +1474,6 @@ func (n *raft) Peers() []*Peer { // Update our known set of peers. func (n *raft) UpdateKnownPeers(knownPeers []string) { n.Lock() - // If this is a scale up, let the normal add peer logic take precedence. - // Otherwise if the new peers are slow to start we stall ourselves. - if len(knownPeers) > len(n.peers) { - n.Unlock() - return - } // Process like peer state update. ps := &peerState{knownPeers, len(knownPeers), n.extSt} n.processPeerState(ps) @@ -1739,9 +1768,21 @@ func (n *raft) setObserver(isObserver bool, extSt extensionState) { // Invoked when being notified that there is something in the entryc's queue func (n *raft) processAppendEntries() { + canProcess := true + if n.isClosed() { + n.debug("AppendEntry not processing inbound, closed") + canProcess = false + } + if n.outOfResources() { + n.debug("AppendEntry not processing inbound, no resources") + canProcess = false + } + // Always pop the entries, but check if we can process them. aes := n.entry.pop() - for _, ae := range aes { - n.processAppendEntry(ae, ae.sub) + if canProcess { + for _, ae := range aes { + n.processAppendEntry(ae, ae.sub) + } } n.entry.recycle(&aes) } @@ -1768,6 +1809,13 @@ func (n *raft) runAsFollower() { n.debug("Not switching to candidate, observer only") } else if n.isCatchingUp() { n.debug("Not switching to candidate, catching up") + // Check to see if our catchup has stalled. + n.Lock() + if n.catchupStalled() { + n.cancelCatchup() + } + n.resetElectionTimeout() + n.Unlock() } else { n.switchToCandidate() return @@ -1792,12 +1840,60 @@ } } +// Pool for CommittedEntry re-use. +var cePool = sync.Pool{ + New: func() any { + return &CommittedEntry{} + }, +} + // CommittedEntry is handed back to the user to apply a commit to their FSM.
type CommittedEntry struct { Index uint64 Entries []*Entry } +// Create a new CommittedEntry. +func newCommittedEntry(index uint64, entries []*Entry) *CommittedEntry { + ce := cePool.Get().(*CommittedEntry) + ce.Index, ce.Entries = index, entries + return ce +} + +func (ce *CommittedEntry) ReturnToPool() { + if ce == nil { + return + } + if len(ce.Entries) > 0 { + for _, e := range ce.Entries { + entryPool.Put(e) + } + } + ce.Index, ce.Entries = 0, nil + cePool.Put(ce) +} + +// Pool for Entry re-use. +var entryPool = sync.Pool{ + New: func() any { + return &Entry{} + }, +} + +// Helper to create new entries. +func newEntry(t EntryType, data []byte) *Entry { + entry := entryPool.Get().(*Entry) + entry.Type, entry.Data = t, data + return entry +} + +// Pool for appendEntry re-use. +var aePool = sync.Pool{ + New: func() any { + return &appendEntry{} + }, +} + // appendEntry is the main struct that is used to sync raft peers. type appendEntry struct { leader string @@ -1812,6 +1908,20 @@ type appendEntry struct { buf []byte } +// Create a new appendEntry. +func newAppendEntry(leader string, term, commit, pterm, pindex uint64, entries []*Entry) *appendEntry { + ae := aePool.Get().(*appendEntry) + ae.leader, ae.term, ae.commit, ae.pterm, ae.pindex, ae.entries = leader, term, commit, pterm, pindex, entries + ae.reply, ae.sub, ae.buf = _EMPTY_, nil, nil + return ae +} + +// Will return this append entry, and its interior entries to their respective pools. +func (ae *appendEntry) returnToPool() { + ae.entries, ae.buf, ae.sub, ae.reply = nil, nil, nil, _EMPTY_ + aePool.Put(ae) +} + type EntryType uint8 const ( @@ -1903,15 +2013,10 @@ func (n *raft) decodeAppendEntry(msg []byte, sub *subscription, reply string) (* } var le = binary.LittleEndian - ae := &appendEntry{ - leader: string(msg[:idLen]), - term: le.Uint64(msg[8:]), - commit: le.Uint64(msg[16:]), - pterm: le.Uint64(msg[24:]), - pindex: le.Uint64(msg[32:]), - sub: sub, - reply: reply, - } + + ae := newAppendEntry(string(msg[:idLen]), le.Uint64(msg[8:]), le.Uint64(msg[16:]), le.Uint64(msg[24:]), le.Uint64(msg[32:]), nil) + ae.reply, ae.sub = reply, sub + // Decode Entries. ne, ri := int(le.Uint16(msg[40:])), 42 for i, max := 0, len(msg); i < ne; i++ { @@ -1923,27 +2028,42 @@ if le <= 0 || ri+le > max { return nil, errBadAppendEntry } - etype := EntryType(msg[ri]) - ae.entries = append(ae.entries, &Entry{etype, msg[ri+1 : ri+le]}) + entry := newEntry(EntryType(msg[ri]), msg[ri+1:ri+le]) + ae.entries = append(ae.entries, entry) ri += le } ae.buf = msg return ae, nil } +// Pool for appendEntryResponse re-use. +var arPool = sync.Pool{ + New: func() any { + return &appendEntryResponse{} + }, +} + +// We want to make sure this does not change from system changing length of syshash. +const idLen = 8 +const appendEntryResponseLen = 24 + 1 + // appendEntryResponse is our response to a received appendEntry. type appendEntryResponse struct { term uint64 index uint64 peer string + reply string // internal usage. success bool - // internal - reply string } -// We want to make sure this does not change from system changing length of syshash. -const idLen = 8 -const appendEntryResponseLen = 24 + 1 +// Create a new appendEntryResponse. +func newAppendEntryResponse(term, index uint64, peer string, success bool) *appendEntryResponse { + ar := arPool.Get().(*appendEntryResponse) + ar.term, ar.index, ar.peer, ar.success = term, index, peer, success + // Always empty out.
+ ar.reply = _EMPTY_ + return ar +} func (ar *appendEntryResponse) encode(b []byte) []byte { var buf []byte @@ -1964,16 +2084,25 @@ func (ar *appendEntryResponse) encode(b []byte) []byte { return buf[:appendEntryResponseLen] } +// Track all peers we may have ever seen to use as string interns for appendEntryResponse decoding. +var peers sync.Map + func (n *raft) decodeAppendEntryResponse(msg []byte) *appendEntryResponse { if len(msg) != appendEntryResponseLen { return nil } var le = binary.LittleEndian - ar := &appendEntryResponse{ - term: le.Uint64(msg[0:]), - index: le.Uint64(msg[8:]), - peer: string(msg[16 : 16+idLen]), + ar := arPool.Get().(*appendEntryResponse) + ar.term = le.Uint64(msg[0:]) + ar.index = le.Uint64(msg[8:]) + + peer, ok := peers.Load(string(msg[16 : 16+idLen])) + if !ok { + // We missed so store inline here. + peer = string(msg[16 : 16+idLen]) + peers.Store(peer, peer) } + ar.peer = peer.(string) ar.success = msg[24] == 1 return ar } @@ -1990,8 +2119,6 @@ func (n *raft) handleForwardedRemovePeerProposal(sub *subscription, c *client, _ n.warn("Received invalid peer name for remove proposal: %q", msg) return } - // Need to copy since this is underlying client/route buffer. - peer := string(copyBytes(msg)) n.RLock() prop, werr := n.prop, n.werr @@ -2002,7 +2129,9 @@ return } - prop.push(&Entry{EntryRemovePeer, []byte(peer)}) + // Need to copy since this is underlying client/route buffer. + peer := copyBytes(msg) + prop.push(newEntry(EntryRemovePeer, peer)) } // Called when a peer has forwarded a proposal. @@ -2023,7 +2152,7 @@ func (n *raft) handleForwardedProposal(sub *subscription, c *client, _ *Account, return } - prop.push(&Entry{EntryNormal, msg}) + prop.push(newEntry(EntryNormal, msg)) } func (n *raft) runAsLeader() { @@ -2055,6 +2184,7 @@ func (n *raft) runAsLeader() { n.Unlock() }() + // To send out our initial peer state. n.sendPeerState() hb := time.NewTicker(hbInterval) @@ -2092,11 +2222,20 @@ func (n *raft) runAsLeader() { continue } n.sendAppendEntry(entries) - // We need to re-craete `entries` because there is a reference + + // If this is us sending out a leadership transfer, step down inline here. + if b.Type == EntryLeaderTransfer { + n.prop.recycle(&es) + n.debug("Stepping down due to leadership transfer") + n.switchToFollower(noLeader) + return + } + // We need to re-create `entries` because there is a reference // to it in the node's pae map. entries = nil } n.prop.recycle(&es) + case <-hb.C: if n.notActive() { n.sendHeartbeat() @@ -2203,6 +2342,7 @@ func (n *raft) runCatchup(ar *appendEntryResponse, indexUpdatesQ *ipQueue[uint64 n.RUnlock() defer s.grWG.Done() + defer arPool.Put(ar) defer func() { n.Lock() @@ -2295,6 +2435,8 @@ func (n *raft) sendSnapshotToFollower(subject string) (uint64, error) { if err != nil { // We need to stepdown here when this happens. n.stepdown.push(noLeader) + // We need to reset our state here as well. + n.resetWAL() return 0, err } // Go ahead and send the snapshot and peerstate here as first append entry to the catchup follower.
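The `peers` sync.Map added above is a string intern table for the fixed 8-byte peer IDs decoded out of every appendEntryResponse: the decoder looks the ID up first and only allocates (and stores) a canonical copy on a miss, so the steady stream of responses retains one shared string per peer instead of one copy per message. A minimal standalone sketch of the same technique, with illustrative names (internTable, intern) that are not part of the patch:

package main

import (
	"fmt"
	"sync"
)

// internTable maps each string seen so far to its canonical copy.
var internTable sync.Map

// intern returns a canonical shared string for b. A short-lived
// string is still created as the lookup key, but all callers end
// up retaining the same canonical copy, which is what matters when
// the result is stored in long-lived maps keyed by peer ID.
func intern(b []byte) string {
	if v, ok := internTable.Load(string(b)); ok {
		return v.(string)
	}
	s := string(b) // allocate the canonical copy once per distinct ID
	internTable.Store(s, s)
	return s
}

func main() {
	p1 := intern([]byte("peer0001"))
	p2 := intern([]byte("peer0001"))
	fmt.Println(p1 == p2) // true; both retain one backing allocation
}

The flip side, which the patch also handles, is that interned entries must be deleted when a peer is permanently removed (see the peers.Delete(peer) call in applyCommit further down), or the table grows without bound.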
@@ -2338,6 +2480,7 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { if lastIndex, err := n.sendSnapshotToFollower(ar.reply); err != nil { n.error("Error sending snapshot to follower [%s]: %v", ar.peer, err) n.Unlock() + arPool.Put(ar) return } else { start = lastIndex + 1 @@ -2345,6 +2488,7 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { if state.Msgs == 0 || start > state.LastSeq { n.debug("Finished catching up") n.Unlock() + arPool.Put(ar) return } n.debug("Snapshot sent, reset first catchup entry to %d", lastIndex) @@ -2354,11 +2498,23 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { ae, err := n.loadEntry(start) if err != nil { n.warn("Request from follower for entry at index [%d] errored for state %+v - %v", start, state, err) + if err == ErrStoreEOF { + // If we are here we are seeing a request for an item beyond our state, meaning we should stepdown. + n.stepdown.push(noLeader) + n.Unlock() + arPool.Put(ar) + return + } ae, err = n.loadFirstEntry() } if err != nil || ae == nil { n.warn("Could not find a starting entry for catchup request: %v", err) + // If we are here we are seeing a request for an item we do not have, meaning we should stepdown. + // This is possible on a reset of our WAL but the other side has a snapshot already. + // If we do not stepdown this can cycle. + n.stepdown.push(noLeader) n.Unlock() + arPool.Put(ar) return } if ae.pindex != ar.index || ae.pterm != ar.term { @@ -2411,12 +2567,16 @@ func (n *raft) applyCommit(index uint64) error { var err error if ae, err = n.loadEntry(index); err != nil { if err != ErrStoreClosed && err != ErrStoreEOF { - if err == errBadMsg { - n.setWriteErrLocked(err) + n.warn("Got an error loading %d index: %v - will reset", index, err) + if n.state == Leader { + n.stepdown.push(n.selectNextLeader()) } - n.warn("Got an error loading %d index: %v", index, err) + // Reset and cancel any catchup. + n.resetWAL() + n.cancelCatchup() + } else { + n.commit = original } - n.commit = original return errEntryLoadFailed } } else { @@ -2432,7 +2592,7 @@ func (n *raft) applyCommit(index uint64) error { committed = append(committed, e) case EntryOldSnapshot: // For old snapshots in our WAL. - committed = append(committed, &Entry{EntrySnapshot, e.Data}) + committed = append(committed, newEntry(EntrySnapshot, e.Data)) case EntrySnapshot: committed = append(committed, e) case EntryPeerState: @@ -2445,6 +2605,9 @@ func (n *raft) applyCommit(index uint64) error { newPeer := string(e.Data) n.debug("Added peer %q", newPeer) + // Store our peer in our global peer map for all peers. + peers.LoadOrStore(newPeer, newPeer) + // If we were on the removed list reverse that here. if n.removed != nil { delete(n.removed, newPeer) @@ -2487,6 +2650,9 @@ func (n *raft) applyCommit(index uint64) error { n.stepdown.push(n.selectNextLeader()) } + // Remove from string intern map. + peers.Delete(peer) + // We pass these up as well. committed = append(committed, e) } @@ -2496,17 +2662,23 @@ func (n *raft) applyCommit(index uint64) error { if fpae { delete(n.pae, index) } - n.apply.push(&CommittedEntry{index, committed}) + n.apply.push(newCommittedEntry(index, committed)) } else { // If we processed inline update our applied index. n.applied = index } + // Place back in the pool. + ae.returnToPool() return nil } // Used to track a success response and apply entries. func (n *raft) trackResponse(ar *appendEntryResponse) { n.Lock() + if n.state == Closed { + n.Unlock() + return + } // Update peer's last index. 
if ps := n.peers[ar.peer]; ps != nil && ar.index > ps.li { @@ -2532,8 +2704,8 @@ func (n *raft) trackResponse(ar *appendEntryResponse) { if nr := len(results); nr >= n.qn { // We have a quorum. for index := n.commit + 1; index <= ar.index; index++ { - if err := n.applyCommit(index); err != nil { - n.error("Got an error apply commit for %d: %v", index, err) + if err := n.applyCommit(index); err != nil && err != errNodeClosed { + n.error("Got an error applying commit for %d: %v", index, err) break } } @@ -2671,11 +2843,6 @@ func (n *raft) runAsCandidate() { // handleAppendEntry handles an append entry from the wire. func (n *raft) handleAppendEntry(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { - if n.outOfResources() { - n.debug("AppendEntry not processing inbound, no resources") - return - } - msg = copyBytes(msg) if ae, err := n.decodeAppendEntry(msg, sub, reply); err == nil { n.entry.push(ae) @@ -2760,7 +2927,7 @@ func (n *raft) truncateWAL(term, index uint64) { if err == ErrInvalidSequence { n.debug("Resetting WAL") n.wal.Truncate(0) - index, n.pterm, n.pindex = 0, 0, 0 + index, n.term, n.pterm, n.pindex = 0, 0, 0, 0 } else { n.warn("Error truncating WAL: %v", err) n.setWriteErrLocked(err) @@ -2769,7 +2936,7 @@ func (n *raft) truncateWAL(term, index uint64) { } // Set after we know we have truncated properly. - n.pterm, n.pindex = term, index + n.term, n.pterm, n.pindex = term, term, index } // Reset our WAL. @@ -2806,7 +2973,8 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { // Are we receiving from another leader. if n.state == Leader { - if ae.term > n.term { + // If we are the same we should step down to break the tie. + if ae.term >= n.term { n.term = ae.term n.vote = noVote n.writeTermVote() @@ -2814,9 +2982,10 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.stepdown.push(ae.leader) } else { // Let them know we are the leader. - ar := &appendEntryResponse{n.term, n.pindex, n.id, false, _EMPTY_} + ar := newAppendEntryResponse(n.term, n.pindex, n.id, false) n.debug("AppendEntry ignoring old term from another leader") n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf)) + arPool.Put(ar) } // Always return here from processing. n.Unlock() @@ -2848,15 +3017,6 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { } } - // Ignore old terms. - if isNew && ae.term < n.term { - ar := &appendEntryResponse{n.term, n.pindex, n.id, false, _EMPTY_} - n.Unlock() - n.debug("AppendEntry ignoring old term") - n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf)) - return - } - // If we are catching up ignore old catchup subs. // This could happen when we stall or cancel a catchup. if !isNew && catchingUp && sub != n.catchup.sub { @@ -2879,11 +3039,12 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { if n.catchupStalled() { n.debug("Catchup may be stalled, will request again") inbox = n.createCatchup(ae) - ar = &appendEntryResponse{n.pterm, n.pindex, n.id, false, _EMPTY_} + ar = newAppendEntryResponse(n.pterm, n.pindex, n.id, false) } n.Unlock() if ar != nil { n.sendRPC(ae.reply, inbox, ar.encode(arbuf)) + arPool.Put(ar) } // Ignore new while catching up or replaying. return @@ -2892,6 +3053,7 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { // If this term is greater than ours. 
if ae.term > n.term { + n.pterm = ae.pterm n.term = ae.term n.vote = noVote if isNew { @@ -2911,7 +3073,7 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.updateLeadChange(false) } - if ae.pterm != n.pterm || ae.pindex != n.pindex { + if (isNew && ae.pterm != n.pterm) || ae.pindex != n.pindex { // Check if this is a lower or equal index than what we were expecting. if ae.pindex <= n.pindex { n.debug("AppendEntry detected pindex less than ours: %d:%d vs %d:%d", ae.pterm, ae.pindex, n.pterm, n.pindex) @@ -2930,9 +3092,10 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.cancelCatchup() // Create response. - ar = &appendEntryResponse{ae.pterm, ae.pindex, n.id, success, _EMPTY_} + ar = newAppendEntryResponse(ae.pterm, ae.pindex, n.id, success) n.Unlock() n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf)) + arPool.Put(ar) return } @@ -2980,7 +3143,7 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { } // Now send snapshot to upper levels. Only send the snapshot, not the peerstate entry. - n.apply.push(&CommittedEntry{n.commit, ae.entries[:1]}) + n.apply.push(newCommittedEntry(n.commit, ae.entries[:1])) n.Unlock() return @@ -2991,16 +3154,17 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { if ae.pindex > n.pindex { // Setup our state for catching up. inbox := n.createCatchup(ae) - ar := &appendEntryResponse{n.pterm, n.pindex, n.id, false, _EMPTY_} + ar := newAppendEntryResponse(n.pterm, n.pindex, n.id, false) n.Unlock() n.sendRPC(ae.reply, inbox, ar.encode(arbuf)) + arPool.Put(ar) return } } } // Save to our WAL if we have entries. - if len(ae.entries) > 0 { + if ae.shouldStore() { // Only store if an original which will have sub != nil if sub != nil { if err := n.storeToWAL(ae); err != nil { @@ -3025,27 +3189,41 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { n.pterm = ae.term n.pindex = ae.pindex + 1 } + } - // Check to see if we have any related entries to process here. - for _, e := range ae.entries { - switch e.Type { - case EntryLeaderTransfer: - if isNew { - maybeLeader := string(e.Data) - if maybeLeader == n.id && !n.observer && !n.paused { + // Check to see if we have any related entries to process here. + for _, e := range ae.entries { + switch e.Type { + case EntryLeaderTransfer: + // Only process these if they are new, so no replays or catchups. + if isNew { + maybeLeader := string(e.Data) + // This is us. We need to check if we can become the leader. + if maybeLeader == n.id { + // If not an observer and not paused we are good to go. + if !n.observer && !n.paused { n.lxfer = true n.xferCampaign() + } else if n.paused && !n.pobserver { + // Here we can become a leader but need to wait for resume of the apply channel. + n.lxfer = true } + } else { + // Since we are here we are not the chosen one but we should clear any vote preference. 
+ n.vote = noVote + n.writeTermVote() } - case EntryAddPeer: - if newPeer := string(e.Data); len(newPeer) == idLen { - // Track directly, but wait for commit to be official - if ps := n.peers[newPeer]; ps != nil { - ps.ts = time.Now().UnixNano() - } else { - n.peers[newPeer] = &lps{time.Now().UnixNano(), 0, false} - } + } + case EntryAddPeer: + if newPeer := string(e.Data); len(newPeer) == idLen { + // Track directly, but wait for commit to be official + if ps := n.peers[newPeer]; ps != nil { + ps.ts = time.Now().UnixNano() + } else { + n.peers[newPeer] = &lps{time.Now().UnixNano(), 0, false} } + // Store our peer in our global peer map for all peers. + peers.LoadOrStore(newPeer, newPeer) } } } @@ -3066,13 +3244,14 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { var ar *appendEntryResponse if sub != nil { - ar = &appendEntryResponse{n.pterm, n.pindex, n.id, true, _EMPTY_} + ar = newAppendEntryResponse(n.pterm, n.pindex, n.id, true) } n.Unlock() // Success. Send our response. if ar != nil { n.sendRPC(ae.reply, _EMPTY_, ar.encode(arbuf)) + arPool.Put(ar) } } @@ -3102,14 +3281,18 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { if ar.success { n.trackResponse(ar) + arPool.Put(ar) } else if ar.term > n.term { // False here and they have a higher term. + n.Lock() n.term = ar.term n.vote = noVote n.writeTermVote() n.warn("Detected another leader with higher term, will stepdown and reset") n.stepdown.push(noLeader) n.resetWAL() + n.Unlock() + arPool.Put(ar) } else if ar.reply != _EMPTY_ { n.catchupFollower(ar) } @@ -3117,14 +3300,18 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { // handleAppendEntryResponse processes responses to append entries. func (n *raft) handleAppendEntryResponse(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { - msg = copyBytes(msg) ar := n.decodeAppendEntryResponse(msg) ar.reply = reply n.resp.push(ar) } func (n *raft) buildAppendEntry(entries []*Entry) *appendEntry { - return &appendEntry{n.id, n.term, n.commit, n.pterm, n.pindex, entries, _EMPTY_, nil, nil} + return newAppendEntry(n.id, n.term, n.commit, n.pterm, n.pindex, entries) +} + +// Determine if we should store an entry. +func (ae *appendEntry) shouldStore() bool { + return ae != nil && len(ae.entries) > 0 } // Store our append entry to our WAL. @@ -3136,6 +3323,7 @@ func (n *raft) storeToWAL(ae *appendEntry) error { if n.werr != nil { return n.werr } + seq, _, err := n.wal.StoreMsg(_EMPTY_, nil, ae.buf) if err != nil { n.setWriteErrLocked(err) @@ -3144,17 +3332,13 @@ func (n *raft) storeToWAL(ae *appendEntry) error { // Sanity checking for now. if index := ae.pindex + 1; index != seq { - n.warn("Wrong index, ae is %+v, index stored was %d, n.pindex is %d", ae, seq, n.pindex) - if index > seq { - // We are missing store state from our state. We need to stepdown at this point. - if n.state == Leader { - n.stepdown.push(n.selectNextLeader()) - } - } else { - // Truncate back to our last known. - n.truncateWAL(n.pterm, n.pindex) - n.cancelCatchup() + n.warn("Wrong index, ae is %+v, index stored was %d, n.pindex is %d, will reset", ae, seq, n.pindex) + if n.state == Leader { + n.stepdown.push(n.selectNextLeader()) } + // Reset and cancel any catchup. + n.resetWAL() + n.cancelCatchup() return errEntryStoreFailed } @@ -3182,7 +3366,8 @@ func (n *raft) sendAppendEntry(entries []*Entry) { } // If we have entries store this in our wal. 
- if len(entries) > 0 { + shouldStore := ae.shouldStore() + if shouldStore { if err := n.storeToWAL(ae); err != nil { return } @@ -3197,6 +3382,9 @@ } } n.sendRPC(n.asubj, n.areply, ae.buf) + if !shouldStore { + ae.returnToPool() + } } type extensionState uint16 @@ -3402,8 +3590,17 @@ func (n *raft) setWriteErrLocked(err error) { } n.werr = err - // For now since this can be happening all under the covers, we will call up and disable JetStream. - go n.s.handleOutOfSpace(nil) + if isOutOfSpaceErr(err) { + // For now since this can be happening all under the covers, we will call up and disable JetStream. + go n.s.handleOutOfSpace(nil) + } +} + +// Helper to check if we are closed when we do not hold a lock already. +func (n *raft) isClosed() bool { + n.RLock() + defer n.RUnlock() + return n.state == Closed } // Capture our write error if any and hold. @@ -3422,12 +3619,6 @@ func (n *raft) fileWriter() { psf := filepath.Join(n.sd, peerStateFile) n.RUnlock() - isClosed := func() bool { - n.RLock() - defer n.RUnlock() - return n.state == Closed - } - for s.isRunning() { select { case <-n.quit: @@ -3440,7 +3631,7 @@ <-dios err := os.WriteFile(tvf, buf[:], 0640) dios <- struct{}{} - if err != nil && !isClosed() { + if err != nil && !n.isClosed() { n.setWriteErr(err) n.warn("Error writing term and vote file for %q: %v", n.group, err) } @@ -3451,7 +3642,7 @@ <-dios err := os.WriteFile(psf, buf, 0640) dios <- struct{}{} - if err != nil && !isClosed() { + if err != nil && !n.isClosed() { n.setWriteErr(err) n.warn("Error writing peer state file for %q: %v", n.group, err) } @@ -3556,10 +3747,11 @@ func (n *raft) processVoteRequest(vr *voteRequest) error { // If this is a higher term go ahead and stepdown. if vr.term > n.term { if n.state != Follower { - n.debug("Stepping down from candidate, detected higher term: %d vs %d", vr.term, n.term) + n.debug("Stepping down from %s, detected higher term: %d vs %d", + strings.ToLower(n.state.String()), vr.term, n.term) n.stepdown.push(noLeader) + n.term = vr.term } - n.term = vr.term n.vote = noVote n.writeTermVote() } @@ -3568,6 +3760,7 @@ voteOk := n.vote == noVote || n.vote == vr.candidate if voteOk && (vr.lastTerm > n.pterm || vr.lastTerm == n.pterm && vr.lastIndex >= n.pindex) { vresp.granted = true + n.term = vr.term n.vote = vr.candidate n.writeTermVote() } else { diff --git a/server/raft_helpers_test.go b/server/raft_helpers_test.go new file mode 100644 index 000000000..6d235d35a --- /dev/null +++ b/server/raft_helpers_test.go @@ -0,0 +1,276 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Do not exclude this file with the !skip_js_tests since those helpers +// are also used by MQTT.
+ +package server + +import ( + "encoding/binary" + "fmt" + "math/rand" + "sync" + "testing" + "time" +) + +type stateMachine interface { + server() *Server + node() RaftNode + // This will call forward as needed so can be called on any node. + propose(data []byte) + // When entries have been committed and can be applied. + applyEntry(ce *CommittedEntry) + // When a leader change happens. + leaderChange(isLeader bool) + // Stop the raft group. + stop() + // Restart + restart() +} + +// Factory function needed for constructor. +type smFactory func(s *Server, cfg *RaftConfig, node RaftNode) stateMachine + +type smGroup []stateMachine + +// Leader of the group. +func (sg smGroup) leader() stateMachine { + for _, sm := range sg { + if sm.node().Leader() { + return sm + } + } + return nil +} + +// Wait on a leader to be elected. +func (sg smGroup) waitOnLeader() { + expires := time.Now().Add(10 * time.Second) + for time.Now().Before(expires) { + for _, sm := range sg { + if sm.node().Leader() { + return + } + } + time.Sleep(100 * time.Millisecond) + } +} + +// Pick a random member. +func (sg smGroup) randomMember() stateMachine { + return sg[rand.Intn(len(sg))] +} + +// Return a non-leader +func (sg smGroup) nonLeader() stateMachine { + for _, sm := range sg { + if !sm.node().Leader() { + return sm + } + } + return nil +} + +// Create a raft group and place on numMembers servers at random. +func (c *cluster) createRaftGroup(name string, numMembers int, smf smFactory) smGroup { + c.t.Helper() + if numMembers > len(c.servers) { + c.t.Fatalf("Members > Peers: %d vs %d", numMembers, len(c.servers)) + } + servers := append([]*Server{}, c.servers...) + rand.Shuffle(len(servers), func(i, j int) { servers[i], servers[j] = servers[j], servers[i] }) + return c.createRaftGroupWithPeers(name, servers[:numMembers], smf) +} + +func (c *cluster) createRaftGroupWithPeers(name string, servers []*Server, smf smFactory) smGroup { + c.t.Helper() + + var sg smGroup + var peers []string + + for _, s := range servers { + // generate peer names. + s.mu.RLock() + peers = append(peers, s.sys.shash) + s.mu.RUnlock() + } + + for _, s := range servers { + fs, err := newFileStore( + FileStoreConfig{StoreDir: c.t.TempDir(), BlockSize: defaultMediumBlockSize, AsyncFlush: false, SyncInterval: 5 * time.Minute}, + StreamConfig{Name: name, Storage: FileStorage}, + ) + require_NoError(c.t, err) + cfg := &RaftConfig{Name: name, Store: c.t.TempDir(), Log: fs} + s.bootstrapRaftNode(cfg, peers, true) + n, err := s.startRaftNode(globalAccountName, cfg) + require_NoError(c.t, err) + sm := smf(s, cfg, n) + sg = append(sg, sm) + go smLoop(sm) + } + return sg +} + +// Driver program for the state machine. +// Should be run in its own go routine. +func smLoop(sm stateMachine) { + s, n := sm.server(), sm.node() + qch, lch, aq := n.QuitC(), n.LeadChangeC(), n.ApplyQ() + + for { + select { + case <-s.quitCh: + return + case <-qch: + return + case <-aq.ch: + ces := aq.pop() + for _, ce := range ces { + sm.applyEntry(ce) + } + aq.recycle(&ces) + + case isLeader := <-lch: + sm.leaderChange(isLeader) + } + } +} + +// Simple implementation of a replicated state. +// The adder state just sums up int64 values. +type stateAdder struct { + sync.Mutex + s *Server + n RaftNode + cfg *RaftConfig + sum int64 +} + +// Simple getters for server and the raft node. 
+func (a *stateAdder) server() *Server { + a.Lock() + defer a.Unlock() + return a.s +} +func (a *stateAdder) node() RaftNode { + a.Lock() + defer a.Unlock() + return a.n +} + +func (a *stateAdder) propose(data []byte) { + a.Lock() + defer a.Unlock() + a.n.ForwardProposal(data) +} + +func (a *stateAdder) applyEntry(ce *CommittedEntry) { + a.Lock() + defer a.Unlock() + if ce == nil { + // This means initial state is done/replayed. + return + } + for _, e := range ce.Entries { + if e.Type == EntryNormal { + delta, _ := binary.Varint(e.Data) + a.sum += delta + } else if e.Type == EntrySnapshot { + a.sum, _ = binary.Varint(e.Data) + } + } + // Update applied. + a.n.Applied(ce.Index) +} + +func (a *stateAdder) leaderChange(isLeader bool) {} + +// Adder specific to change the total. +func (a *stateAdder) proposeDelta(delta int64) { + data := make([]byte, binary.MaxVarintLen64) + n := binary.PutVarint(data, int64(delta)) + a.propose(data[:n]) +} + +// Stop the group. +func (a *stateAdder) stop() { + a.Lock() + defer a.Unlock() + a.n.Stop() +} + +// Restart the group +func (a *stateAdder) restart() { + a.Lock() + defer a.Unlock() + + if a.n.State() != Closed { + return + } + + // The filestore is stopped as well, so need to extract the parts to recreate it. + rn := a.n.(*raft) + fs := rn.wal.(*fileStore) + + var err error + a.cfg.Log, err = newFileStore(fs.fcfg, fs.cfg.StreamConfig) + if err != nil { + panic(err) + } + a.n, err = a.s.startRaftNode(globalAccountName, a.cfg) + if err != nil { + panic(err) + } + // Finally restart the driver. + go smLoop(a) +} + +// Total for the adder state machine. +func (a *stateAdder) total() int64 { + a.Lock() + defer a.Unlock() + return a.sum +} + +// Install a snapshot. +func (a *stateAdder) snapshot(t *testing.T) { + a.Lock() + defer a.Unlock() + data := make([]byte, binary.MaxVarintLen64) + n := binary.PutVarint(data, a.sum) + snap := data[:n] + require_NoError(t, a.n.InstallSnapshot(snap)) +} + +// Helper to wait for a certain state. +func (rg smGroup) waitOnTotal(t *testing.T, expected int64) { + t.Helper() + checkFor(t, 20*time.Second, 200*time.Millisecond, func() error { + for _, sm := range rg { + asm := sm.(*stateAdder) + if total := asm.total(); total != expected { + return fmt.Errorf("Adder on %v has wrong total: %d vs %d", + asm.server(), total, expected) + } + } + return nil + }) +} + +// Factory function. +func newStateAdder(s *Server, cfg *RaftConfig, n RaftNode) stateMachine { + return &stateAdder{s: s, n: n, cfg: cfg} +} diff --git a/server/raft_test.go b/server/raft_test.go index 89c983b99..bddce36fb 100644 --- a/server/raft_test.go +++ b/server/raft_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 The NATS Authors +// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -17,8 +17,55 @@ import ( "math" "math/rand" "testing" + "time" ) +func TestNRGSimple(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + rg := c.createRaftGroup("TEST", 3, newStateAdder) + rg.waitOnLeader() + // Do several state transitions. + rg.randomMember().(*stateAdder).proposeDelta(11) + rg.randomMember().(*stateAdder).proposeDelta(11) + rg.randomMember().(*stateAdder).proposeDelta(-22) + // Wait for all members to have the correct state. 
+ rg.waitOnTotal(t, 0) +} + +func TestNRGSnapshotAndRestart(t *testing.T) { + c := createJetStreamClusterExplicit(t, "R3S", 3) + defer c.shutdown() + + rg := c.createRaftGroup("TEST", 3, newStateAdder) + rg.waitOnLeader() + + var expectedTotal int64 + + leader := rg.leader().(*stateAdder) + sm := rg.nonLeader().(*stateAdder) + + for i := 0; i < 1000; i++ { + delta := rand.Int63n(222) + expectedTotal += delta + leader.proposeDelta(delta) + + if i == 250 { + // Let some things catchup. + time.Sleep(50 * time.Millisecond) + // Snapshot leader and stop and snapshot a member. + leader.snapshot(t) + sm.snapshot(t) + sm.stop() + } + } + // Restart. + sm.restart() + // Wait for all members to have the correct state. + rg.waitOnTotal(t, expectedTotal) +} + func TestNRGAppendEntryEncode(t *testing.T) { ae := &appendEntry{ term: 1, diff --git a/server/reload.go b/server/reload.go index 2755a93aa..953623429 100644 --- a/server/reload.go +++ b/server/reload.go @@ -1,4 +1,4 @@ -// Copyright 2017-2022 The NATS Authors +// Copyright 2017-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -148,7 +148,7 @@ type debugOption struct { // However we will kick the raft nodes if they exist to reload. func (d *debugOption) Apply(server *Server) { server.Noticef("Reloaded: debug = %v", d.newValue) - server.reloadDebugRaftNodes() + server.reloadDebugRaftNodes(d.newValue) } // logtimeOption implements the option interface for the `logtime` setting. @@ -162,6 +162,17 @@ func (l *logtimeOption) Apply(server *Server) { server.Noticef("Reloaded: logtime = %v", l.newValue) } +// logtimeUTCOption implements the option interface for the `logtime_utc` setting. +type logtimeUTCOption struct { + loggingOption + newValue bool +} + +// Apply is a no-op because logging will be reloaded after options are applied. +func (l *logtimeUTCOption) Apply(server *Server) { + server.Noticef("Reloaded: logtime_utc = %v", l.newValue) +} + // logfileOption implements the option interface for the `log_file` setting. type logfileOption struct { loggingOption @@ -609,7 +620,7 @@ func (jso jetStreamOption) IsStatszChange() bool { } type ocspOption struct { - noopOption + tlsOption newValue *OCSPConfig } @@ -617,6 +628,15 @@ func (a *ocspOption) Apply(s *Server) { s.Noticef("Reloaded: OCSP") } +type ocspResponseCacheOption struct { + tlsOption + newValue *OCSPResponseCacheConfig +} + +func (a *ocspResponseCacheOption) Apply(s *Server) { + s.Noticef("Reloaded OCSP peer cache") +} + // connectErrorReports implements the option interface for the `connect_error_reports` // setting. type connectErrorReports struct { @@ -710,6 +730,7 @@ func (o *mqttInactiveThresholdReload) Apply(s *Server) { s.Noticef("Reloaded: MQTT consumer_inactive_threshold = %v", o.newValue) } +// ** added by Memphis // dlsRetentionHoursOption implements the option interface for the `dls_retention_hours` // setting. type dlsRetentionHoursOption struct { @@ -798,6 +819,8 @@ func (o *restGwOption) Apply(server *Server) { server.Noticef("Reloaded: rest_gw_host = %v", o.newValue) } +// ** added by Memphis + // Compares options and disconnects clients that are no longer listed in pinned certs. Lock must not be held. 
func (s *Server) recheckPinnedCerts(curOpts *Options, newOpts *Options) { s.mu.Lock() @@ -884,13 +907,6 @@ func (s *Server) Reload() error { func (s *Server) ReloadOptions(newOpts *Options) error { s.mu.Lock() - s.reloading = true - defer func() { - s.mu.Lock() - s.reloading = false - s.mu.Unlock() - }() - curOpts := s.getOpts() // Wipe trusted keys if needed when we have an operator. @@ -1051,7 +1067,7 @@ func imposeOrder(value interface{}) error { sort.Strings(value.AllowedOrigins) case string, bool, uint8, int, int32, int64, time.Duration, float64, nil, LeafNodeOpts, ClusterOpts, *tls.Config, PinnedCertSet, *URLAccResolver, *MemAccResolver, *DirAccResolver, *CacheDirAccResolver, Authentication, MQTTOpts, jwt.TagList, - *OCSPConfig, map[string]string, JSLimitOpts, StoreCipher, map[string]int: + *OCSPConfig, map[string]string, JSLimitOpts, StoreCipher, *OCSPResponseCacheConfig: // explicitly skipped types default: // this will fail during unit tests @@ -1120,6 +1136,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { diffOpts = append(diffOpts, &debugOption{newValue: newValue.(bool)}) case "logtime": diffOpts = append(diffOpts, &logtimeOption{newValue: newValue.(bool)}) + case "logtimeutc": + diffOpts = append(diffOpts, &logtimeUTCOption{newValue: newValue.(bool)}) case "logfile": diffOpts = append(diffOpts, &logfileOption{newValue: newValue.(string)}) case "syslog": @@ -1375,8 +1393,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { // Similar to gateways tmpOld := oldValue.(WebsocketOpts) tmpNew := newValue.(WebsocketOpts) - tmpOld.TLSConfig = nil - tmpNew.TLSConfig = nil + tmpOld.TLSConfig, tmpOld.tlsConfigOpts = nil, nil + tmpNew.TLSConfig, tmpNew.tlsConfigOpts = nil, nil // If there is really a change prevents reload. if !reflect.DeepEqual(tmpOld, tmpNew) { // See TODO(ik) note below about printing old/new values. @@ -1395,9 +1413,9 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { // we only fail reload if some that we don't support are changed. 
tmpOld := oldValue.(MQTTOpts) tmpNew := newValue.(MQTTOpts) - tmpOld.TLSConfig, tmpOld.AckWait, tmpOld.MaxAckPending, tmpOld.StreamReplicas, tmpOld.ConsumerReplicas, tmpOld.ConsumerMemoryStorage = nil, 0, 0, 0, 0, false + tmpOld.TLSConfig, tmpOld.tlsConfigOpts, tmpOld.AckWait, tmpOld.MaxAckPending, tmpOld.StreamReplicas, tmpOld.ConsumerReplicas, tmpOld.ConsumerMemoryStorage = nil, nil, 0, 0, 0, 0, false tmpOld.ConsumerInactiveThreshold = 0 - tmpNew.TLSConfig, tmpNew.AckWait, tmpNew.MaxAckPending, tmpNew.StreamReplicas, tmpNew.ConsumerReplicas, tmpNew.ConsumerMemoryStorage = nil, 0, 0, 0, 0, false + tmpNew.TLSConfig, tmpNew.tlsConfigOpts, tmpNew.AckWait, tmpNew.MaxAckPending, tmpNew.StreamReplicas, tmpNew.ConsumerReplicas, tmpNew.ConsumerMemoryStorage = nil, nil, 0, 0, 0, 0, false tmpNew.ConsumerInactiveThreshold = 0 if !reflect.DeepEqual(tmpOld, tmpNew) { @@ -1450,6 +1468,9 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { } case "ocspconfig": diffOpts = append(diffOpts, &ocspOption{newValue: newValue.(*OCSPConfig)}) + case "ocspcacheconfig": + diffOpts = append(diffOpts, &ocspResponseCacheOption{newValue: newValue.(*OCSPResponseCacheConfig)}) + // ** added by Memphis case "logsretentiondays": diffOpts = append(diffOpts, &logsRetentionDaysOption{newValue: newValue.(int)}) case "dlsretentionhours": @@ -1464,6 +1485,7 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { diffOpts = append(diffOpts, &restGwOption{newValue: newValue.(string)}) case "gcproducersconsumersretentionhours": diffOpts = append(diffOpts, &GCProducersConsumersRetentionHoursOption{newValue: newValue.(int)}) + // ** added by Memphis default: // TODO(ik): Implement String() on those options to have a nice print. // %v is difficult to figure what's what, %+v print private fields and @@ -1601,10 +1623,12 @@ func (s *Server) applyOptions(ctx *reloadContext, opts []option) { s.updateRemoteLeafNodesTLSConfig(newOpts) } + // This will fire if TLS enabled at root (NATS listener) -or- if ocsp or ocsp_cache + // appear in the config. if reloadTLS { // Restart OCSP monitoring. if err := s.reloadOCSP(); err != nil { - s.Warnf("Can't restart OCSP Stapling: %v", err) + s.Warnf("Can't restart OCSP features: %v", err) } } @@ -1681,102 +1705,44 @@ func (s *Server) reloadClientTraceLevel() { func (s *Server) reloadAuthorization() { // This map will contain the names of accounts that have their streams // import configuration changed. - awcsti := make(map[string]struct{}) + var awcsti map[string]struct{} checkJetStream := false + opts := s.getOpts() s.mu.Lock() + deletedAccounts := make(map[string]*Account) + // This can not be changed for now so ok to check server's trustedKeys unlocked. // If plain configured accounts, process here. if s.trustedKeys == nil { - // We need to drain the old accounts here since we have something - // new configured. We do not want s.accounts to change since that would - // mean adding a lock to lookupAccount which is what we are trying to - // optimize for with the change from a map to a sync.Map. - oldAccounts := make(map[string]*Account) + // Make a map of the configured account names so we figure out the accounts + // that should be removed later on. + configAccs := make(map[string]struct{}, len(opts.Accounts)) + for _, acc := range opts.Accounts { + configAccs[acc.GetName()] = struct{}{} + } + // Now range over existing accounts and keep track of the ones deleted + // so some cleanup can be made after releasing the server lock. 
s.accounts.Range(func(k, v interface{}) bool { - acc := v.(*Account) - if acc.GetName() == DEFAULT_GLOBAL_ACCOUNT { + an, acc := k.(string), v.(*Account) + // Exclude default and system account from this test since those + // may not actually be in opts.Accounts. + if an == DEFAULT_GLOBAL_ACCOUNT || an == DEFAULT_SYSTEM_ACCOUNT { return true } - acc.mu.Lock() - oldAccounts[acc.Name] = acc - // Need to clear out eventing timers since they close over this account and not the new one. - clearTimer(&acc.etmr) - clearTimer(&acc.ctmr) - acc.mu.Unlock() - s.accounts.Delete(k) - return true - }) - - s.gacc = nil - s.configureAccounts() - s.configureAuthorization() - s.mu.Unlock() - - s.accounts.Range(func(k, v interface{}) bool { - newAcc := v.(*Account) - if acc, ok := oldAccounts[newAcc.Name]; ok { - // If account exist in latest config, "transfer" the account's - // sublist and client map to the new account. - acc.mu.RLock() - newAcc.mu.Lock() - if len(acc.clients) > 0 { - newAcc.clients = make(map[*client]struct{}, len(acc.clients)) - for c := range acc.clients { - newAcc.clients[c] = struct{}{} - } - } - // Same for leafnodes - newAcc.lleafs = append([]*client(nil), acc.lleafs...) - - newAcc.sl = acc.sl - if acc.rm != nil { - newAcc.rm = make(map[string]int32) - } - for k, v := range acc.rm { - newAcc.rm[k] = v - } - // Transfer internal client state. The configureAccounts call from above may have set up a new one. - // We need to use the old one, and the isid to not confuse internal subs. - newAcc.ic, newAcc.isid = acc.ic, acc.isid - // Transfer any JetStream state. - newAcc.js = acc.js - // Also transfer any internal accounting on different client types. We copy over all clients - // so need to copy this as well for proper accounting going forward. - newAcc.nrclients = acc.nrclients - newAcc.sysclients = acc.sysclients - newAcc.nleafs = acc.nleafs - newAcc.nrleafs = acc.nrleafs - // Process any reverse map entries. - if len(acc.imports.rrMap) > 0 { - newAcc.imports.rrMap = make(map[string][]*serviceRespEntry) - for k, v := range acc.imports.rrMap { - newAcc.imports.rrMap[k] = v - } - } - newAcc.mu.Unlock() - acc.mu.RUnlock() - - // Check if current and new config of this account are same - // in term of stream imports. - if !acc.checkStreamImportsEqual(newAcc) { - awcsti[newAcc.Name] = struct{}{} - } - - // We need to remove all old service import subs. - acc.removeAllServiceImportSubs() - newAcc.addAllServiceImportSubs() + // Check if existing account is still in opts.Accounts. + if _, ok := configAccs[an]; !ok { + deletedAccounts[an] = acc + s.accounts.Delete(k) } return true }) - s.mu.Lock() - // Check if we had a default system account. - if s.sys != nil && s.sys.account != nil && !s.opts.NoSystemAccount { - s.accounts.Store(s.sys.account.Name, s.sys.account) - } + // This will update existing and add new ones. + awcsti, _ = s.configureAccounts(true) + s.configureAuthorization() // Double check any JetStream configs. checkJetStream = s.js != nil - } else if s.opts.AccountResolver != nil { + } else if opts.AccountResolver != nil { s.configureResolver() if _, ok := s.accResolver.(*MemAccResolver); ok { // Check preloads so we can issue warnings etc if needed. @@ -1832,7 +1798,7 @@ func (s *Server) reloadAuthorization() { routes = append(routes, route) } // Check here for any system/internal clients which will not be in the servers map of normal clients.
- if s.sys != nil && s.sys.account != nil && !s.opts.NoSystemAccount { + if s.sys != nil && s.sys.account != nil && !opts.NoSystemAccount { s.accounts.Store(s.sys.account.Name, s.sys.account) } @@ -1858,6 +1824,18 @@ func (s *Server) reloadAuthorization() { } s.mu.Unlock() + // Clear some timers and remove service import subs for deleted accounts. + for _, acc := range deletedAccounts { + acc.mu.Lock() + clearTimer(&acc.etmr) + clearTimer(&acc.ctmr) + for _, se := range acc.exports.services { + se.clearResponseThresholdTimer() + } + acc.mu.Unlock() + acc.removeAllServiceImportSubs() + } + if resetCh != nil { resetCh <- struct{}{} } diff --git a/server/reload_test.go b/server/reload_test.go index 9f334db1d..946179008 100644 --- a/server/reload_test.go +++ b/server/reload_test.go @@ -308,6 +308,9 @@ func TestConfigReload(t *testing.T) { if !updated.Logtime { t.Fatal("Expected Logtime to be true") } + if !updated.LogtimeUTC { + t.Fatal("Expected LogtimeUTC to be true") + } if runtime.GOOS != "windows" { if !updated.Syslog { t.Fatal("Expected Syslog to be true") @@ -2600,6 +2603,25 @@ func TestConfigReloadAccountUsers(t *testing.T) { t.Fatalf("Error on subscribe: %v", err) } + // confirm subscriptions before and after reload. + var expectedSubs uint32 = 4 + sAcc, err := s.LookupAccount("synadia") + require_NoError(t, err) + sAcc.mu.RLock() + n := sAcc.sl.Count() + sAcc.mu.RUnlock() + if n != expectedSubs { + t.Errorf("Synadia account should have %d sub, got %v", expectedSubs, n) + } + nAcc, err := s.LookupAccount("nats.io") + require_NoError(t, err) + nAcc.mu.RLock() + n = nAcc.sl.Count() + nAcc.mu.RUnlock() + if n != expectedSubs { + t.Errorf("Nats.io account should have %d sub, got %v", expectedSubs, n) + } + // Remove user from account and whole account reloadUpdateConfig(t, s, conf, ` listen: "127.0.0.1:-1" @@ -2651,14 +2673,18 @@ func TestConfigReloadAccountUsers(t *testing.T) { // being reconnected does not mean that resent of subscriptions // has already been processed. 
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { - gAcc, _ := s.LookupAccount(globalAccountName) + gAcc, err := s.LookupAccount(globalAccountName) + require_NoError(t, err) gAcc.mu.RLock() n := gAcc.sl.Count() fooMatch := gAcc.sl.Match("foo") bazMatch := gAcc.sl.Match("baz") gAcc.mu.RUnlock() - if n != 2 { - return fmt.Errorf("Global account should have 2 subs, got %v", n) + // The number of subscriptions should be 3 ($SYS.REQ.ACCOUNT.PING.CONNZ, + // $SYS.REQ.ACCOUNT.PING.STATZ, $SYS.REQ.SERVER.PING.CONNZ) + // + 2 (foo and baz) + if n != 5 { + return fmt.Errorf("Global account should have 5 subs, got %v", n) } if len(fooMatch.psubs) != 1 { return fmt.Errorf("Global account should have foo sub") @@ -2667,25 +2693,28 @@ return fmt.Errorf("Global account should have baz sub") } - sAcc, _ := s.LookupAccount("synadia") + sAcc, err := s.LookupAccount("synadia") + require_NoError(t, err) sAcc.mu.RLock() n = sAcc.sl.Count() barMatch := sAcc.sl.Match("bar") + sAcc.mu.RUnlock() - if n != 1 { - return fmt.Errorf("Synadia account should have 1 sub, got %v", n) + if n != expectedSubs { + return fmt.Errorf("Synadia account should have %d sub, got %v", expectedSubs, n) } if len(barMatch.psubs) != 1 { return fmt.Errorf("Synadia account should have bar sub") } - nAcc, _ := s.LookupAccount("nats.io") + nAcc, err := s.LookupAccount("nats.io") + require_NoError(t, err) nAcc.mu.RLock() n = nAcc.sl.Count() batMatch := nAcc.sl.Match("bat") nAcc.mu.RUnlock() - if n != 1 { - return fmt.Errorf("Nats.io account should have 1 sub, got %v", n) + if n != expectedSubs { + return fmt.Errorf("Nats.io account should have %d sub, got %v", expectedSubs, n) } if len(batMatch.psubs) != 1 { return fmt.Errorf("Nats.io account should have bat sub") @@ -2694,6 +2723,85 @@ }) } +func TestConfigReloadAccountWithNoChanges(t *testing.T) { + conf := createConfFile(t, []byte(` + listen: "127.0.0.1:-1" + system_account: sys + accounts { + A { + users = [{ user: a }] + } + B { + users = [{ user: b }] + } + C { + users = [{ user: c }] + } + sys { + users = [{ user: sys }] + } + } + `)) + s, opts := RunServerWithConfig(conf) + defer s.Shutdown() + + ncA, err := nats.Connect(fmt.Sprintf("nats://a:@%s:%d", opts.Host, opts.Port)) + if err != nil { + t.Fatalf("Error on connect: %v", err) + } + defer ncA.Close() + + // Confirm default service imports are ok. + checkSubs := func(t *testing.T) { + resp, err := ncA.Request("$SYS.REQ.ACCOUNT.PING.CONNZ", nil, time.Second) + if err != nil { + t.Error(err) + } + if resp == nil || !strings.Contains(string(resp.Data), `"num_connections":1`) { + t.Fatal("unexpected data in connz response") + } + resp, err = ncA.Request("$SYS.REQ.SERVER.PING.CONNZ", nil, time.Second) + if err != nil { + t.Error(err) + } + if resp == nil || !strings.Contains(string(resp.Data), `"num_connections":1`) { + t.Fatal("unexpected data in connz response") + } + resp, err = ncA.Request("$SYS.REQ.ACCOUNT.PING.STATZ", nil, time.Second) + if err != nil { + t.Error(err) + } + if resp == nil || !strings.Contains(string(resp.Data), `"conns":1`) { + t.Fatal("unexpected data in statz response") + } + } + checkSubs(t) + before := s.NumSubscriptions() + s.Reload() + after := s.NumSubscriptions() + if before != after { + t.Errorf("Number of subscriptions changed after reload: %d -> %d", before, after) + } + + // Confirm this still works after a reload...
+ checkSubs(t) + before = s.NumSubscriptions() + s.Reload() + after = s.NumSubscriptions() + if before != after { + t.Errorf("Number of subscriptions changed after reload: %d -> %d", before, after) + } + + // Do another extra reload just in case. + checkSubs(t) + before = s.NumSubscriptions() + s.Reload() + after = s.NumSubscriptions() + if before != after { + t.Errorf("Number of subscriptions changed after reload: %d -> %d", before, after) + } +} + func TestConfigReloadAccountNKeyUsers(t *testing.T) { conf := createConfFile(t, []byte(` listen: "127.0.0.1:-1" @@ -3897,7 +4005,7 @@ func TestConfigReloadConnectErrReports(t *testing.T) { } } -func TestAuthReloadDoesNotBreakRouteInterest(t *testing.T) { +func TestConfigReloadAuthDoesNotBreakRouteInterest(t *testing.T) { s, opts := RunServerWithConfig("./configs/seed_tls.conf") defer s.Shutdown() @@ -4047,7 +4155,7 @@ func TestConfigReloadAccountResolverTLSConfig(t *testing.T) { } } -func TestLoggingReload(t *testing.T) { +func TestConfigReloadLogging(t *testing.T) { // This test basically starts a server and causes its configuration to be reloaded 3 times. // Each time, a new log file is created and trace levels are turned off - on - off. @@ -4174,7 +4282,7 @@ check("off-post.log", tracingAbsent) } -func TestReloadValidate(t *testing.T) { +func TestConfigReloadValidate(t *testing.T) { confFileName := createConfFile(t, []byte(` listen: "127.0.0.1:-1" no_auth_user: a @@ -4234,12 +4342,14 @@ func TestConfigReloadAccounts(t *testing.T) { urlSys := fmt.Sprintf("nats://sys:pwd@%s:%d", o.Host, o.Port) urlUsr := fmt.Sprintf("nats://usr:pwd@%s:%d", o.Host, o.Port) - oldAcc, ok := s.accounts.Load("SYS") + oldAcci, ok := s.accounts.Load("SYS") if !ok { t.Fatal("No SYS account") } + oldAcc := oldAcci.(*Account) - testSrvState := func(oldAcc interface{}) { + testSrvState := func(oldAcc *Account) { + t.Helper() sysAcc := s.SystemAccount() s.mu.Lock() defer s.mu.Unlock() @@ -4252,11 +4362,13 @@ if s.opts.SystemAccount != "SYS" { t.Fatal("Found wrong sys.account") } - // This will fail prior to system account reload - if acc, ok := s.accounts.Load(s.opts.SystemAccount); !ok { - t.Fatal("Found different sys.account pointer") - } else if acc == oldAcc { - t.Fatal("System account is unaltered") + ai, ok := s.accounts.Load(s.opts.SystemAccount) + if !ok { + t.Fatalf("System account %q not found in s.accounts map", s.opts.SystemAccount) + } + acc := ai.(*Account) + if acc != oldAcc { + t.Fatalf("System account pointer was changed during reload, was %p now %p", oldAcc, acc) } if s.sys.client == nil { t.Fatal("Expected sys.client to be non-nil") @@ -4324,7 +4436,7 @@ } } - testSrvState(nil) + testSrvState(oldAcc) c1, s1C, s1D := subscribe("SYS1") defer c1.Close() defer s1C.Unsubscribe() @@ -4508,3 +4620,73 @@ func TestConfigReloadWithSysAccountOnly(t *testing.T) { // ok } } + +func TestConfigReloadGlobalAccountWithMappingAndJetStream(t *testing.T) { + tmpl := ` + listen: 127.0.0.1:-1 + server_name: %s + jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} + + mappings { + subj.orig: subj.mapped.before.reload + } + + leaf { + listen: 127.0.0.1:-1 + } + + cluster { + name: %s + listen: 127.0.0.1:%d + routes = [%s] + } + + # For access to system account. + accounts { $SYS { users = [ { user: "admin", pass: "s3cr3t!"
} ] } } + ` + c := createJetStreamClusterWithTemplate(t, tmpl, "R3S", 3) + defer c.shutdown() + + nc, js := jsClientConnect(t, c.randomServer()) + defer nc.Close() + + // Verify that mapping works + checkMapping := func(expectedSubj string) { + t.Helper() + sub := natsSubSync(t, nc, "subj.>") + defer sub.Unsubscribe() + natsPub(t, nc, "subj.orig", nil) + msg := natsNexMsg(t, sub, time.Second) + if msg.Subject != expectedSubj { + t.Fatalf("Expected subject to have been mapped to %q, got %q", expectedSubj, msg.Subject) + } + } + checkMapping("subj.mapped.before.reload") + + // Create a stream and check that we can get the INFO + _, err := js.AddStream(&nats.StreamConfig{ + Name: "TEST", + Replicas: 3, + Subjects: []string{"foo"}, + Retention: nats.InterestPolicy, + }) + require_NoError(t, err) + c.waitOnStreamLeader(globalAccountName, "TEST") + + _, err = js.StreamInfo("TEST") + require_NoError(t, err) + + // Change mapping on all servers and issue reload + for i, s := range c.servers { + opts := c.opts[i] + content, err := os.ReadFile(opts.ConfigFile) + require_NoError(t, err) + reloadUpdateConfig(t, s, opts.ConfigFile, strings.Replace(string(content), "subj.mapped.before.reload", "subj.mapped.after.reload", 1)) + } + // Make sure the cluster is still formed + checkClusterFormed(t, c.servers...) + // Now repeat the test for the subject mapping and stream info + checkMapping("subj.mapped.after.reload") + _, err = js.StreamInfo("TEST") + require_NoError(t, err) +} diff --git a/server/route.go b/server/route.go index 9b8450d5e..4703132eb 100644 --- a/server/route.go +++ b/server/route.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The NATS Authors +// Copyright 2013-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -530,7 +530,7 @@ func (c *client) processRouteInfo(info *Info) { remoteID := c.route.remoteID // Check if this is an INFO for gateways... - if info.Gateway != "" { + if info.Gateway != _EMPTY_ { c.mu.Unlock() // If this server has no gateway configured, report error and return. if !s.gateway.enabled { @@ -545,7 +545,7 @@ func (c *client) processRouteInfo(info *Info) { // We receive an INFO from a server that informs us about another server, // so the info.ID in the INFO protocol does not match the ID of this route. - if remoteID != "" && remoteID != info.ID { + if remoteID != _EMPTY_ && remoteID != info.ID { c.mu.Unlock() // Process this implicit route. We will check that it is not an explicit @@ -653,7 +653,7 @@ func (c *client) processRouteInfo(info *Info) { // The incoming INFO from the route will have IP set // if it has Cluster.Advertise. In that case, use that // otherwise construct it from the remote TCP address. - if info.IP == "" { + if info.IP == _EMPTY_ { // Need to get the remote IP address. c.mu.Lock() switch conn := c.nc.(type) { @@ -902,7 +902,7 @@ func (c *client) removeRemoteSubs() { if srv.gateway.enabled { srv.gatewayUpdateSubInterest(accountName, sub, -1) } - srv.updateLeafNodes(ase.acc, sub, -1) + ase.acc.updateLeafNodes(sub, -1) } // Now remove the subs by batch for each account sublist. @@ -972,7 +972,7 @@ func (c *client) processRemoteUnsub(arg []byte) (err error) { } // Now check on leafnode updates. 
- srv.updateLeafNodes(acc, sub, -1) + acc.updateLeafNodes(sub, -1) if c.opts.Verbose { c.sendOK() @@ -1035,7 +1035,6 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { acc = v.(*Account) } if acc == nil { - isNew := false // if the option of retrieving accounts later exists, create an expired one. // When a client comes along, expiration will prevent it from being used, // cause a fetch and update the account to what is should be. @@ -1045,6 +1044,7 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { } c.Debugf("Unknown account %q for remote subject %q", accountName, sub.subject) + var isNew bool if acc, isNew = srv.LookupOrRegisterAccount(accountName); isNew { acc.mu.Lock() acc.expired = true @@ -1109,7 +1109,7 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) { } // Now check on leafnode updates. - srv.updateLeafNodes(acc, sub, delta) + acc.updateLeafNodes(sub, delta) if c.opts.Verbose { c.sendOK() @@ -1163,12 +1163,12 @@ func (c *client) addRouteSubOrUnsubProtoToBuf(buf []byte, accName string, sub *s // complete interest for all subjects, both normal as a binary // and queue group weights. func (s *Server) sendSubsToRoute(route *client) { - s.mu.Lock() // Estimated size of all protocols. It does not have to be accurate at all. - eSize := 0 - // Send over our account subscriptions. - // copy accounts into array first + var eSize int + // Copy of accounts. accs := make([]*Account, 0, 32) + + s.mu.RLock() s.accounts.Range(func(k, v interface{}) bool { a := v.(*Account) accs = append(accs, a) @@ -1188,7 +1188,7 @@ func (s *Server) sendSubsToRoute(route *client) { a.mu.RUnlock() return true }) - s.mu.Unlock() + s.mu.RUnlock() buf := make([]byte, 0, eSize) @@ -1446,7 +1446,7 @@ func (s *Server) addRoute(c *client, info *Info) (bool, bool) { sendInfo = len(s.routes) > 1 // If the INFO contains a Gateway URL, add it to the list for our cluster. - if info.GatewayURL != "" && s.addGatewayURL(info.GatewayURL) { + if info.GatewayURL != _EMPTY_ && s.addGatewayURL(info.GatewayURL) { s.sendAsyncGatewayInfo() } } @@ -1589,12 +1589,12 @@ func (s *Server) updateRouteSubscriptionMap(acc *Account, sub *subscription, del var _routes [32]*client routes := _routes[:0] - s.mu.Lock() + s.mu.RLock() for _, route := range s.routes { routes = append(routes, route) } trace := atomic.LoadInt32(&s.logging.trace) == 1 - s.mu.Unlock() + s.mu.RUnlock() // If we are a queue subscriber we need to make sure our updates are serialized from // potential multiple connections. We want to make sure that the order above is preserved @@ -1732,7 +1732,7 @@ func (s *Server) startRouteAcceptLoop() { s.routeInfo = info // Possibly override Host/Port and set IP based on Cluster.Advertise if err := s.setRouteInfoHostPortAndIP(); err != nil { - s.Fatalf("Error setting route INFO with Cluster.Advertise value of %s, err=%v", s.opts.Cluster.Advertise, err) + s.Fatalf("Error setting route INFO with Cluster.Advertise value of %s, err=%v", opts.Cluster.Advertise, err) l.Close() s.mu.Unlock() return @@ -1772,8 +1772,9 @@ func (s *Server) startRouteAcceptLoop() { // Similar to setInfoHostPortAndGenerateJSON, but for routeInfo. 
func (s *Server) setRouteInfoHostPortAndIP() error { - if s.opts.Cluster.Advertise != "" { - advHost, advPort, err := parseHostPort(s.opts.Cluster.Advertise, s.opts.Cluster.Port) + opts := s.getOpts() + if opts.Cluster.Advertise != _EMPTY_ { + advHost, advPort, err := parseHostPort(opts.Cluster.Advertise, opts.Cluster.Port) if err != nil { return err } @@ -1781,8 +1782,8 @@ s.routeInfo.Port = advPort s.routeInfo.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(advHost, strconv.Itoa(advPort))) } else { - s.routeInfo.Host = s.opts.Cluster.Host - s.routeInfo.Port = s.opts.Cluster.Port + s.routeInfo.Host = opts.Cluster.Host + s.routeInfo.Port = opts.Cluster.Port s.routeInfo.IP = "" } // (re)generate the routeInfoJSON byte array @@ -1937,7 +1938,7 @@ func (c *client) processRouteConnect(srv *Server, arg []byte, lang string) error proto := &connectInfo{} if err := json.Unmarshal(arg, proto); err != nil { - c.Errorf("processRouteConnect: ", err) + c.Errorf("processRouteConnect: %v", err) // ** added by Memphis return err } // Reject if this has Gateway which means that it would be from a gateway @@ -1949,12 +1950,12 @@ c.closeConnection(WrongGateway) return ErrWrongGateway } - var perms *RoutePermissions - //TODO this check indicates srv may be nil. see srv usage below - if srv != nil { - perms = srv.getOpts().Cluster.Permissions + + if srv == nil { + return ErrServerNotRunning } + perms := srv.getOpts().Cluster.Permissions clusterName := srv.ClusterName() // If we have a cluster name set, make sure it matches ours. diff --git a/server/routes_test.go b/server/routes_test.go index afaf0ea4c..d6ff57522 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -14,9 +14,11 @@ package server import ( + "bufio" "bytes" "context" "crypto/tls" + "encoding/json" "fmt" "net" "net/url" @@ -586,7 +588,7 @@ func TestBlockedShutdownOnRouteAcceptLoopFailure(t *testing.T) { opts.Cluster.Port = 7222 s := New(opts) - go s.Start() + s.Start() // Wait a second time.Sleep(time.Second) ch := make(chan bool) @@ -701,7 +703,7 @@ func (l *checkDuplicateRouteLogger) Errorf(format string, v ...interface{}) {} func (l *checkDuplicateRouteLogger) Warnf(format string, v ...interface{}) {} func (l *checkDuplicateRouteLogger) Fatalf(format string, v ...interface{}) {} func (l *checkDuplicateRouteLogger) Tracef(format string, v ...interface{}) {} -func (l *checkDuplicateRouteLogger) Systemf(format string, v ...interface{}) {} +func (l *checkDuplicateRouteLogger) Systemf(format string, v ...interface{}) {} // ** added by Memphis func (l *checkDuplicateRouteLogger) Debugf(format string, v ...interface{}) { l.Lock() defer l.Unlock() @@ -1344,9 +1346,7 @@ func TestRouteIPResolutionAndRouteToSelf(t *testing.T) { defer s.Shutdown() l := &routeHostLookupLogger{errCh: make(chan string, 1), ch: make(chan bool, 1)} s.SetLogger(l, true, true) - go func() { - s.Start() - }() + s.Start() if err := s.readyForConnections(time.Second); err != nil { t.Fatal(err) } @@ -1758,3 +1758,81 @@ func TestRouteSaveTLSName(t *testing.T) { reloadUpdateConfig(t, s2, c2And3Conf, fmt.Sprintf(tmpl, "localhost", o1.Cluster.Port)) checkClusterFormed(t, s1, s2, s3) } + +func TestRouteNoLeakOnSlowConsumer(t *testing.T) { + o1 := DefaultOptions() + s1 := RunServer(o1) + defer s1.Shutdown() + + o2 := DefaultOptions() + o2.Routes = RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", o1.Cluster.Port)) + s2 := RunServer(o2) +
defer s2.Shutdown() + + checkClusterFormed(t, s1, s2) + + // For any route connections on the first server, drop the write + // deadline down and then get the client to try sending something. + // This should result in an effectively immediate write timeout, + // which will surface as a slow consumer. + s1.mu.Lock() + for _, cli := range s1.routes { + cli.out.wdl = time.Nanosecond + cli.sendRTTPing() + } + s1.mu.Unlock() + + // By now the routes should have gone down, so check that there + // aren't any routes listed still. + checkFor(t, time.Millisecond*500, time.Millisecond*25, func() error { + if nc := s1.NumRoutes(); nc != 0 { + return fmt.Errorf("Server 1 should have no route connections, got %v", nc) + } + if nc := s2.NumRoutes(); nc != 0 { + return fmt.Errorf("Server 2 should have no route connections, got %v", nc) + } + return nil + }) +} + +func TestRouteNoLeakOnAuthTimeout(t *testing.T) { + opts := DefaultOptions() + opts.Cluster.Username = "foo" + opts.Cluster.Password = "bar" + opts.AuthTimeout = 0.01 // Deliberately short timeout + s := RunServer(opts) + defer s.Shutdown() + + c, err := net.Dial("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Cluster.Port)) + if err != nil { + t.Fatalf("Error connecting: %v", err) + } + defer c.Close() + + cr := bufio.NewReader(c) + + // Wait for INFO... + line, _, _ := cr.ReadLine() + var info serverInfo + if err = json.Unmarshal(line[5:], &info); err != nil { + t.Fatalf("Could not parse INFO json: %v\n", err) + } + + // The server will send a PING, too + line, _, _ = cr.ReadLine() + if string(line) != "PING" { + t.Fatalf("Expected 'PING' but got %q", line) + } + + // Wait out the clock so we hit the auth timeout + time.Sleep(secondsToDuration(opts.AuthTimeout) * 2) + line, _, _ = cr.ReadLine() + if string(line) != "-ERR 'Authentication Timeout'" { + t.Fatalf("Expected '-ERR 'Authentication Timeout'' but got %q", line) + } + + // There shouldn't be a route entry as we didn't set up. + if nc := s.NumRoutes(); nc != 0 { + t.Fatalf("Server should have no route connections, got %v", nc) + } +} diff --git a/server/sendq.go b/server/sendq.go index 2c4139710..0287c5548 100644 --- a/server/sendq.go +++ b/server/sendq.go @@ -1,4 +1,4 @@ -// Copyright 2020-2021 The NATS Authors +// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -50,6 +50,14 @@ func (sq *sendq) internalLoop() { defer c.closeConnection(ClientClosed) + // To optimize for not converting a string to a []byte slice. + var ( + subj [256]byte + rply [256]byte + szb [10]byte + hdb [10]byte + ) + for s.isRunning() { select { case <-s.quitCh: @@ -57,14 +65,18 @@ func (sq *sendq) internalLoop() { case <-q.ch: pms := q.pop() for _, pm := range pms { - c.pa.subject = []byte(pm.subj) + c.pa.subject = append(subj[:0], pm.subj...) c.pa.size = len(pm.msg) + len(pm.hdr) - c.pa.szb = []byte(strconv.Itoa(c.pa.size)) - c.pa.reply = []byte(pm.rply) + c.pa.szb = append(szb[:0], strconv.Itoa(c.pa.size)...) + if len(pm.rply) > 0 { + c.pa.reply = append(rply[:0], pm.rply...) + } else { + c.pa.reply = nil + } var msg []byte if len(pm.hdr) > 0 { c.pa.hdr = len(pm.hdr) - c.pa.hdb = []byte(strconv.Itoa(c.pa.hdr)) + c.pa.hdb = append(hdb[:0], strconv.Itoa(c.pa.hdr)...) msg = append(pm.hdr, pm.msg...) msg = append(msg, _CRLF_...) 
} else { @@ -74,6 +86,7 @@ func (sq *sendq) internalLoop() { } c.processInboundClientMsg(msg) c.pa.szb = nil + outMsgPool.Put(pm) } // TODO: should this be in the for-loop instead? c.flushClients(0) @@ -82,8 +95,20 @@ func (sq *sendq) internalLoop() { } } +var outMsgPool = sync.Pool{ + New: func() any { + return &outMsg{} + }, +} + func (sq *sendq) send(subj, rply string, hdr, msg []byte) { - out := &outMsg{subj, rply, nil, nil} + if sq == nil { + return + } + out := outMsgPool.Get().(*outMsg) + out.subj, out.rply = subj, rply + out.hdr, out.msg = nil, nil + // We will copy these for now. if len(hdr) > 0 { hdr = copyBytes(hdr) diff --git a/server/server.go b/server/server.go index 3fd161da9..1a645848c 100644 --- a/server/server.go +++ b/server/server.go @@ -85,7 +85,7 @@ type Info struct { ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to. WSConnectURLs []string `json:"ws_connect_urls,omitempty"` // Contains URLs a ws client can connect to. LameDuckMode bool `json:"ldm,omitempty"` - ConnectionId string `json:"connection_id"` + ConnectionId string `json:"connection_id"` // ** added by memphis // Route Specific Import *SubjectPermission `json:"import,omitempty"` @@ -123,7 +123,6 @@ type Server struct { opts *Options running bool shutdown bool - reloading bool listener net.Listener listenerErr error gacc *Account @@ -250,6 +249,12 @@ type Server struct { // OCSP monitoring ocsps []*OCSPMonitor + // OCSP peer verification (at least one TLS block) + ocspPeerVerify bool + + // OCSP response cache + ocsprc OCSPResponseCache + // exporting account name the importer experienced issues with incompleteAccExporterMap sync.Map @@ -295,7 +300,7 @@ type Server struct { // Queue to process JS API requests that come from routes (or gateways) jsAPIRoutedReqs *ipQueue[*jsAPIRoutedReq] - memphis srvMemphis + memphis srvMemphis // ** added by memphis } // For tracking JS nodes. @@ -414,7 +419,7 @@ func NewServer(opts *Options) (*Server, error) { info.TLSAvailable = true } - now := time.Now().UTC() + now := time.Now() s := &Server{ kp: kp, @@ -494,8 +499,8 @@ func NewServer(opts *Options) (*Server, error) { // Ensure that non-exported options (used in tests) are properly set. s.setLeafNodeNonExportedOptions() - // Setup OCSP Stapling. This will abort server from starting if there - // are no valid staples and OCSP policy is set to Always or MustStaple. + // Setup OCSP Stapling and OCSP Peer. This will abort server from starting if there + // are no valid staples and OCSP Stapling policy is set to Always or MustStaple. 
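The sendq.go hunk above avoids two per-message allocations: subject, reply, and size strings are appended into fixed scratch arrays instead of being converted with []byte(s), and outMsg structs are recycled through a sync.Pool once fully processed. A minimal, self-contained sketch of both patterns, with invented names (msg, msgPool, process), not the server's actual types:

package main

import (
	"fmt"
	"sync"
)

type msg struct{ subj string }

// Pool of msg structs so steady-state publishing does not allocate.
var msgPool = sync.Pool{New: func() any { return &msg{} }}

// process reuses a fixed scratch array: append(scratch[:0], s...) writes the
// string's bytes into the existing backing array rather than allocating a
// fresh []byte per message (as long as the subject fits in 256 bytes).
func process(pm *msg, scratch *[256]byte) {
	subj := append(scratch[:0], pm.subj...)
	fmt.Printf("processing subject %s\n", subj)
}

func main() {
	var scratch [256]byte
	for _, s := range []string{"foo", "bar.baz"} {
		pm := msgPool.Get().(*msg)
		pm.subj = s
		process(pm, &scratch)
		msgPool.Put(pm) // recycle only after the message is fully processed
	}
}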
if err := s.enableOCSP(); err != nil { return nil, err } @@ -571,22 +576,22 @@ func NewServer(opts *Options) (*Server, error) { s.mu.Unlock() var a *Account // perform direct lookup to avoid warning trace - if _, err := fetchAccount(ar, s.opts.SystemAccount); err == nil { - a, _ = s.lookupAccount(s.opts.SystemAccount) + if _, err := fetchAccount(ar, opts.SystemAccount); err == nil { + a, _ = s.lookupAccount(opts.SystemAccount) } s.mu.Lock() if a == nil { - sac := NewAccount(s.opts.SystemAccount) + sac := NewAccount(opts.SystemAccount) sac.Issuer = opts.TrustedOperators[0].Issuer sac.signingKeys = map[string]jwt.Scope{} - sac.signingKeys[s.opts.SystemAccount] = nil + sac.signingKeys[opts.SystemAccount] = nil s.registerAccountNoLock(sac) } } } // For tracking accounts - if err := s.configureAccounts(); err != nil { + if _, err := s.configureAccounts(false); err != nil { return nil, err } @@ -779,6 +784,7 @@ func (s *Server) globalAccount() *Account { return gacc } +// ** added by Memphis func (s *Server) MemphisGlobalAccount() *Account { acc := MEMPHIS_GLOBAL_ACCOUNT if !configuration.USER_PASS_BASED_AUTH { @@ -801,40 +807,89 @@ func (s *Server) MemphisGlobalAccountString() string { return acc } -// Used to setup Accounts. -// Lock is held upon entry. -func (s *Server) configureAccounts() error { +// ** added by Memphis + +// Used to setup or update Accounts. +// Returns a map that indicates which accounts have had their stream imports +// changed (in case of an update in configuration reload). +// Lock is held upon entry, but will be released/reacquired in this function. +func (s *Server) configureAccounts(reloading bool) (map[string]struct{}, error) { + awcsti := make(map[string]struct{}) + // Create the global account. if s.gacc == nil { s.gacc = NewAccount(globalAccountName) s.registerAccountNoLock(s.gacc) } - opts := s.opts + opts := s.getOpts() + + // We need to track service imports since we can not swap them out (unsub and re-sub) + // until the proper server struct accounts have been swapped in properly. Doing it in + // place could lead to data loss or server panic since account under new si has no real + // account and hence no sublist, so will panic on inbound message. + siMap := make(map[*Account][][]byte) // Check opts and walk through them. We need to copy them here // so that we do not keep a real one sitting in the options. - for _, acc := range s.opts.Accounts { + for _, acc := range opts.Accounts { var a *Account - if acc.Name == globalAccountName { - a = s.gacc - } else { - a = acc.shallowCopy() + create := true + // For the global account, we want to skip the reload process + // and fall back into the "create" case which will in that + // case really be just an update (shallowCopy will make sure + // that mappings are copied over). + if reloading && acc.Name != globalAccountName { + if ai, ok := s.accounts.Load(acc.Name); ok { + a = ai.(*Account) + a.mu.Lock() + // Before updating the account, check if stream imports have changed. + if !a.checkStreamImportsEqual(acc) { + awcsti[acc.Name] = struct{}{} + } + // Collect the sids for the service imports since we are going to + // replace with new ones. + var sids [][]byte + for _, si := range a.imports.services { + if si.sid != nil { + sids = append(sids, si.sid) + } + } + // Setup to process later if needed. 
+ if len(sids) > 0 || len(acc.imports.services) > 0 { + siMap[a] = sids + } + + // Now reset all export/imports fields since they are going to be + // filled in shallowCopy() + a.imports.streams, a.imports.services = nil, nil + a.exports.streams, a.exports.services = nil, nil + // We call shallowCopy from the account `acc` (the one in Options) + // and pass `a` (our existing account) to get it updated. + acc.shallowCopy(a) + a.mu.Unlock() + create = false + } } - if acc.hasMappings() { - // For now just move and wipe from opts.Accounts version. - a.mappings = acc.mappings - acc.mappings = nil - // We use this for selecting between multiple weighted destinations. - a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) + if create { + if acc.Name == globalAccountName { + a = s.gacc + } else { + a = NewAccount(acc.Name) + } + // Locking matters in the case of an update of the global account + a.mu.Lock() + acc.shallowCopy(a) + a.mu.Unlock() + // Will be a no-op in case of the global account since it is already registered. + s.registerAccountNoLock(a) } - acc.sl = nil - acc.clients = nil - s.registerAccountNoLock(a) + // The `acc` account is stored in options, not in the server, and these can be cleared. + acc.sl, acc.clients, acc.mappings = nil, nil, nil // If we see an account defined using $SYS we will make sure that is set as system account. if acc.Name == DEFAULT_SYSTEM_ACCOUNT && opts.SystemAccount == _EMPTY_ { - s.opts.SystemAccount = DEFAULT_SYSTEM_ACCOUNT + opts.SystemAccount = DEFAULT_SYSTEM_ACCOUNT } } @@ -853,6 +908,7 @@ func (s *Server) configureAccounts() error { s.accounts.Range(func(k, v interface{}) bool { numAccounts++ acc := v.(*Account) + acc.mu.Lock() // Exports for _, se := range acc.exports.streams { if se != nil { @@ -879,19 +935,47 @@ func (s *Server) configureAccounts() error { for _, si := range acc.imports.services { if v, ok := s.accounts.Load(si.acc.Name); ok { si.acc = v.(*Account) + + // It is possible to allow for latency tracking inside your + // own account, so lock only when not the same account. + if si.acc == acc { + si.se = si.acc.getServiceExport(si.to) + continue + } + si.acc.mu.RLock() si.se = si.acc.getServiceExport(si.to) + si.acc.mu.RUnlock() } } // Make sure the subs are running, but only if not reloading. - if len(acc.imports.services) > 0 && acc.ic == nil && !s.reloading { + if len(acc.imports.services) > 0 && acc.ic == nil && !reloading { acc.ic = s.createInternalAccountClient() acc.ic.acc = acc + // Need to release locks to invoke this function. + acc.mu.Unlock() + s.mu.Unlock() acc.addAllServiceImportSubs() + s.mu.Lock() + acc.mu.Lock() } - acc.updated = time.Now().UTC() + acc.updated = time.Now() + acc.mu.Unlock() return true }) + // Check if we need to process service imports pending from above. + // This processing needs to be after we swap in the real accounts above. + for acc, sids := range siMap { + c := acc.ic + for _, sid := range sids { + c.processUnsub(sid) + } + acc.addAllServiceImportSubs() + s.mu.Unlock() + s.registerSystemImports(acc) + s.mu.Lock() + } + // Set the system account if it was configured. // Otherwise create a default one. if opts.SystemAccount != _EMPTY_ { @@ -908,14 +992,14 @@ func (s *Server) configureAccounts() error { s.mu.Lock() } if err != nil { - return fmt.Errorf("error resolving system account: %v", err) + return awcsti, fmt.Errorf("error resolving system account: %v", err) } // If we have defined a system account here check to see if its just us and the $G account. 
// We would do this to add user/pass to the system account. If this is the case add in // no-auth-user for $G. // Only do this if non-operator mode. - if len(opts.TrustedOperators) == 0 && numAccounts == 2 && s.opts.NoAuthUser == _EMPTY_ { + if len(opts.TrustedOperators) == 0 && numAccounts == 2 && opts.NoAuthUser == _EMPTY_ { // If we come here from config reload, let's not recreate the fake user name otherwise // it will cause currently clients to be disconnected. uname := s.sysAccOnlyNoAuthUser @@ -930,12 +1014,12 @@ func (s *Server) configureAccounts() error { uname = fmt.Sprintf("nats-%s", b[:]) s.sysAccOnlyNoAuthUser = uname } - s.opts.Users = append(s.opts.Users, &User{Username: uname, Password: uname[6:], Account: s.gacc}) - s.opts.NoAuthUser = uname + opts.Users = append(opts.Users, &User{Username: uname, Password: uname[6:], Account: s.gacc}) + opts.NoAuthUser = uname } } - return nil + return awcsti, nil } // Setup the account resolver. For memory resolver, make sure the JWTs are @@ -1064,16 +1148,17 @@ func (s *Server) isTrustedIssuer(issuer string) bool { // options-based trusted nkeys. Returns success. func (s *Server) processTrustedKeys() bool { s.strictSigningKeyUsage = map[string]struct{}{} + opts := s.getOpts() if trustedKeys != _EMPTY_ && !s.initStampedTrustedKeys() { return false - } else if s.opts.TrustedKeys != nil { - for _, key := range s.opts.TrustedKeys { + } else if opts.TrustedKeys != nil { + for _, key := range opts.TrustedKeys { if !nkeys.IsValidPublicOperatorKey(key) { return false } } - s.trustedKeys = append([]string(nil), s.opts.TrustedKeys...) - for _, claim := range s.opts.TrustedOperators { + s.trustedKeys = append([]string(nil), opts.TrustedKeys...) + for _, claim := range opts.TrustedOperators { if !claim.StrictSigningKeyUsage { continue } @@ -1106,7 +1191,7 @@ func checkTrustedKeyString(keys string) []string { // it succeeded or not. func (s *Server) initStampedTrustedKeys() bool { // Check to see if we have an override in options, which will cause us to fail. - if len(s.opts.TrustedKeys) > 0 { + if len(s.getOpts().TrustedKeys) > 0 { return false } tks := checkTrustedKeyString(trustedKeys) @@ -1322,13 +1407,13 @@ func (s *Server) setSystemAccount(acc *Account) error { servers: make(map[string]*serverUpdate), replies: make(map[string]msgHandler), sendq: newIPQueue[*pubMsg](s, "System sendQ"), + recvq: newIPQueue[*inSysMsg](s, "System recvQ"), resetCh: make(chan struct{}), sq: s.newSendQ(), statsz: eventsHBInterval, orphMax: 5 * eventsHBInterval, chkOrph: 3 * eventsHBInterval, } - s.sys.wg.Add(1) s.mu.Unlock() @@ -1341,6 +1426,9 @@ func (s *Server) setSystemAccount(acc *Account) error { // We do our own wg here since we will stop first during shutdown. go s.internalSendLoop(&s.sys.wg) + // Start the internal loop for inbound messages. + go s.internalReceiveLoop() + // Start up our general subscriptions s.initEventTracking() @@ -1382,7 +1470,7 @@ func (s *Server) createInternalClient(kind int) *client { if kind != SYSTEM && kind != JETSTREAM && kind != ACCOUNT { return nil } - now := time.Now().UTC() + now := time.Now() c := &client{srv: s, kind: kind, opts: internalOpts, msubs: -1, mpay: -1, start: now, last: now} c.initClient() c.echo = false @@ -1395,7 +1483,8 @@ func (s *Server) createInternalClient(kind int) *client { // efficient propagation. // Lock should be held on entry. 
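A recurring change in this patch (in the route, accept-loop, and info-host hunks as well as the account code above) replaces direct s.opts field reads with a single opts := s.getOpts() snapshot. A rough sketch of why that matters under config reload, with illustrative stand-in types rather than the real Options plumbing:

package main

import (
	"fmt"
	"sync"
)

// Stand-ins for the real server types; the actual Options struct is far larger.
type Options struct {
	Host string
	Port int
}

type Server struct {
	optsMu sync.RWMutex
	opts   *Options
}

// getOpts hands back the current snapshot under a read lock.
func (s *Server) getOpts() *Options {
	s.optsMu.RLock()
	defer s.optsMu.RUnlock()
	return s.opts
}

// reload swaps the whole pointer; snapshots handed out earlier stay coherent.
func (s *Server) reload(no *Options) {
	s.optsMu.Lock()
	s.opts = no
	s.optsMu.Unlock()
}

func main() {
	s := &Server{opts: &Options{Host: "0.0.0.0", Port: 4222}}
	opts := s.getOpts() // snapshot once per operation...
	s.reload(&Options{Host: "127.0.0.1", Port: 5222})
	// ...so every later read is consistent, even across a reload.
	fmt.Println(opts.Host, opts.Port) // 0.0.0.0 4222
}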
func (s *Server) shouldTrackSubscriptions() bool { - return (s.opts.Cluster.Port != 0 || s.opts.Gateway.Port != 0) + opts := s.getOpts() + return (opts.Cluster.Port != 0 || opts.Gateway.Port != 0) } // Invokes registerAccountNoLock under the protection of the server lock. @@ -1451,7 +1540,7 @@ func (s *Server) registerAccountNoLock(acc *Account) *Account { acc.lqws = make(map[string]int32) } acc.srv = s - acc.updated = time.Now().UTC() + acc.updated = time.Now() accName := acc.Name jsEnabled := len(acc.jsLimits) > 0 acc.mu.Unlock() @@ -1653,20 +1742,32 @@ func (s *Server) fetchAccount(name string) (*Account, error) { } // The sub imports may have been setup but will not have had their // subscriptions properly setup. Do that here. + var needImportSubs bool + + acc.mu.Lock() if len(acc.imports.services) > 0 { if acc.ic == nil { acc.ic = s.createInternalAccountClient() acc.ic.acc = acc } + needImportSubs = true + } + acc.mu.Unlock() + + // Do these outside the lock. + if needImportSubs { acc.addAllServiceImportSubs() } + return acc, nil } -// Start up the server, this will block. -// Start via a Go routine if needed. +// Start up the server, this will not block. +// +// WaitForShutdown can be used to block and wait for the server to shutdown properly if needed +// after calling s.Shutdown() func (s *Server) Start() { - s.Noticef("Starting Memphis{dev} broker") + s.Noticef("Starting Memphis{dev} broker") // ** changed by Memphis gc := gitCommit if gc == _EMPTY_ { @@ -1677,10 +1778,19 @@ func (s *Server) Start() { opts := s.getOpts() clusterName := s.ClusterName() - s.Noticef("Version: %s", s.MemphisVersion()) + s.Noticef("Version: %s", s.MemphisVersion()) // ** changed by Memphis if clusterName != _EMPTY_ { s.Noticef(" Cluster: %s", clusterName) } + // ** deleted by Memphis + // s.Noticef(" Name: %s", s.info.Name) + // if opts.JetStream { + // s.Noticef(" Node: %s", getHash(s.info.Name)) + // } + // s.Noticef(" ID: %s", s.info.ID) + + // defer s.Noticef("Server is ready") + // ** deleted by Memphis // Check for insecure configurations. s.checkAuthforWarnings() @@ -1780,8 +1890,9 @@ func (s *Server) Start() { // In operator mode, when the account resolver depends on an external system and // the system account is the bootstrapping account, start fetching it. if len(opts.TrustedOperators) == 1 && opts.SystemAccount != _EMPTY_ && opts.SystemAccount != DEFAULT_SYSTEM_ACCOUNT { + opts := s.getOpts() _, isMemResolver := ar.(*MemAccResolver) - if v, ok := s.accounts.Load(s.opts.SystemAccount); !isMemResolver && ok && v.(*Account).claimJWT == "" { + if v, ok := s.accounts.Load(opts.SystemAccount); !isMemResolver && ok && v.(*Account).claimJWT == _EMPTY_ { s.Noticef("Using bootstrapping system account") s.startGoRoutine(func() { defer s.grWG.Done() @@ -1793,7 +1904,7 @@ func (s *Server) Start() { return case <-t.C: sacc := s.SystemAccount() - if claimJWT, err := fetchAccount(ar, s.opts.SystemAccount); err != nil { + if claimJWT, err := fetchAccount(ar, opts.SystemAccount); err != nil { continue } else if err = s.updateAccountWithClaimJWT(sacc, claimJWT); err != nil { continue @@ -1866,15 +1977,20 @@ func (s *Server) Start() { } } - // Start OCSP Stapling monitoring for TLS certificates if enabled. + // Start OCSP Stapling monitoring for TLS certificates if enabled. Hook TLS handshake for + // OCSP check on peers (LEAF and CLIENT kind) if enabled. s.startOCSPMonitoring() + // Configure OCSP Response Cache for peer OCSP checks if enabled. + s.initOCSPResponseCache() + // Start up gateway if needed. 
Do this before starting the routes, because // we want to resolve the gateway host:port so that this information can // be sent to other routes. if opts.Gateway.Port != 0 { s.startGateways() } + // Start websocket server if needed. Do this before starting the routes, and // leaf node because we want to resolve the gateway host:port so that this // information can be sent to other routes. @@ -1934,6 +2050,10 @@ func (s *Server) Start() { if !opts.DontListen { s.AcceptLoop(clientListenReady) } + + // Bring OCSP Response cache online after accept loop started in anticipation of NATS-enabled cache types + s.startOCSPResponseCache() + //** added by memphis s.initializeMemphis() // added by memphis ** @@ -2097,6 +2217,12 @@ func (s *Server) Shutdown() { } s.Noticef("Server Exiting..") + + // Stop OCSP Response Cache + if s.ocsprc != nil { + s.ocsprc.Stop(s) + } + // Close logger if applicable. It allows tests on Windows // to be able to do proper cleanup (delete log file). s.logging.RLock() @@ -2164,7 +2290,7 @@ func (s *Server) AcceptLoop(clr chan struct{}) { // server's info Host/Port with either values from Options or // ClientAdvertise. if err := s.setInfoHostPort(); err != nil { - s.Fatalf("Error setting server INFO with ClientAdvertise value of %s, err=%v", s.opts.ClientAdvertise, err) + s.Fatalf("Error setting server INFO with ClientAdvertise value of %s, err=%v", opts.ClientAdvertise, err) l.Close() s.mu.Unlock() return @@ -2199,7 +2325,7 @@ func (s *Server) AcceptLoop(clr chan struct{}) { func (s *Server) InProcessConn() (net.Conn, error) { pl, pr := net.Pipe() if !s.startGoRoutine(func() { - s.createClient(pl) + s.createClientInProcess(pl) s.grWG.Done() }) { pl.Close() @@ -2243,16 +2369,17 @@ func (s *Server) setInfoHostPort() error { // When this function is called, opts.Port is set to the actual listen // port (if option was originally set to RANDOM), even during a config // reload. So use of s.opts.Port is safe. - if s.opts.ClientAdvertise != _EMPTY_ { - h, p, err := parseHostPort(s.opts.ClientAdvertise, s.opts.Port) + opts := s.getOpts() + if opts.ClientAdvertise != _EMPTY_ { + h, p, err := parseHostPort(opts.ClientAdvertise, opts.Port) if err != nil { return err } s.info.Host = h s.info.Port = p } else { - s.info.Host = s.opts.Host - s.info.Port = s.opts.Port + s.info.Host = opts.Host + s.info.Port = opts.Port } return nil } @@ -2438,6 +2565,9 @@ func (s *Server) startMonitoring(secure bool) error { return fmt.Errorf("can't listen to the monitor port: %v", err) } + rport := httpListener.Addr().(*net.TCPAddr).Port + s.Noticef("Starting %s monitor on %s", monitorProtocol, net.JoinHostPort(opts.HTTPHost, strconv.Itoa(rport))) + mux := http.NewServeMux() // Root @@ -2550,6 +2680,14 @@ func (c *tlsMixConn) Read(b []byte) (int, error) { } func (s *Server) createClient(conn net.Conn) *client { + return s.createClientEx(conn, false) +} + +func (s *Server) createClientInProcess(conn net.Conn) *client { + return s.createClientEx(conn, true) +} + +func (s *Server) createClientEx(conn net.Conn, inProcess bool) *client { // Snapshot server options. 
opts := s.getOpts() @@ -2559,7 +2697,7 @@ func (s *Server) createClient(conn net.Conn) *client { if maxSubs == 0 { maxSubs = -1 } - now := time.Now().UTC() + now := time.Now() c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now} @@ -2587,6 +2725,13 @@ func (s *Server) createClient(conn net.Conn) *client { info.AuthRequired = false } + // Check to see if this is an in-process connection with tls_required. + // If so, set as not required, but available. + if inProcess && info.TLSRequired { + info.TLSRequired = false + info.TLSAvailable = true + } + s.totalClients++ s.mu.Unlock() @@ -2648,8 +2793,9 @@ func (s *Server) createClient(conn net.Conn) *client { var pre []byte // If we have both TLS and non-TLS allowed we need to see which - // one the client wants. - if !isClosed && opts.TLSConfig != nil && opts.AllowNonTLS { + // one the client wants. We'll always allow this for in-process + // connections. + if !isClosed && opts.TLSConfig != nil && (inProcess || opts.AllowNonTLS) { pre = make([]byte, 4) c.nc.SetReadDeadline(time.Now().Add(secondsToDuration(opts.TLSTimeout))) n, _ := io.ReadFull(c.nc, pre[:]) @@ -2707,9 +2853,7 @@ func (s *Server) createClient(conn net.Conn) *client { c.setPingTimer() // Spin up the read loop. - s.startGoRoutine(func() { - c.readLoop(pre) - }) + s.startGoRoutine(func() { c.readLoop(pre) }) // Spin up the write loop. s.startGoRoutine(func() { c.writeLoop() }) @@ -2728,7 +2872,7 @@ func (s *Server) createClient(conn net.Conn) *client { // This will save off a closed client in a ring buffer such that // /connz can inspect. Useful for debugging, etc. func (s *Server) saveClosedClient(c *client, nc net.Conn, reason ClosedState) { - now := time.Now().UTC() + now := time.Now() s.accountDisconnectEvent(c, now, reason.String()) @@ -2747,7 +2891,7 @@ func (s *Server) saveClosedClient(c *client, nc net.Conn, reason ClosedState) { } } // Hold user as well. - cc.user = c.opts.Username + cc.user = c.getRawAuthUser() // Hold account name if not the global account. if c.acc != nil && c.acc.Name != globalAccountName { cc.acc = c.acc.Name @@ -3051,7 +3195,7 @@ func (s *Server) readyForConnections(d time.Duration) error { chk["server"] = info{ok: s.listener != nil || opts.DontListen, err: s.listenerErr} chk["route"] = info{ok: (opts.Cluster.Port == 0 || s.routeListener != nil), err: s.routeListenerErr} chk["gateway"] = info{ok: (opts.Gateway.Name == _EMPTY_ || s.gatewayListener != nil), err: s.gatewayListenerErr} - chk["leafNode"] = info{ok: (opts.LeafNode.Port == 0 || s.leafNodeListener != nil), err: s.leafNodeListenerErr} + chk["leafnode"] = info{ok: (opts.LeafNode.Port == 0 || s.leafNodeListener != nil), err: s.leafNodeListenerErr} chk["websocket"] = info{ok: (opts.Websocket.Port == 0 || s.websocket.listener != nil), err: s.websocket.listenerErr} chk["mqtt"] = info{ok: (opts.MQTT.Port == 0 || s.mqtt.listener != nil), err: s.mqtt.listenerErr} s.mu.RUnlock() @@ -3486,6 +3630,7 @@ func (s *Server) lameDuckMode() { } s.Noticef("Entering lame duck mode, stop accepting new clients") s.ldm = true + s.sendLDMShutdownEventLocked() expected := 1 s.listener.Close() s.listener = nil @@ -3540,7 +3685,10 @@ func (s *Server) lameDuckMode() { numClients := int64(len(s.clients)) batch := 1 // Sleep interval between each client connection close. 
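The InProcessConn and createClientEx changes above hand one end of a net.Pipe to the regular client accept path and relax TLS for it, since an in-memory pipe never crosses the network. A runnable toy showing the net.Pipe shape, with the server side reduced to an echo loop and all names invented:

package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	// net.Pipe gives two synchronously connected in-memory net.Conns:
	// no TCP listener involved, and nothing for TLS to protect.
	srvConn, clientConn := net.Pipe()

	// Hand one end to the "server", as if it had come from Accept().
	go func() {
		line, _ := bufio.NewReader(srvConn).ReadString('\n')
		fmt.Fprintf(srvConn, "echo: %s", line)
	}()

	fmt.Fprintf(clientConn, "PING\n")
	reply, _ := bufio.NewReader(clientConn).ReadString('\n')
	fmt.Print(reply) // echo: PING
	clientConn.Close()
	srvConn.Close()
}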
- si := dur / numClients + var si int64 + if numClients != 0 { + si = dur / numClients + } if si < 1 { // Should not happen (except in test with very small LD duration), but // if there are too many clients, batch the number of close and @@ -3745,7 +3893,7 @@ func (s *Server) updateRemoteSubscription(acc *Account, sub *subscription, delta s.gatewayUpdateSubInterest(acc.Name, sub, delta) } - s.updateLeafNodes(acc, sub, delta) + acc.updateLeafNodes(sub, delta) } func (s *Server) startRateLimitLogExpiration() { diff --git a/server/server_test.go b/server/server_test.go index 27cce2354..467a08441 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -86,7 +86,7 @@ func RunServer(opts *Options) *Server { } // Run server in Go routine. - go s.Start() + s.Start() // Wait for accept loop(s) to be started if err := s.readyForConnections(10 * time.Second); err != nil { diff --git a/server/signal_test.go b/server/signal_test.go index 4a2064668..14a8e1d5b 100644 --- a/server/signal_test.go +++ b/server/signal_test.go @@ -44,7 +44,7 @@ func TestSignalToReOpenLogFile(t *testing.T) { defer s.Shutdown() // Set the file log - fileLog := logger.NewFileLogger(s.opts.LogFile, s.opts.Logtime, s.opts.Debug, s.opts.Trace, true) + fileLog := logger.NewFileLogger(s.opts.LogFile, s.opts.Logtime, s.opts.Debug, s.opts.Trace, true, logger.LogUTC(s.opts.LogtimeUTC)) s.SetLogger(fileLog, false, false) // Add a trace diff --git a/server/split_test.go b/server/split_test.go index 76c40260a..571e9ff73 100644 --- a/server/split_test.go +++ b/server/split_test.go @@ -10,6 +10,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + package server import ( diff --git a/server/store.go b/server/store.go index 24326e604..3fdf1c868 100644 --- a/server/store.go +++ b/server/store.go @@ -61,8 +61,6 @@ var ( ErrInvalidSequence = errors.New("invalid sequence") // ErrSequenceMismatch is returned when storing a raw message and the expected sequence is wrong. ErrSequenceMismatch = errors.New("expected sequence does not match store") - // ErrPurgeArgMismatch is returned when PurgeEx is called with sequence > 1 and keep > 0. - ErrPurgeArgMismatch = errors.New("sequence > 1 && keep > 0 not allowed") ) // StoreMsg is the stored message format for messages that are retained by the Store layer. @@ -157,6 +155,9 @@ type SimpleState struct { Msgs uint64 `json:"messages"` First uint64 `json:"first_seq"` Last uint64 `json:"last_seq"` + + // Internal usage for when the first needs to be updated before use. + firstNeedsUpdate bool } // LostStreamData indicates msgs that have been lost. diff --git a/server/stream.go b/server/stream.go index 05fba073b..dd2c97bef 100644 --- a/server/stream.go +++ b/server/stream.go @@ -57,7 +57,7 @@ type StreamConfig struct { Placement *Placement `json:"placement,omitempty"` Mirror *StreamSource `json:"mirror,omitempty"` Sources []*StreamSource `json:"sources,omitempty"` - TieredStorageEnabled bool `json:"tiered_storage_enabled"` + TieredStorageEnabled bool `json:"tiered_storage_enabled"` // ** added by memphis // Allow republish of the message after being sequenced and stored. 
RePublish *RePublish `json:"republish,omitempty"` @@ -194,6 +194,7 @@ type stream struct { pubAck []byte outq *jsOutQ msgs *ipQueue[*inMsg] + gets *ipQueue[*directGetReq] store StreamStore ackq *ipQueue[uint64] lseq uint64 @@ -211,6 +212,7 @@ type stream struct { qch chan struct{} active bool ddloaded bool + closed bool // Mirror mirror *sourceInfo @@ -231,6 +233,10 @@ type stream struct { sigq *ipQueue[*cMsg] csl *Sublist + // For non limits policy streams when they process an ack before the actual msg. + // Can happen in stretch clusters, multi-cloud, or during catchup for a restarted server. + preAcks map[uint64]map[*consumer]struct{} + // TODO(dlc) - Hide everything below behind two pointers. // Clustered mode. sa *streamAssignment @@ -246,6 +252,7 @@ type stream struct { catchups map[string]uint64 uch chan struct{} compressOK bool + inMonitor bool // Direct get subscription. directSub *subscription @@ -282,7 +289,7 @@ const ( // Headers for published messages. const ( - JSMsgId = "msg-id" + JSMsgId = "msg-id" // ** changed by Memphis JSExpectedStream = "Nats-Expected-Stream" JSExpectedLastSeq = "Nats-Expected-Last-Sequence" JSExpectedLastSubjSeq = "Nats-Expected-Last-Subject-Sequence" @@ -361,10 +368,23 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt return nil, ApiErrors[JSStreamReplicasNotSupportedErr] } + // Make sure we are ok when these are done in parallel. + v, loaded := jsa.inflight.LoadOrStore(cfg.Name, &sync.WaitGroup{}) + wg := v.(*sync.WaitGroup) + if loaded { + wg.Wait() + } else { + wg.Add(1) + defer func() { + jsa.inflight.Delete(cfg.Name) + wg.Done() + }() + } + js, isClustered := jsa.jetStreamAndClustered() - jsa.mu.RLock() + jsa.mu.Lock() if mset, ok := jsa.streams[cfg.Name]; ok { - jsa.mu.RUnlock() + jsa.mu.Unlock() // Check to see if configs are same. ocfg := mset.config() if reflect.DeepEqual(ocfg, cfg) { @@ -383,7 +403,8 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt if !isClustered { reserved = jsa.tieredReservation(tier, &cfg) } - jsa.mu.RUnlock() + jsa.mu.Unlock() + if !hasTier { return nil, NewJSNoLimitsError() } @@ -441,6 +462,7 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt stype: cfg.Storage, consumers: make(map[string]*consumer), msgs: newIPQueue[*inMsg](s, qpfx+"messages"), + gets: newIPQueue[*directGetReq](s, qpfx+"direct gets"), qch: make(chan struct{}), uch: make(chan struct{}, 4), sch: make(chan struct{}, 1), @@ -580,6 +602,20 @@ func (mset *stream) streamAssignment() *streamAssignment { } func (mset *stream) setStreamAssignment(sa *streamAssignment) { + var node RaftNode + + mset.mu.RLock() + js := mset.js + mset.mu.RUnlock() + + if js != nil { + js.mu.RLock() + if sa.Group != nil { + node = sa.Group.node + } + js.mu.RUnlock() + } + mset.mu.Lock() defer mset.mu.Unlock() @@ -589,7 +625,7 @@ func (mset *stream) setStreamAssignment(sa *streamAssignment) { } // Set our node. 
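setStreamAssignment above is restructured so the raft node is read under the js lock before the stream lock is taken, instead of nesting one lock inside the other. A small sketch of that deadlock-avoidance ordering, using two invented structures that stand in for the cluster state and the stream:

package main

import (
	"fmt"
	"sync"
)

type cluster struct {
	mu   sync.RWMutex
	node string
}

type stream struct {
	mu   sync.Mutex
	node string
}

// setAssignment copies what it needs out of the cluster first, then takes
// its own lock; the two locks are never held at the same time, so there is
// no lock-order cycle with code that acquires them in the other direction.
func (st *stream) setAssignment(c *cluster) {
	c.mu.RLock()
	node := c.node
	c.mu.RUnlock()

	st.mu.Lock()
	st.node = node
	st.mu.Unlock()
}

func main() {
	c := &cluster{node: "n1"}
	st := &stream{}
	st.setAssignment(c)
	fmt.Println(st.node) // n1
}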
- mset.node = sa.Group.node + mset.node = node if mset.node != nil { mset.node.UpdateKnownPeers(sa.Group.Peers) } @@ -803,6 +839,14 @@ func (mset *stream) lastSeqAndCLFS() (uint64, uint64) { return mset.lseq, mset.clfs } +func (mset *stream) clearCLFS() uint64 { + mset.mu.Lock() + defer mset.mu.Unlock() + clfs := mset.clfs + mset.clfs, mset.clseq = 0, 0 + return clfs +} + func (mset *stream) lastSeq() uint64 { mset.mu.RLock() lseq := mset.lseq @@ -1077,62 +1121,68 @@ func (s *Server) checkStreamCfg(config *StreamConfig, acc *Account) (StreamConfi if len(cfg.Sources) > 0 { return StreamConfig{}, NewJSMirrorWithSourcesError() } - // We do not require other stream to exist anymore, but if we can see it check payloads. - exists, maxMsgSize, subs := hasStream(cfg.Mirror.Name) - if len(subs) > 0 { - streamSubs = append(streamSubs, subs...) - } - if exists { - if cfg.MaxMsgSize > 0 && maxMsgSize > 0 && cfg.MaxMsgSize < maxMsgSize { - return StreamConfig{}, NewJSMirrorMaxMessageSizeTooBigError() + // Do not perform checks if External is provided, as it could lead to + // checking against itself (if sourced stream name is the same on different JetStream) + if cfg.Mirror.External == nil { + // We do not require other stream to exist anymore, but if we can see it check payloads. + exists, maxMsgSize, subs := hasStream(cfg.Mirror.Name) + if len(subs) > 0 { + streamSubs = append(streamSubs, subs...) } - if !isRecovering && !hasFilterSubjectOverlap(cfg.Mirror.FilterSubject, subs) { - return StreamConfig{}, NewJSStreamInvalidConfigError( - fmt.Errorf("mirror '%s' filter subject '%s' does not overlap with any origin stream subject", - cfg.Mirror.Name, cfg.Mirror.FilterSubject)) + if exists { + if cfg.MaxMsgSize > 0 && maxMsgSize > 0 && cfg.MaxMsgSize < maxMsgSize { + return StreamConfig{}, NewJSMirrorMaxMessageSizeTooBigError() + } + if !isRecovering && !hasFilterSubjectOverlap(cfg.Mirror.FilterSubject, subs) { + return StreamConfig{}, NewJSStreamInvalidConfigError( + fmt.Errorf("mirror '%s' filter subject '%s' does not overlap with any origin stream subject", + cfg.Mirror.Name, cfg.Mirror.FilterSubject)) + } } - } - if cfg.Mirror.External != nil { + // Determine if we are inheriting direct gets. + if exists, ocfg := getStream(cfg.Mirror.Name); exists { + cfg.MirrorDirect = ocfg.AllowDirect + } else if js := s.getJetStream(); js != nil && js.isClustered() { + // Could not find it here. If we are clustered we can look it up. + js.mu.RLock() + if cc := js.cluster; cc != nil { + if as := cc.streams[acc.Name]; as != nil { + if sa := as[cfg.Mirror.Name]; sa != nil { + cfg.MirrorDirect = sa.Config.AllowDirect + } + } + } + js.mu.RUnlock() + } + } else { if cfg.Mirror.External.DeliverPrefix != _EMPTY_ { deliveryPrefixes = append(deliveryPrefixes, cfg.Mirror.External.DeliverPrefix) } if cfg.Mirror.External.ApiPrefix != _EMPTY_ { apiPrefixes = append(apiPrefixes, cfg.Mirror.External.ApiPrefix) } - } - // Determine if we are inheriting direct gets. - if exists, ocfg := getStream(cfg.Mirror.Name); exists { - cfg.MirrorDirect = ocfg.AllowDirect - } else if js := s.getJetStream(); js != nil && js.isClustered() { - // Could not find it here. If we are clustered we can look it up. 
- js.mu.RLock() - if cc := js.cluster; cc != nil { - if as := cc.streams[acc.Name]; as != nil { - if sa := as[cfg.Mirror.Name]; sa != nil { - cfg.MirrorDirect = sa.Config.AllowDirect - } - } - } - js.mu.RUnlock() + } } if len(cfg.Sources) > 0 { for _, src := range cfg.Sources { - exists, maxMsgSize, subs := hasStream(src.Name) - if len(subs) > 0 { - streamSubs = append(streamSubs, subs...) - } - if exists { - if cfg.MaxMsgSize > 0 && maxMsgSize > 0 && cfg.MaxMsgSize < maxMsgSize { - return StreamConfig{}, NewJSSourceMaxMessageSizeTooBigError() + // Do not perform checks if External is provided, as it could lead to + // checking against itself (if sourced stream name is the same on different JetStream) + if src.External == nil { + exists, maxMsgSize, subs := hasStream(src.Name) + if len(subs) > 0 { + streamSubs = append(streamSubs, subs...) } - if !isRecovering && !hasFilterSubjectOverlap(src.FilterSubject, streamSubs) { - return StreamConfig{}, NewJSStreamInvalidConfigError( - fmt.Errorf("source '%s' filter subject '%s' does not overlap with any origin stream subject", - src.Name, src.FilterSubject)) + if exists { + if cfg.MaxMsgSize > 0 && maxMsgSize > 0 && cfg.MaxMsgSize < maxMsgSize { + return StreamConfig{}, NewJSSourceMaxMessageSizeTooBigError() + } + if !isRecovering && !hasFilterSubjectOverlap(src.FilterSubject, streamSubs) { + return StreamConfig{}, NewJSStreamInvalidConfigError( + fmt.Errorf("source '%s' filter subject '%s' does not overlap with any origin stream subject", + src.Name, src.FilterSubject)) + } } - } - if src.External == nil { continue } if src.External.DeliverPrefix != _EMPTY_ { @@ -1574,14 +1624,14 @@ func (mset *stream) updateWithAdvisory(config *StreamConfig, sendAdvisory bool) if targetTier := tierName(cfg); mset.tier != targetTier { // In cases such as R1->R3, only one update is needed - mset.jsa.usageMu.RLock() - _, ok := mset.jsa.limits[targetTier] - mset.jsa.usageMu.RUnlock() + jsa.usageMu.RLock() + _, ok := jsa.limits[targetTier] + jsa.usageMu.RUnlock() if ok { // error never set _, reported, _ := mset.store.Utilization() - mset.jsa.updateUsage(mset.tier, mset.stype, -int64(reported)) - mset.jsa.updateUsage(targetTier, mset.stype, int64(reported)) + jsa.updateUsage(mset.tier, mset.stype, -int64(reported)) + jsa.updateUsage(targetTier, mset.stype, int64(reported)) mset.tier = targetTier } // else in case the new tier does not exist (say on move), keep the old tier around @@ -1630,7 +1680,7 @@ func (mset *stream) purge(preq *JSApiStreamPurgeRequest) (purged uint64, err err mset.mu.RUnlock() return 0, errors.New("sealed stream") } - store := mset.store + store, mlseq := mset.store, mset.lseq mset.mu.RUnlock() if preq != nil { @@ -1642,11 +1692,17 @@ func (mset *stream) purge(preq *JSApiStreamPurgeRequest) (purged uint64, err err return purged, err } - // Purge consumers. + // Grab our stream state. var state StreamState store.FastState(&state) fseq, lseq := state.FirstSeq, state.LastSeq + // Check if our last has moved past what our original last sequence was, if so reset. + if lseq > mlseq { + mset.setLastSeq(lseq) + } + + // Purge consumers. // Check for filtered purge. 
if preq != nil && preq.Subject != _EMPTY_ { ss := store.FilteredState(state.FirstSeq, preq.Subject) @@ -1686,6 +1742,7 @@ func (mset *stream) deleteMsg(seq uint64) (bool, error) { return false, fmt.Errorf("invalid stream") } mset.mu.RUnlock() + return mset.store.RemoveMsg(seq) } @@ -1746,7 +1803,9 @@ func gatherSourceMirrorSubjects(subjects []string, cfg *StreamConfig, acc *Accou // Return the subjects for a stream source. func (a *Account) streamSourceSubjects(ss *StreamSource, seen map[string]bool) (subjects []string, hasExt bool) { - if ss != nil && ss.External != nil { + if ss == nil { + return nil, false + } else if ss.External != nil { return nil, true } @@ -2043,7 +2102,7 @@ func (mset *stream) processInboundMirrorMsg(m *inMsg) bool { var err error if node != nil { if js.limitsExceeded(stype) { - s.resourcesExeededError() + s.resourcesExceededError() err = ApiErrors[JSInsufficientResourcesErr] } else { err = node.Propose(encodeStreamMsg(m.subj, _EMPTY_, m.hdr, m.msg, sseq-1, ts)) @@ -2355,7 +2414,14 @@ func (mset *stream) setupMirrorConsumer() error { // Check if we need to skip messages. if state.LastSeq != ccr.ConsumerInfo.Delivered.Stream { - mset.skipMsgs(state.LastSeq+1, ccr.ConsumerInfo.Delivered.Stream) + // Check to see if delivered is past our last and we have no msgs. This will help the + // case when mirroring a stream that has a very high starting sequence number. + if state.Msgs == 0 && ccr.ConsumerInfo.Delivered.Stream > state.LastSeq { + mset.store.PurgeEx(_EMPTY_, ccr.ConsumerInfo.Delivered.Stream+1, 0) + mset.lseq = ccr.ConsumerInfo.Delivered.Stream + } else { + mset.skipMsgs(state.LastSeq+1, ccr.ConsumerInfo.Delivered.Stream) + } } // Capture consumer name. @@ -2918,7 +2984,7 @@ func streamAndSeq(shdr string) (string, uint64) { } // New version which is stream index name sequence fields := strings.Fields(shdr) - if len(fields) != 2 { + if len(fields) < 2 { return _EMPTY_, 0 } return fields[0], uint64(parseAckReplyNum(fields[1])) @@ -3306,10 +3372,12 @@ func (mset *stream) setupStore(fsCfg *FileStoreConfig) error { return err } mset.store = fs + // Register our server. + fs.registerServer(s) } - mset.mu.Unlock() - + // This will fire the callback but we do not require the lock since md will be 0 here. mset.store.RegisterStorageUpdates(mset.storeUpdates) + mset.mu.Unlock() return nil } @@ -3508,12 +3576,25 @@ func (mset *stream) queueInboundMsg(subj, rply string, hdr, msg []byte) { mset.queueInbound(mset.msgs, subj, rply, hdr, msg) } +var dgPool = sync.Pool{ + New: func() interface{} { + return &directGetReq{} + }, +} + +// For when we need to not inline the request. +type directGetReq struct { + // Copy of this is correct for this. + req JSApiMsgGetRequest + reply string +} + // processDirectGetRequest handles direct get request for stream messages. func (mset *stream) processDirectGetRequest(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { - _, msg := c.msgParts(rmsg) if len(reply) == 0 { return } + _, msg := c.msgParts(rmsg) if len(msg) == 0 { hdr := []byte("NATS/1.0 408 Empty Request\r\n\r\n") mset.outq.send(newJSPubMsg(reply, _EMPTY_, _EMPTY_, hdr, nil, nil, 0)) @@ -3546,26 +3627,20 @@ func (mset *stream) processDirectGetRequest(_ *subscription, c *client, _ *Accou inlineOk := c.kind != ROUTER && c.kind != GATEWAY && c.kind != LEAF if !inlineOk { - // Check how long we have been away from the readloop for the route or gateway or leafnode. - // If too long move to a separate go routine. 
- if elapsed := time.Since(c.in.start); elapsed < noBlockThresh { - inlineOk = true - } - } - - if inlineOk { - mset.getDirectRequest(&req, reply) + dg := dgPool.Get().(*directGetReq) + dg.req, dg.reply = req, reply + mset.gets.push(dg) } else { - go mset.getDirectRequest(&req, reply) + mset.getDirectRequest(&req, reply) } } // This is for direct get by last subject which is part of the subject itself. func (mset *stream) processDirectGetLastBySubjectRequest(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { - _, msg := c.msgParts(rmsg) if len(reply) == 0 { return } + _, msg := c.msgParts(rmsg) // This version expects no payload. if len(msg) != 0 { hdr := []byte("NATS/1.0 408 Bad Request\r\n\r\n") @@ -3591,19 +3666,15 @@ func (mset *stream) processDirectGetLastBySubjectRequest(_ *subscription, c *cli return } + req := JSApiMsgGetRequest{LastFor: key} + inlineOk := c.kind != ROUTER && c.kind != GATEWAY && c.kind != LEAF if !inlineOk { - // Check how long we have been away from the readloop for the route or gateway or leafnode. - // If too long move to a separate go routine. - if elapsed := time.Since(c.in.start); elapsed < noBlockThresh { - inlineOk = true - } - } - - if inlineOk { - mset.getDirectRequest(&JSApiMsgGetRequest{LastFor: key}, reply) + dg := dgPool.Get().(*directGetReq) + dg.req, dg.reply = req, reply + mset.gets.push(dg) } else { - go mset.getDirectRequest(&JSApiMsgGetRequest{LastFor: key}, reply) + mset.getDirectRequest(&req, reply) } } @@ -3650,21 +3721,7 @@ func (mset *stream) getDirectRequest(req *JSApiMsgGetRequest, reply string) { // processInboundJetStreamMsg handles processing messages bound for a stream. func (mset *stream) processInboundJetStreamMsg(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { hdr, msg := c.msgParts(rmsg) - - // If we are not receiving directly from a client we should move this to another Go routine. - // Make sure to grab no stream or js locks. - if c.kind != CLIENT { - mset.queueInboundMsg(subject, reply, hdr, msg) - return - } - - // This is directly from a client so process inline. - // If we are clustered we need to propose this message to the underlying raft group. - if mset.IsClustered() { - mset.processClusteredInboundMsg(subject, reply, hdr, msg) - } else { - mset.processJetStreamMsg(subject, reply, hdr, msg, 0, 0) - } + mset.queueInboundMsg(subject, reply, hdr, msg) } var ( @@ -3676,7 +3733,7 @@ var ( func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, lseq uint64, ts int64) error { mset.mu.Lock() c, s, store := mset.client, mset.srv, mset.store - if c == nil { + if mset.closed || c == nil { mset.mu.Unlock() return nil } @@ -3792,7 +3849,7 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, } // Expected last sequence per subject. // If we are clustered we have prechecked seq > 0. - if seq, exists := getExpectedLastSeqPerSubject(hdr); exists && (!isClustered || seq == 0) { + if seq, exists := getExpectedLastSeqPerSubject(hdr); exists { // TODO(dlc) - We could make a new store func that does this all in one. var smv StoreMsg var fseq uint64 @@ -3906,7 +3963,7 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, // Check to see if we have exceeded our limits. 
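The direct-get hunks above stop serving ROUTER/GATEWAY/LEAF requests inline (or on a fresh goroutine) and instead push a pooled directGetReq onto a queue drained by the stream's internalLoop. A compact approximation of that notify-and-drain shape, using a mutex-guarded slice and a one-slot signal channel in place of the server's generic ipQueue:

package main

import (
	"fmt"
	"sync"
)

// queue is a crude stand-in for the server's ipQueue: a guarded slice plus
// a one-slot channel that coalesces wakeups for the draining goroutine.
type queue[T any] struct {
	mu    sync.Mutex
	items []T
	ch    chan struct{}
}

func newQueue[T any]() *queue[T] { return &queue[T]{ch: make(chan struct{}, 1)} }

func (q *queue[T]) push(v T) {
	q.mu.Lock()
	q.items = append(q.items, v)
	q.mu.Unlock()
	select {
	case q.ch <- struct{}{}: // wake the drainer if it is not already signaled
	default:
	}
}

func (q *queue[T]) pop() []T {
	q.mu.Lock()
	defer q.mu.Unlock()
	items := q.items
	q.items = nil
	return items
}

func main() {
	q := newQueue[string]()
	done := make(chan struct{})
	go func() { // plays the role of internalLoop
		<-q.ch
		for _, req := range q.pop() {
			fmt.Println("served direct get:", req)
		}
		close(done)
	}()
	q.push("LastFor: foo.bar") // what a route/gateway/leaf handler would do
	<-done
}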
if js.limitsExceeded(stype) { - s.resourcesExeededError() + s.resourcesExceededError() mset.clfs++ mset.mu.Unlock() if canRespond { @@ -3998,7 +4055,14 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, } else { // Make sure to take into account any message assignments that we had to skip (clfs). seq = lseq + 1 - clfs - err = store.StoreRawMsg(subject, hdr, msg, seq, ts) + // Check for preAcks and the need to skip vs store. + + if mset.hasAllPreAcks(seq, subject) { + mset.clearAllPreAcks(seq) + store.SkipMsg() + } else { + err = store.StoreRawMsg(subject, hdr, msg, seq, ts) + } } if err != nil { @@ -4210,7 +4274,7 @@ func newJSPubMsg(dsubj, subj, reply string, hdr, msg []byte, o *consumer, seq ui } else { m = new(jsPubMsg) } - // When getting something from a pool it is criticical that all fields are + // When getting something from a pool it is critical that all fields are // initialized. Doing this way guarantees that if someone adds a field to // the structure, the compiler will fail the build if this line is not updated. (*m) = jsPubMsg{dsubj, reply, StoreMsg{subj, hdr, msg, buf, seq, 0}, o} @@ -4277,8 +4341,8 @@ type StoredMsg struct { Header []byte `json:"hdrs,omitempty"` Data []byte `json:"data,omitempty"` Time time.Time `json:"time"` - ReplySubject string `json:"reply_subject"` - TenantName string `json:"tenant_name"` + ReplySubject string `json:"reply_subject"` // ** added by Memphis + TenantName string `json:"tenant_name"` // ** added by Memphis } // This is similar to system semantics but did not want to overload the single system sendq, @@ -4321,7 +4385,7 @@ func (mset *stream) internalLoop() { c := s.createInternalJetStreamClient() c.registerWithAccount(mset.acc) defer c.closeConnection(ClientClosed) - outq, qch, msgs := mset.outq, mset.qch, mset.msgs + outq, qch, msgs, gets := mset.outq, mset.qch, mset.msgs, mset.gets // For the ack msgs queue for interest retention. var ( @@ -4337,16 +4401,29 @@ func (mset *stream) internalLoop() { // This should be rarely used now so can be smaller. var _r [1024]byte + // To optimize for not converting a string to a []byte slice. + var ( + subj [256]byte + dsubj [256]byte + rply [256]byte + szb [10]byte + hdb [10]byte + ) + for { select { case <-outq.ch: pms := outq.pop() for _, pm := range pms { - c.pa.subject = []byte(pm.dsubj) - c.pa.deliver = []byte(pm.subj) + c.pa.subject = append(dsubj[:0], pm.dsubj...) + c.pa.deliver = append(subj[:0], pm.subj...) c.pa.size = len(pm.msg) + len(pm.hdr) - c.pa.szb = []byte(strconv.Itoa(c.pa.size)) - c.pa.reply = []byte(pm.reply) + c.pa.szb = append(szb[:0], strconv.Itoa(c.pa.size)...) + if len(pm.reply) > 0 { + c.pa.reply = append(rply[:0], pm.reply...) + } else { + c.pa.reply = nil + } // If we have an underlying buf that is the wire contents for hdr + msg, else construct on the fly. var msg []byte @@ -4369,6 +4446,7 @@ func (mset *stream) internalLoop() { if len(pm.hdr) > 0 { c.pa.hdr = len(pm.hdr) c.pa.hdb = []byte(strconv.Itoa(c.pa.hdr)) + c.pa.hdb = append(hdb[:0], strconv.Itoa(c.pa.hdr)...) } else { c.pa.hdr = -1 c.pa.hdb = nil @@ -4382,7 +4460,7 @@ func (mset *stream) internalLoop() { // Check to see if this is a delivery for a consumer and // we failed to deliver the message. If so alert the consumer. 
if pm.o != nil && pm.seq > 0 && !didDeliver { - pm.o.didNotDeliver(pm.seq) + pm.o.didNotDeliver(pm.seq, pm.dsubj) } pm.returnToPool() } @@ -4402,6 +4480,14 @@ func (mset *stream) internalLoop() { } } msgs.recycle(&ims) + case <-gets.ch: + dgs := gets.pop() + for _, dg := range dgs { + mset.getDirectRequest(&dg.req, dg.reply) + dgPool.Put(dg) + } + gets.recycle(&dgs) + case <-amch: seqs := ackq.pop() for _, seq := range seqs { @@ -4416,6 +4502,28 @@ func (mset *stream) internalLoop() { } } +// Used to break consumers out of their monitorConsumer go routines. +func (mset *stream) resetAndWaitOnConsumers() { + mset.mu.RLock() + consumers := make([]*consumer, 0, len(mset.consumers)) + for _, o := range mset.consumers { + consumers = append(consumers, o) + } + mset.mu.RUnlock() + + for _, o := range consumers { + if node := o.raftNode(); node != nil { + if o.IsLeader() { + node.StepDown() + } + node.Delete() + } + if o.isMonitorRunning() { + o.monitorWg.Wait() + } + } +} + // Internal function to delete a stream. func (mset *stream) delete() error { if mset == nil { @@ -4427,21 +4535,22 @@ func (mset *stream) delete() error { // Internal function to stop or delete the stream. func (mset *stream) stop(deleteFlag, advisory bool) error { mset.mu.RLock() - js, jsa := mset.js, mset.jsa + js, jsa, name := mset.js, mset.jsa, mset.cfg.Name mset.mu.RUnlock() if jsa == nil { return NewJSNotEnabledForAccountError() } - // Remove from our account map. + // Remove from our account map first. jsa.mu.Lock() - delete(jsa.streams, mset.cfg.Name) + delete(jsa.streams, name) accName := jsa.account.Name jsa.mu.Unlock() // Clean up consumers. mset.mu.Lock() + mset.closed = true var obs []*consumer for _, o := range mset.consumers { obs = append(obs, o) @@ -4462,16 +4571,37 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { mset.cancelSourceConsumer(si.iname) } } + + // Cluster cleanup + var sa *streamAssignment + if n := mset.node; n != nil { + if deleteFlag { + n.Delete() + sa = mset.sa + } else { + if n.NeedSnapshot() { + // Attempt snapshot on clean exit. + n.InstallSnapshot(mset.stateSnapshotLocked()) + } + n.Stop() + } + } mset.mu.Unlock() + isShuttingDown := js.isShuttingDown() for _, o := range obs { - // Third flag says do not broadcast a signal. - // TODO(dlc) - If we have an err here we don't want to stop - // but should we log? - o.stopWithFlags(deleteFlag, deleteFlag, false, advisory) + if !o.isClosed() { + // Third flag says do not broadcast a signal. + // TODO(dlc) - If we have an err here we don't want to stop + // but should we log? + o.stopWithFlags(deleteFlag, deleteFlag, false, advisory) + if !isShuttingDown { + o.monitorWg.Wait() + } + } } - mset.mu.Lock() + mset.mu.Lock() // Stop responding to sync requests. mset.stopClusterSubs() // Unsubscribe from direct stream. @@ -4483,19 +4613,6 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { mset.infoSub = nil } - // Cluster cleanup - var sa *streamAssignment - if n := mset.node; n != nil { - if deleteFlag { - n.Delete() - sa = mset.sa - } else if n.NeedSnapshot() { - // Attempt snapshot on clean exit. - n.InstallSnapshot(mset.stateSnapshotLocked()) - n.Stop() - } - } - // Send stream delete advisory after the consumers. 
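stop() above now waits on each consumer's monitorWg (unless JetStream itself is shutting down), so monitor goroutines are fully drained before teardown continues. A minimal sketch of that wait-for-monitor pattern, with names invented for illustration:

package main

import (
	"fmt"
	"sync"
)

type consumer struct {
	monitorWg sync.WaitGroup
	qch       chan struct{}
}

func (o *consumer) startMonitor() {
	o.monitorWg.Add(1)
	go func() {
		defer o.monitorWg.Done()
		<-o.qch // the monitor runs until told to quit
	}()
}

// stop signals the monitor and then blocks until it has actually exited,
// so later teardown never races with a still-running monitor goroutine.
func (o *consumer) stop() {
	close(o.qch)
	o.monitorWg.Wait()
}

func main() {
	o := &consumer{qch: make(chan struct{})}
	o.startMonitor()
	o.stop()
	fmt.Println("monitor fully stopped")
}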
if deleteFlag && advisory { mset.sendDeleteAdvisoryLocked() @@ -4557,23 +4674,21 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { sysc.closeConnection(ClientClosed) } - if store == nil { - return nil - } - if deleteFlag { - if err := store.Delete(); err != nil { - return err + if store != nil { + // Ignore errors. + store.Delete() } + // Release any resources. js.releaseStreamResources(&mset.cfg) - // cleanup directories after the stream accDir := filepath.Join(js.config.StoreDir, accName) // no op if not empty os.Remove(filepath.Join(accDir, streamsDir)) os.Remove(accDir) - } else if err := store.Stop(); err != nil { - return err + } else if store != nil { + // Ignore errors. + store.Stop() } return nil @@ -4621,6 +4736,12 @@ func (mset *stream) getPublicConsumers() []*consumer { return obs } +func (mset *stream) isInterestRetention() bool { + mset.mu.RLock() + defer mset.mu.RUnlock() + return mset.cfg.Retention != LimitsPolicy +} + // NumConsumers reports on number of active consumers for this stream. func (mset *stream) numConsumers() int { mset.mu.RLock() @@ -4810,35 +4931,162 @@ func (mset *stream) potentialFilteredConsumers() bool { return false } -func (mset *stream) checkInterest(seq uint64, obs *consumer) bool { +// Check if there is no interest in this sequence number across our consumers. +// The consumer passed is optional if we are processing the ack for that consumer. +// Write lock should be held. +func (mset *stream) noInterest(seq uint64, obs *consumer) bool { + return !mset.checkForInterest(seq, obs) +} + +// Check if there is no interest in this sequence number and subject across our consumers. +// The consumer passed is optional if we are processing the ack for that consumer. +// Write lock should be held. +func (mset *stream) noInterestWithSubject(seq uint64, subj string, obs *consumer) bool { + return !mset.checkForInterestWithSubject(seq, subj, obs) +} + +// Write lock should be held here for the stream to avoid race conditions on state. +func (mset *stream) checkForInterest(seq uint64, obs *consumer) bool { var subj string if mset.potentialFilteredConsumers() { pmsg := getJSPubMsgFromPool() defer pmsg.returnToPool() sm, err := mset.store.LoadMsg(seq, &pmsg.StoreMsg) if err != nil { + if err == ErrStoreEOF { + // Register this as a preAck. + mset.registerPreAck(obs, seq) + return true + } + mset.clearAllPreAcks(seq) return false } subj = sm.subj } + return mset.checkForInterestWithSubject(seq, subj, obs) +} + +// Checks for interest given a sequence and subject. +func (mset *stream) checkForInterestWithSubject(seq uint64, subj string, obs *consumer) bool { for _, o := range mset.consumers { - if o != obs && o.needAck(seq, subj) { + // If this is us or we have a registered preAck for this consumer continue inspecting. + if o == obs || mset.hasPreAck(o, seq) { + continue + } + // Check if we need an ack. + if o.needAck(seq, subj) { return true } } + mset.clearAllPreAcks(seq) return false } +// Check if we have a pre-registered ack for this sequence. +// Write lock should be held. +func (mset *stream) hasPreAck(o *consumer, seq uint64) bool { + if o == nil || len(mset.preAcks) == 0 { + return false + } + consumers := mset.preAcks[seq] + if len(consumers) == 0 { + return false + } + _, found := consumers[o] + return found +} + +// Check if we have all consumers pre-acked for this sequence and subject. +// Write lock should be held. 
+func (mset *stream) hasAllPreAcks(seq uint64, subj string) bool { + if len(mset.preAcks) == 0 || len(mset.preAcks[seq]) == 0 { + return false + } + // Since these can be filtered and mutually exclusive, + // if we have some preAcks we need to check all interest here. + return mset.noInterestWithSubject(seq, subj, nil) +} + +// Clear all preAcks for this sequence. +// Write lock should be held. +func (mset *stream) clearAllPreAcks(seq uint64) { + delete(mset.preAcks, seq) +} + +// Clear all preAcks below floor. +// Write lock should be held. +func (mset *stream) clearAllPreAcksBelowFloor(floor uint64) { + for seq := range mset.preAcks { + if seq < floor { + delete(mset.preAcks, seq) + } + } +} + +// This will register an ack for a consumer if it arrives before the actual message. +func (mset *stream) registerPreAckLock(o *consumer, seq uint64) { + mset.mu.Lock() + defer mset.mu.Unlock() + mset.registerPreAck(o, seq) +} + +// This will register an ack for a consumer if it arrives before +// the actual message. +// Write lock should be held. +func (mset *stream) registerPreAck(o *consumer, seq uint64) { + if o == nil { + return + } + if mset.preAcks == nil { + mset.preAcks = make(map[uint64]map[*consumer]struct{}) + } + if mset.preAcks[seq] == nil { + mset.preAcks[seq] = make(map[*consumer]struct{}) + } + mset.preAcks[seq][o] = struct{}{} +} + +// This will clear an ack for a consumer. +// Write lock should be held. +func (mset *stream) clearPreAck(o *consumer, seq uint64) { + if o == nil || len(mset.preAcks) == 0 { + return + } + if consumers := mset.preAcks[seq]; len(consumers) > 0 { + delete(consumers, o) + if len(consumers) == 0 { + delete(mset.preAcks, seq) + } + } +} + // ackMsg is called into from a consumer when we have a WorkQueue or Interest Retention Policy. func (mset *stream) ackMsg(o *consumer, seq uint64) { - if mset.cfg.Retention == LimitsPolicy { + if seq == 0 { + return + } + + // Don't make this RLock(). We need to have only 1 running at a time to gauge interest across all consumers. + mset.mu.Lock() + if mset.closed || mset.store == nil || mset.cfg.Retention == LimitsPolicy { + mset.mu.Unlock() return } - // Make sure this sequence is not below our first sequence. var state StreamState mset.store.FastState(&state) + + // Make sure this sequence is not below our first sequence. if seq < state.FirstSeq { + mset.clearPreAck(o, seq) + mset.mu.Unlock() + return + } + + // If this has arrived before we have processed the message itself. + if seq > state.LastSeq { + mset.registerPreAck(o, seq) + mset.mu.Unlock() return } @@ -4847,24 +5095,21 @@ func (mset *stream) ackMsg(o *consumer, seq uint64) { case WorkQueuePolicy: // Normally we just remove a message when its ack'd here but if we have direct consumers // from sources and/or mirrors we need to make sure they have delivered the msg. - mset.mu.RLock() - shouldRemove = mset.directs <= 0 || !mset.checkInterest(seq, o) - mset.mu.RUnlock() + shouldRemove = mset.directs <= 0 || mset.noInterest(seq, o) case InterestPolicy: - mset.mu.RLock() - shouldRemove = !mset.checkInterest(seq, o) - mset.mu.RUnlock() + shouldRemove = mset.noInterest(seq, o) } + mset.mu.Unlock() - if shouldRemove { - if _, err := mset.store.RemoveMsg(seq); err == ErrStoreEOF { - // This should be rare but I have seen it. - // The ack reached us before the actual msg with AckNone and InterestPolicy. 
- if n := mset.raftNode(); n != nil { - md := streamMsgDelete{Seq: seq, NoErase: true, Stream: mset.cfg.Name} - n.ForwardProposal(encodeMsgDelete(&md)) - } - } + // If nothing else to do. + if !shouldRemove { + return + } + + // If we are here we should attempt to remove. + if _, err := mset.store.RemoveMsg(seq); err == ErrStoreEOF { + // This should not happen, but being pedantic. + mset.registerPreAckLock(o, seq) } } @@ -4936,7 +5181,7 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error if err != nil { return nil, err } - if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA { + if hdr.Typeflag != tar.TypeReg { return nil, logAndReturnError() } fpath := filepath.Join(sdir, filepath.Clean(hdr.Name)) @@ -5063,37 +5308,17 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error return mset, nil } -// This is to check for dangling messages. +// This is to check for dangling messages on interest retention streams. // Issue https://github.com/nats-io/nats-server/issues/3612 func (mset *stream) checkForOrphanMsgs() { - // We need to grab the low water mark for all consumers. - var ackFloor uint64 mset.mu.RLock() + consumers := make([]*consumer, 0, len(mset.consumers)) for _, o := range mset.consumers { - o.mu.RLock() - if o.store != nil { - if state, err := o.store.BorrowState(); err == nil { - if ackFloor == 0 || state.AckFloor.Stream < ackFloor { - ackFloor = state.AckFloor.Stream - } - } - } - o.mu.RUnlock() + consumers = append(consumers, o) } - // Grabs stream state. - var state StreamState - mset.store.FastState(&state) - s, acc := mset.srv, mset.acc mset.mu.RUnlock() - - if ackFloor > state.FirstSeq { - req := &JSApiStreamPurgeRequest{Sequence: ackFloor + 1} - purged, err := mset.purge(req) - if err != nil { - s.Warnf("stream '%s > %s' could not auto purge orphaned messages: %v", acc, mset.name(), err) - } else { - s.Debugf("stream '%s > %s' auto purged %d messages", acc, mset.name(), purged) - } + for _, o := range consumers { + o.checkStateForInterestStream() } } @@ -5119,3 +5344,22 @@ func (mset *stream) checkConsumerReplication() { o.mu.RUnlock() } } + +// Will check if we are running in the monitor already and if not set the appropriate flag. +func (mset *stream) checkInMonitor() bool { + mset.mu.Lock() + defer mset.mu.Unlock() + + if mset.inMonitor { + return true + } + mset.inMonitor = true + return false +} + +// Clear us being in the monitor routine. +func (mset *stream) clearMonitorRunning() { + mset.mu.Lock() + defer mset.mu.Unlock() + mset.inMonitor = false +} diff --git a/server/sublist.go b/server/sublist.go index a77f9f988..d7ffac0c9 100644 --- a/server/sublist.go +++ b/server/sublist.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020 The NATS Authors +// Copyright 2016-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -48,7 +48,7 @@ const ( // cacheMax is used to bound limit the frontend cache slCacheMax = 1024 // If we run a sweeper we will drain to this count. - slCacheSweep = 512 + slCacheSweep = 256 // plistMin is our lower bounds to create a fast plist for Match. plistMin = 256 ) @@ -615,7 +615,7 @@ func (s *Sublist) reduceCacheCount() { // Helper function for auto-expanding remote qsubs. 
func isRemoteQSub(sub *subscription) bool { - return sub != nil && sub.queue != nil && sub.client != nil && sub.client.kind == ROUTER + return sub != nil && sub.queue != nil && sub.client != nil && (sub.client.kind == ROUTER || sub.client.kind == LEAF) } // UpdateRemoteQSub should be called when we update the weight of an existing @@ -820,9 +820,13 @@ func (s *Sublist) RemoveBatch(subs []*subscription) error { // Turn off our cache if enabled. wasEnabled := s.cache != nil s.cache = nil + // We will try to remove all subscriptions but will report the first that caused + // an error. In other words, we don't bail out at the first error which would + // possibly leave a bunch of subscriptions that could have been removed. + var err error for _, sub := range subs { - if err := s.remove(sub, false, false); err != nil { - return err + if lerr := s.remove(sub, false, false); lerr != nil && err == nil { + err = lerr } } // Turn caching back on here. @@ -830,7 +834,7 @@ func (s *Sublist) RemoveBatch(subs []*subscription) error { if wasEnabled { s.cache = make(map[string]*SublistResult) } - return nil + return err } // pruneNode is used to prune an empty node from the tree. @@ -1529,13 +1533,13 @@ func (s *Sublist) ReverseMatch(subject string) *SublistResult { result := &SublistResult{} - s.Lock() + s.RLock() reverseMatchLevel(s.root, tokens, nil, result) // Check for empty result. if len(result.psubs) == 0 && len(result.qsubs) == 0 { result = emptyResult } - s.Unlock() + s.RUnlock() return result } @@ -1553,9 +1557,22 @@ func reverseMatchLevel(l *level, toks []string, n *node, results *SublistResult) for _, n := range l.nodes { reverseMatchLevel(n.next, toks[i+1:], n, results) } + if l.pwc != nil { + reverseMatchLevel(l.pwc.next, toks[i+1:], n, results) + } + if l.fwc != nil { + getAllNodes(l, results) + } return } } + // If the sub tree has a fwc at this position, match as well. + if l.fwc != nil { + getAllNodes(l, results) + return + } else if l.pwc != nil { + reverseMatchLevel(l.pwc.next, toks[i+1:], n, results) + } n = l.nodes[t] if n == nil { break diff --git a/server/sublist_test.go b/server/sublist_test.go index a302405ac..4d3e913f0 100644 --- a/server/sublist_test.go +++ b/server/sublist_test.go @@ -1,4 +1,4 @@ -// Copyright 2016-2020 The NATS Authors +// Copyright 2016-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -370,6 +370,30 @@ func TestSublistNoCacheRemoveBatch(t *testing.T) { } } +func TestSublistRemoveBatchWithError(t *testing.T) { + s := NewSublistNoCache() + sub1 := newSub("foo") + sub2 := newSub("bar") + sub3 := newSub("baz") + s.Insert(sub1) + s.Insert(sub2) + s.Insert(sub3) + subNotPresent := newSub("not.inserted") + // Try to remove all subs, but include the sub that has not been inserted. + err := s.RemoveBatch([]*subscription{subNotPresent, sub1, sub3}) + // We expect an error to be returned, but sub1,2 and 3 to have been removed. + require_Error(t, err, ErrNotFound) + // Make sure that we have only sub2 present + verifyCount(s, 1, t) + r := s.Match("bar") + verifyLen(r.psubs, 1, t) + verifyMember(r.psubs, sub2, t) + r = s.Match("foo") + verifyLen(r.psubs, 0, t) + r = s.Match("baz") + verifyLen(r.psubs, 0, t) +} + func testSublistInvalidSubjectsInsert(t *testing.T, s *Sublist) { // Insert, or subscriptions, can have wildcards, but not empty tokens, // and can not have a FWC that is not the terminal token. 
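The RemoveBatch change above swaps fail-fast semantics for best-effort removal: every subscription in the batch is attempted, and only the first error encountered is reported once the whole batch has been processed. A minimal, self-contained sketch of that pattern follows; removeAll, remove, and errNotFound are illustrative names for this sketch only, not identifiers from the patch.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// removeAll attempts every removal and reports only the first error,
// mirroring the best-effort semantics RemoveBatch adopts above.
func removeAll(items []string, remove func(string) error) error {
	var firstErr error
	for _, it := range items {
		if err := remove(it); err != nil && firstErr == nil {
			firstErr = err // remember the first failure, but keep going
		}
	}
	return firstErr
}

func main() {
	present := map[string]bool{"foo": true, "baz": true}
	err := removeAll([]string{"foo", "bar", "baz"}, func(s string) error {
		if !present[s] {
			return errNotFound
		}
		delete(present, s)
		return nil
	})
	// Prints "not found map[]": "bar" failed, yet "foo" and "baz" were still removed.
	fmt.Println(err, present)
}

Returning the first error keeps the method signature unchanged for callers while guaranteeing the sublist is left as clean as possible, which is exactly what TestSublistRemoveBatchWithError below verifies.
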
@@ -1478,6 +1502,20 @@ func TestSublistReverseMatch(t *testing.T) { verifyMember(r.psubs, fooBarBazSub, t) } +func TestSublistReverseMatchWider(t *testing.T) { + s := NewSublistWithCache() + sub := newSub("uplink.*.*.>") + s.Insert(sub) + + r := s.ReverseMatch("uplink.1.*.*.>") + verifyLen(r.psubs, 1, t) + verifyMember(r.psubs, sub, t) + + r = s.ReverseMatch("uplink.1.2.3.>") + verifyLen(r.psubs, 1, t) + verifyMember(r.psubs, sub, t) +} + func TestSublistMatchWithEmptyTokens(t *testing.T) { for _, test := range []struct { name string diff --git a/server/test_test.go b/server/test_test.go index 2f0e11d8c..8493eb07b 100644 --- a/server/test_test.go +++ b/server/test_test.go @@ -1,4 +1,4 @@ -// Copyright 2019-2021 The NATS Authors +// Copyright 2019-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -14,7 +14,6 @@ package server import ( - "bytes" "fmt" "math/rand" "net/url" @@ -52,14 +51,6 @@ func RunRandClientPortServer() *Server { return RunServer(&opts) } -// Used to setup clusters of clusters for tests. -type cluster struct { - servers []*Server - opts []*Options - name string - t testing.TB -} - func require_True(t *testing.T, b bool) { t.Helper() if !b { @@ -120,16 +111,16 @@ func require_Error(t *testing.T, err error, expected ...error) { t.Fatalf("Expected one of %v, got '%v'", expected, err) } -func require_Equal(t *testing.T, a, b string) { +func require_Equal[T comparable](t *testing.T, a, b T) { t.Helper() - if strings.Compare(a, b) != 0 { + if a != b { t.Fatalf("require equal, but got: %v != %v", a, b) } } -func require_NotEqual(t *testing.T, a, b [32]byte) { +func require_NotEqual[T comparable](t *testing.T, a, b T) { t.Helper() - if bytes.Equal(a[:], b[:]) { + if a == b { t.Fatalf("require not equal, but got: %v != %v", a, b) } } @@ -276,6 +267,11 @@ func (c *cluster) shutdown() { if c == nil { return } + // Stop any proxies. + for _, np := range c.nproxies { + np.stop() + } + // Shutdown and cleanup servers. for i, s := range c.servers { sd := s.StoreDir() s.Shutdown() diff --git a/server/util.go b/server/util.go index 52e0861f1..454de9d81 100644 --- a/server/util.go +++ b/server/util.go @@ -335,6 +335,7 @@ func copyStrings(src []string) []string { return dst } +// ** added by Memphis // copyMaps make a new map of the same size than `src` and copy its content. // If `src` is nil, then this returns `nil` func copyMaps(src map[string]string) map[string]string { @@ -347,3 +348,5 @@ func copyMaps(src map[string]string) map[string]string { } return dst } + +// ** added by Memphis diff --git a/server/util_test.go b/server/util_test.go index 41100b10b..168a995ac 100644 --- a/server/util_test.go +++ b/server/util_test.go @@ -10,6 +10,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + package server import ( diff --git a/server/websocket.go b/server/websocket.go index 797ed5c86..0b8fe8048 100644 --- a/server/websocket.go +++ b/server/websocket.go @@ -1,4 +1,4 @@ -// Copyright 2020 The NATS Authors +// Copyright 2020-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -15,7 +15,6 @@ package server import ( "bytes" - "compress/flate" "crypto/rand" "crypto/sha1" "crypto/tls" @@ -34,6 +33,8 @@ import ( "sync" "time" "unicode/utf8" + + "github.com/klauspost/compress/flate" ) type wsOpCode int @@ -452,7 +453,9 @@ func (c *client) wsHandleControlFrame(r *wsReadInfo, frameType wsOpCode, nc io.R } } } - c.wsEnqueueControlMessage(wsCloseMessage, wsCreateCloseMessage(status, body)) + clm := wsCreateCloseMessage(status, body) + c.wsEnqueueControlMessage(wsCloseMessage, clm) + nbPoolPut(clm) // wsEnqueueControlMessage has taken a copy. // Return io.EOF so that readLoop will close the connection as ClientClosed // after processing pending buffers. return pos, io.EOF @@ -502,7 +505,7 @@ func wsIsControlFrame(frameType wsOpCode) bool { // Create the frame header. // Encodes the frame type and optional compression flag, and the size of the payload. func wsCreateFrameHeader(useMasking, compressed bool, frameType wsOpCode, l int) ([]byte, []byte) { - fh := make([]byte, wsMaxFrameHeaderSize) + fh := nbPoolGet(wsMaxFrameHeaderSize)[:wsMaxFrameHeaderSize] n, key := wsFillFrameHeader(fh, useMasking, wsFirstFrame, wsFinalFrame, compressed, frameType, l) return fh[:n], key } @@ -596,11 +599,13 @@ func (c *client) wsEnqueueControlMessageLocked(controlMsg wsOpCode, payload []by if useMasking { sz += 4 } - cm := make([]byte, sz+len(payload)) + cm := nbPoolGet(sz + len(payload)) + cm = cm[:cap(cm)] n, key := wsFillFrameHeader(cm, useMasking, wsFirstFrame, wsFinalFrame, wsUncompressedFrame, controlMsg, len(payload)) + cm = cm[:n] // Note that payload is optional. if len(payload) > 0 { - copy(cm[n:], payload) + cm = append(cm, payload...) if useMasking { wsMaskBuf(key, cm[n:]) } @@ -646,6 +651,7 @@ func (c *client) wsEnqueueCloseMessage(reason ClosedState) { } body := wsCreateCloseMessage(status, reason.String()) c.wsEnqueueControlMessageLocked(wsCloseMessage, body) + nbPoolPut(body) // wsEnqueueControlMessageLocked has taken a copy. } // Create and then enqueue a close message with a protocol error and the @@ -655,6 +661,7 @@ func (c *client) wsEnqueueCloseMessage(reason ClosedState) { func (c *client) wsHandleProtocolError(message string) error { buf := wsCreateCloseMessage(wsCloseStatusProtocolError, message) c.wsEnqueueControlMessage(wsCloseMessage, buf) + nbPoolPut(buf) // wsEnqueueControlMessage has taken a copy. return fmt.Errorf(message) } @@ -671,7 +678,7 @@ func wsCreateCloseMessage(status int, body string) []byte { body = body[:wsMaxControlPayloadSize-5] body += "..." } - buf := make([]byte, 2+len(body)) + buf := nbPoolGet(2 + len(body))[:2+len(body)] // We need to have a 2 byte unsigned int that represents the error status code // https://tools.ietf.org/html/rfc6455#section-5.5.1 binary.BigEndian.PutUint16(buf[:2], uint16(status)) @@ -1270,13 +1277,7 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { if c.ws.browser { mfs = wsFrameSizeForBrowsers } - if len(c.out.p) > 0 { - p := c.out.p - c.out.p = nil - nb = append(c.out.nb, p) - } else if len(c.out.nb) > 0 { - nb = c.out.nb - } + nb = c.out.nb mask := c.ws.maskwrite // Start with possible already framed buffers (that we could have // got from partials or control messages such as ws pings or pongs). @@ -1308,6 +1309,7 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { var csz int for _, b := range nb { cp.Write(b) + nbPoolPut(b) // No longer needed as contents written to compressor. 
} if err := cp.Flush(); err != nil { c.Errorf("Error during compression: %v", err) @@ -1324,24 +1326,33 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { } else { final = true } - fh := make([]byte, wsMaxFrameHeaderSize) // Only the first frame should be marked as compressed, so pass // `first` for the compressed boolean. + fh := nbPoolGet(wsMaxFrameHeaderSize)[:wsMaxFrameHeaderSize] n, key := wsFillFrameHeader(fh, mask, first, final, first, wsBinaryMessage, lp) if mask { wsMaskBuf(key, p[:lp]) } - bufs = append(bufs, fh[:n], p[:lp]) + new := nbPoolGet(wsFrameSizeForBrowsers) + lp = copy(new[:wsFrameSizeForBrowsers], p[:lp]) + bufs = append(bufs, fh[:n], new[:lp]) csz += n + lp p = p[lp:] } } else { - h, key := wsCreateFrameHeader(mask, true, wsBinaryMessage, len(p)) + ol := len(p) + h, key := wsCreateFrameHeader(mask, true, wsBinaryMessage, ol) if mask { wsMaskBuf(key, p) } - bufs = append(bufs, h, p) - csz = len(h) + len(p) + bufs = append(bufs, h) + for len(p) > 0 { + new := nbPoolGet(len(p)) + n := copy(new[:cap(new)], p) + bufs = append(bufs, new[:n]) + p = p[n:] + } + csz = len(h) + ol } // Add to pb the compressed data size (including headers), but // remove the original uncompressed data size that was added @@ -1353,7 +1364,7 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { if mfs > 0 { // We are limiting the frame size. startFrame := func() int { - bufs = append(bufs, make([]byte, wsMaxFrameHeaderSize)) + bufs = append(bufs, nbPoolGet(wsMaxFrameHeaderSize)[:wsMaxFrameHeaderSize]) return len(bufs) - 1 } endFrame := func(idx, size int) { @@ -1386,8 +1397,10 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { if endStart { fhIdx = startFrame() } - bufs = append(bufs, b[:total]) - b = b[total:] + new := nbPoolGet(total) + n := copy(new[:cap(new)], b[:total]) + bufs = append(bufs, new[:n]) + b = b[n:] } } if total > 0 { diff --git a/server/websocket_test.go b/server/websocket_test.go index 3439f8e8f..2e590e2c0 100644 --- a/server/websocket_test.go +++ b/server/websocket_test.go @@ -16,7 +16,6 @@ package server import ( "bufio" "bytes" - "compress/flate" "crypto/tls" "encoding/base64" "encoding/binary" @@ -36,6 +35,8 @@ import ( "github.com/nats-io/jwt/v2" "github.com/nats-io/nkeys" + + "github.com/klauspost/compress/flate" ) type testReader struct { @@ -2496,7 +2497,7 @@ func TestWSAdvertise(t *testing.T) { defer s.Shutdown() l := &captureFatalLogger{fatalCh: make(chan string, 1)} s.SetLogger(l, false, false) - go s.Start() + s.Start() select { case e := <-l.fatalCh: if !strings.Contains(e, "Unable to get websocket connect URLs") { @@ -2863,11 +2864,11 @@ func (wc *testWSWrappedConn) Write(p []byte) (int, error) { } func TestWSCompressionBasic(t *testing.T) { - payload := "This is the content of a message that will be compresseddddddddddddddddddddd." + payload := "This is the content of a message that will be compresseddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd." 
msgProto := fmt.Sprintf("MSG foo 1 %d\r\n%s\r\n", len(payload), payload) - cbuf := &bytes.Buffer{} - compressor, _ := flate.NewWriter(cbuf, flate.BestSpeed) + compressor, err := flate.NewWriter(cbuf, flate.BestSpeed) + require_NoError(t, err) compressor.Write([]byte(msgProto)) compressor.Flush() compressed := cbuf.Bytes() @@ -2890,14 +2891,14 @@ func TestWSCompressionBasic(t *testing.T) { } var wc *testWSWrappedConn - s.mu.Lock() + s.mu.RLock() for _, c := range s.clients { c.mu.Lock() wc = &testWSWrappedConn{Conn: c.nc, buf: &bytes.Buffer{}} c.nc = wc c.mu.Unlock() } - s.mu.Unlock() + s.mu.RUnlock() nc := natsConnect(t, s.ClientURL()) defer nc.Close() From 8433f40387ff053808626ddf1ef8276fda31b510 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Wed, 6 Dec 2023 18:05:08 +0200 Subject: [PATCH 02/16] removing extra logs --- server/jetstream.go | 4 ++-- server/jetstream_cluster.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/server/jetstream.go b/server/jetstream.go index 6fbf25b66..e84399c4a 100644 --- a/server/jetstream.go +++ b/server/jetstream.go @@ -179,7 +179,7 @@ func (s *Server) EnableJetStream(config *JetStreamConfig) error { return fmt.Errorf("jetstream already enabled") } - s.Noticef("Starting JetStream") + // s.Noticef("Starting JetStream") // ** deleted by Memphis if config == nil || config.MaxMemory <= 0 || config.MaxStore <= 0 { var storeDir, domain string var maxStore, maxMem int64 @@ -465,7 +465,7 @@ func (s *Server) restartJetStream() error { MaxStore: opts.JetStreamMaxStore, Domain: opts.JetStreamDomain, } - s.Noticef("Restarting JetStream") + // s.Noticef("Restarting JetStream") // ** deleted by Memphis err := s.EnableJetStream(&cfg) if err != nil { s.Warnf("Can't start JetStream: %v", err) diff --git a/server/jetstream_cluster.go b/server/jetstream_cluster.go index e3c552069..009e640c9 100644 --- a/server/jetstream_cluster.go +++ b/server/jetstream_cluster.go @@ -711,7 +711,7 @@ func (s *Server) enableJetStreamClustering() error { return nil } - s.Noticef("Starting JetStream cluster") + // s.Noticef("Starting JetStream cluster") // ** deleted by Memphis // We need to determine if we have a stable cluster name and expected number of servers. 
s.Debugf("JetStream cluster checking for stable cluster name and peers") From 5e37dae4d082efc34ad330513555e3e718285b8c Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Thu, 7 Dec 2023 09:08:07 +0200 Subject: [PATCH 03/16] changes from cloud --- server/background_tasks.go | 6 ++++-- server/memphis_handlers_dls_messages.go | 18 ++++++++++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/server/background_tasks.go b/server/background_tasks.go index e29d42ef7..aa023628e 100644 --- a/server/background_tasks.go +++ b/server/background_tasks.go @@ -565,6 +565,8 @@ func (s *Server) ConsumeSchemaverseDlsMessages() { subject := fmt.Sprintf(JSApiRequestNextT, dlsSchemaverseStream, SCHEMAVERSE_DLS_CONSUMER) s.sendInternalAccountMsgWithReply(s.MemphisGlobalAccount(), subject, replySubj, nil, req, true) + s.Debugf("ConsumeSchemaverseDlsMessages: sending fetch request") + timeout := time.NewTimer(5 * time.Second) msgs := make([]schemaverseDlsMsg, 0) stop := false @@ -582,12 +584,12 @@ func (s *Server) ConsumeSchemaverseDlsMessages() { } case <-timeout.C: stop = true - s.Debugf("ConsumeSchemaverseDlsMessages: finished because of timer") + s.Debugf("ConsumeSchemaverseDlsMessages: finished because of timer: %v messages", len(msgs)) } } for _, message := range msgs { msg := message.Msg - s.handleSchemaverseDlsMsg(msg) + err := s.handleSchemaverseDlsMsg(msg) if err == nil { // send ack s.sendInternalAccountMsgWithEcho(s.MemphisGlobalAccount(), message.ReplySubject, []byte(_EMPTY_)) diff --git a/server/memphis_handlers_dls_messages.go b/server/memphis_handlers_dls_messages.go index f3ea549d7..b87c410da 100644 --- a/server/memphis_handlers_dls_messages.go +++ b/server/memphis_handlers_dls_messages.go @@ -155,45 +155,47 @@ func (s *Server) handleNewUnackedMsg(msg []byte) error { return nil } -func (s *Server) handleSchemaverseDlsMsg(msg []byte) { +func (s *Server) handleSchemaverseDlsMsg(msg []byte) error { tenantName, stringMessage, err := s.getTenantNameAndMessage(msg) if err != nil { s.Errorf("handleSchemaverseDlsMsg at getTenantNameAndMessage: %v", err.Error()) - return + return err } var message models.SchemaVerseDlsMessageSdk err = json.Unmarshal([]byte(stringMessage), &message) if err != nil { serv.Errorf("[tenant: %v]handleSchemaverseDlsMsg: %v", tenantName, err.Error()) - return + return err } exist, station, err := db.GetStationByName(message.StationName, tenantName) if err != nil { serv.Errorf("[tenant: %v]handleSchemaverseDlsMsg: %v", tenantName, err.Error()) - return + return err } if !exist { serv.Warnf("[tenant: %v]handleSchemaverseDlsMsg: station %v couldn't been found", tenantName, message.StationName) - return + return nil } message.Message.TimeSent = time.Now() _, err = db.InsertSchemaverseDlsMsg(station.ID, 0, message.Producer.Name, []string{}, models.MessagePayload(message.Message), message.ValidationError, tenantName, message.PartitionNumber) if err != nil { serv.Errorf("[tenant: %v]handleSchemaverseDlsMsg: %v", tenantName, err.Error()) - return + return err } data, err := hex.DecodeString(message.Message.Data) if err != nil { serv.Errorf("[tenant: %v]handleSchemaverseDlsMsg at DecodeString: %v", tenantName, err.Error()) - return + return err } err = s.sendToDlsStation(station, data, message.Message.Headers, "failed_schema", "") if err != nil { serv.Errorf("[tenant: %v]handleSchemaverseDlsMsg at sendToDlsStation: station: %v, Error while getting notified about a poison message: %v", tenantName, station.DlsStation, err.Error()) - return + return err } + + return 
nil } func (pmh PoisonMessagesHandler) GetDlsMsgsByStationLight(station models.Station, partitionNumber int) ([]models.LightDlsMessageResponse, []models.LightDlsMessageResponse, []models.LightDlsMessageResponse, int, error) { From 909514b4444c2b9e9de9b02e16a11c1859519e70 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Thu, 7 Dec 2023 09:51:11 +0200 Subject: [PATCH 04/16] test files --- test/bench_test.go | 6 + test/cluster_test.go | 21 + test/cluster_tls_test.go | 6 + .../ocsp_peer/mini-ca/caocsp/caocsp_cert.pem | 91 + .../mini-ca/caocsp/private/caocsp_keypair.pem | 28 + .../mini-ca/client1/System_bundle.pem | 186 ++ .../ocsp_peer/mini-ca/client1/System_cert.pem | 97 + .../mini-ca/client1/UserA1_bundle.pem | 186 ++ .../ocsp_peer/mini-ca/client1/UserA1_cert.pem | 97 + .../mini-ca/client1/UserA2_bundle.pem | 186 ++ .../ocsp_peer/mini-ca/client1/UserA2_cert.pem | 97 + .../ocsp_peer/mini-ca/client1/certfile.pem | 175 + .../client1/private/System_keypair.pem | 28 + .../client1/private/UserA1_keypair.pem | 28 + .../client1/private/UserA2_keypair.pem | 28 + .../mini-ca/client2/UserB1_bundle.pem | 186 ++ .../ocsp_peer/mini-ca/client2/UserB1_cert.pem | 97 + .../mini-ca/client2/UserB2_bundle.pem | 186 ++ .../ocsp_peer/mini-ca/client2/UserB2_cert.pem | 97 + .../ocsp_peer/mini-ca/client2/certfile.pem | 175 + .../client2/private/UserB1_keypair.pem | 28 + .../client2/private/UserB2_keypair.pem | 28 + .../intermediate1/intermediate1_cert.pem | 89 + .../private/intermediate1_keypair.pem | 28 + .../intermediate2/intermediate2_cert.pem | 89 + .../private/intermediate2_keypair.pem | 28 + .../misc/misconfig_TestServer1_bundle.pem | 186 ++ .../mini-ca/misc/trust_config1_bundle.pem | 264 ++ .../mini-ca/misc/trust_config2_bundle.pem | 264 ++ .../mini-ca/misc/trust_config3_bundle.pem | 264 ++ .../ocsp_peer/mini-ca/ocsp1/ocsp1_bundle.pem | 181 + .../ocsp_peer/mini-ca/ocsp1/ocsp1_cert.pem | 92 + .../mini-ca/ocsp1/private/ocsp1_keypair.pem | 28 + .../ocsp_peer/mini-ca/ocsp2/ocsp2_bundle.pem | 181 + .../ocsp_peer/mini-ca/ocsp2/ocsp2_cert.pem | 92 + .../mini-ca/ocsp2/private/ocsp2_keypair.pem | 28 + .../mini-ca/root/private/root_keypair.pem | 28 + .../ocsp_peer/mini-ca/root/root_cert.pem | 86 + .../mini-ca/server1/TestServer1_bundle.pem | 186 ++ .../mini-ca/server1/TestServer1_cert.pem | 97 + .../mini-ca/server1/TestServer2_bundle.pem | 186 ++ .../mini-ca/server1/TestServer2_cert.pem | 97 + .../server1/private/TestServer1_keypair.pem | 28 + .../server1/private/TestServer2_keypair.pem | 28 + .../mini-ca/server2/TestServer3_bundle.pem | 186 ++ .../mini-ca/server2/TestServer3_cert.pem | 97 + .../mini-ca/server2/TestServer4_bundle.pem | 186 ++ .../mini-ca/server2/TestServer4_cert.pem | 97 + .../server2/private/TestServer3_keypair.pem | 28 + .../server2/private/TestServer4_keypair.pem | 28 + .../certs/tlsauth/certstore/client.p12 | Bin 0 -> 2509 bytes .../certstore/delete-cert-from-store.ps1 | 2 + .../tlsauth/certstore/import-p12-client.ps1 | 5 + .../tlsauth/certstore/import-p12-server.ps1 | 5 + .../configs/certs/tlsauth/certstore/pkcs12.md | 22 + .../certs/tlsauth/certstore/server.p12 | Bin 0 -> 2533 bytes test/gateway_test.go | 157 +- test/gosrv_test.go | 1 + test/leafnode_test.go | 3 + test/ocsp_peer_test.go | 2926 +++++++++++++++++ test/ocsp_test.go | 2609 ++++++++++----- test/system_services_test.go | 2 +- test/test_test.go | 3 + test/tls_test.go | 20 + test/verbose_test.go | 1 + 65 files changed, 10136 insertions(+), 849 deletions(-) create mode 100644 test/configs/certs/ocsp_peer/mini-ca/caocsp/caocsp_cert.pem 
create mode 100644 test/configs/certs/ocsp_peer/mini-ca/caocsp/private/caocsp_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/System_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/System_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/certfile.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/private/System_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA2_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/certfile.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB1_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB2_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/intermediate1/private/intermediate1_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/intermediate2/intermediate2_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/intermediate2/private/intermediate2_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/misc/misconfig_TestServer1_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/misc/trust_config1_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/misc/trust_config2_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/misc/trust_config3_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/ocsp1/private/ocsp1_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/ocsp2/private/ocsp2_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/root/private/root_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/root/root_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer2_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem create mode 100644 
test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_bundle.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_cert.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem create mode 100644 test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer4_keypair.pem create mode 100644 test/configs/certs/tlsauth/certstore/client.p12 create mode 100644 test/configs/certs/tlsauth/certstore/delete-cert-from-store.ps1 create mode 100644 test/configs/certs/tlsauth/certstore/import-p12-client.ps1 create mode 100644 test/configs/certs/tlsauth/certstore/import-p12-server.ps1 create mode 100644 test/configs/certs/tlsauth/certstore/pkcs12.md create mode 100644 test/configs/certs/tlsauth/certstore/server.p12 create mode 100644 test/ocsp_peer_test.go diff --git a/test/bench_test.go b/test/bench_test.go index d1d874394..d98ede451 100644 --- a/test/bench_test.go +++ b/test/bench_test.go @@ -11,6 +11,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Please note that these tests will stress a system and they need generous +// amounts of CPU, Memory and network sockets. Make sure the 'open files' +// setting for your platform is at least 8192. On linux and MacOSX you can +// do this via 'ulimit -n 8192' +// + package test import ( diff --git a/test/cluster_test.go b/test/cluster_test.go index a08c30973..e851a841d 100644 --- a/test/cluster_test.go +++ b/test/cluster_test.go @@ -240,6 +240,10 @@ func TestClusterQueueSubs(t *testing.T) { sendB("PING\r\n") expectB(pongRe) + // Give plenty of time for the messages to flush, so that we don't + // accidentally only read some of them. + time.Sleep(time.Millisecond * 250) + // Should receive 5. matches = expectMsgsA(5) checkForQueueSid(t, matches, qg1SidsA) @@ -248,6 +252,10 @@ func TestClusterQueueSubs(t *testing.T) { // Send to A sendA("PUB foo 2\r\nok\r\n") + // Give plenty of time for the messages to flush, so that we don't + // accidentally only read some of them. + time.Sleep(time.Millisecond * 250) + // Should receive 5. matches = expectMsgsA(5) checkForQueueSid(t, matches, qg1SidsA) @@ -270,6 +278,10 @@ func TestClusterQueueSubs(t *testing.T) { // Send to B sendB("PUB foo 2\r\nok\r\n") + // Give plenty of time for the messages to flush, so that we don't + // accidentally only read some of them. + time.Sleep(time.Millisecond * 250) + // Should receive 1 from B. matches = expectMsgsB(1) checkForQueueSid(t, matches, qg2SidsB) @@ -308,6 +320,10 @@ func TestClusterQueueSubs(t *testing.T) { // Send to A sendA("PUB foo 2\r\nok\r\n") + // Give plenty of time for the messages to flush, so that we don't + // accidentally only read some of them. + time.Sleep(time.Millisecond * 250) + // Should receive 4 now. matches = expectMsgsA(4) checkForPubSids(t, matches, pSids) @@ -390,6 +406,8 @@ func TestClusterDoubleMsgs(t *testing.T) { sendB("PING\r\n") expectB(pongRe) + time.Sleep(10 * time.Millisecond) + matches = expectMsgsA2(2) checkMsg(t, matches[0], "foo", "", "", "2", "ok") checkForPubSids(t, matches, pSids) @@ -588,6 +606,7 @@ func (c *captureErrLogger) Errorf(format string, v ...interface{}) { } } +// ** added by Memphis func (c *captureErrLogger) Systemf(format string, v ...interface{}) { msg := fmt.Sprintf(format, v...) 
select { @@ -596,6 +615,8 @@ func (c *captureErrLogger) Systemf(format string, v ...interface{}) { } } +// ** added by Memphis + func TestClusterNameConflictsDropRoutes(t *testing.T) { ll := &captureErrLogger{ch: make(chan string, 4)} diff --git a/test/cluster_tls_test.go b/test/cluster_tls_test.go index 80bd1f3e3..e172cd6db 100644 --- a/test/cluster_tls_test.go +++ b/test/cluster_tls_test.go @@ -82,6 +82,7 @@ func (c *captureTLSError) Errorf(format string, v ...interface{}) { } } +// ** added by Memphis func (c *captureTLSError) Systemf(format string, v ...interface{}) { msg := fmt.Sprintf(format, v...) if strings.Contains(msg, "handshake error") { @@ -92,6 +93,8 @@ func (c *captureTLSError) Systemf(format string, v ...interface{}) { } } +// ** added by Memphis + type captureClusterTLSInsecureLogger struct { dummyLogger ch chan struct{} @@ -107,6 +110,7 @@ func (c *captureClusterTLSInsecureLogger) Warnf(format string, v ...interface{}) } } +// ** added by Memphis func (c *captureClusterTLSInsecureLogger) Systemf(format string, v ...interface{}) { msg := fmt.Sprintf(format, v...) if strings.Contains(msg, "solicited routes will not be verified") { @@ -117,6 +121,8 @@ func (c *captureClusterTLSInsecureLogger) Systemf(format string, v ...interface{ } } +// ** added by Memphis + func TestClusterTLSInsecure(t *testing.T) { confA := createConfFile(t, []byte(` port: -1 diff --git a/test/configs/certs/ocsp_peer/mini-ca/caocsp/caocsp_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/caocsp/caocsp_cert.pem new file mode 100644 index 000000000..b6d024a90 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/caocsp/caocsp_cert.pem @@ -0,0 +1,91 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 17:37:00:a1:ce:35:e0:84:dd:e9:30:0c:a7:12:b9:50:88:9c:16:07 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:02:58 2023 GMT + Not After : Apr 28 19:02:58 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=CA OCSP Responder + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:d6:10:15:61:34:1e:97:0d:c6:c2:7d:f2:0f:9a: + 35:56:54:7a:9b:9e:a3:0b:ff:31:0d:db:49:4b:98: + e0:64:3a:3c:7f:4f:4b:d0:a8:01:80:c9:68:4e:76: + 3b:be:7b:d9:56:8d:d4:fd:bf:e1:6f:d0:5c:88:07: + 3f:05:a8:83:b3:7e:0b:ba:e0:36:f6:1c:e0:75:fd: + be:38:26:33:1b:42:96:4e:62:0b:88:36:ef:cc:14: + e3:97:86:dd:c2:78:d3:05:b7:4d:cd:2b:52:f2:11: + 16:d2:7e:8f:f3:47:8c:f9:0f:1e:cd:5e:f7:a4:1c: + 62:34:03:70:74:89:6b:bc:75:e3:30:82:c1:5b:67: + f3:d1:ca:81:13:10:d8:c5:d8:20:05:6d:d1:e7:51: + 19:ac:03:96:2a:a1:21:ff:88:2e:d2:e9:67:79:cf: + ef:17:b5:2b:7c:10:1f:5e:79:3e:08:98:7f:42:bb: + 8a:13:17:2d:9a:1a:8d:ff:36:c2:e9:c0:07:ea:cb: + 4f:72:35:f7:f2:d9:86:d2:ab:6b:70:2b:57:82:c8: + 02:93:aa:04:aa:00:3a:53:23:3d:61:82:32:0e:68: + 33:7e:5f:03:52:c9:53:db:e3:26:46:8a:ab:e0:e5: + 54:57:0d:e3:e3:24:b8:d9:69:92:0a:fb:bd:51:25: + 89:fd + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + D2:00:A2:C3:AA:00:76:1C:E7:67:37:96:89:77:38:69:C5:1B:5E:45 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Key Usage: critical + Digital Signature + X509v3 Extended Key Usage: critical + OCSP Signing + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: 
sha256WithRSAEncryption + Signature Value: + b0:36:29:84:91:de:14:e5:db:bf:55:fc:d8:0a:81:b5:df:84: + e4:5c:ae:e2:3c:1d:05:09:8a:85:7a:9e:f4:82:61:1b:7b:8a: + 0f:1d:e3:ad:b0:60:45:12:2e:38:6d:9c:95:d2:42:fe:2e:1a: + d2:a5:2c:82:40:1e:6c:4b:35:d1:3c:a6:4c:1c:73:c9:d0:32: + e9:47:c9:9a:fa:d0:1a:ef:86:c7:1e:49:ca:62:f1:81:9d:4e: + 38:35:56:1b:53:fe:4a:f4:4c:91:31:8f:32:70:64:ee:91:f7: + 4e:fe:ab:c5:1e:84:d1:43:cd:af:f6:5d:2a:b1:4f:b1:f4:1f: + 5a:9d:33:7a:48:94:c8:88:23:e5:b9:c8:a1:4d:51:4c:d5:3b: + 5f:f7:e8:e5:e1:53:a6:de:c8:95:14:32:e0:52:db:43:d6:c9: + 2f:7f:96:07:fb:87:0a:f0:53:3d:ce:e1:56:6f:dc:0e:84:f3: + e2:ef:dc:17:0f:59:1f:1a:70:d5:7f:08:36:3d:7e:8e:f8:1f: + 55:47:9a:96:1b:11:25:d9:27:7f:bf:e1:65:e5:16:ca:d9:bc: + 6f:5c:5e:a6:4c:d0:7a:24:8d:42:c4:dc:b5:4a:75:4a:7c:88: + da:21:5e:27:e1:0c:36:64:69:10:58:81:3d:cd:74:df:50:85: + c2:71:fe:43 +-----BEGIN CERTIFICATE----- +MIIEGzCCAwOgAwIBAgIUFzcAoc414ITd6TAMpxK5UIicFgcwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDI1OFoXDTMzMDQyODE5MDI1OFowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFD +QSBPQ1NQIFJlc3BvbmRlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANYQFWE0HpcNxsJ98g+aNVZUepueowv/MQ3bSUuY4GQ6PH9PS9CoAYDJaE52O757 +2VaN1P2/4W/QXIgHPwWog7N+C7rgNvYc4HX9vjgmMxtClk5iC4g278wU45eG3cJ4 +0wW3Tc0rUvIRFtJ+j/NHjPkPHs1e96QcYjQDcHSJa7x14zCCwVtn89HKgRMQ2MXY +IAVt0edRGawDliqhIf+ILtLpZ3nP7xe1K3wQH155PgiYf0K7ihMXLZoajf82wunA +B+rLT3I19/LZhtKra3ArV4LIApOqBKoAOlMjPWGCMg5oM35fA1LJU9vjJkaKq+Dl +VFcN4+MkuNlpkgr7vVElif0CAwEAAaOB4jCB3zAdBgNVHQ4EFgQU0gCiw6oAdhzn +ZzeWiXc4acUbXkUwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwDAYD +VR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAwwCgYIKwYBBQUH +AwkwMwYDVR0fBCwwKjAooCagJIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3Rf +Y3JsLmRlcjAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcu +MC4wLjE6ODg4OC8wDQYJKoZIhvcNAQELBQADggEBALA2KYSR3hTl279V/NgKgbXf +hORcruI8HQUJioV6nvSCYRt7ig8d462wYEUSLjhtnJXSQv4uGtKlLIJAHmxLNdE8 +pkwcc8nQMulHyZr60BrvhsceScpi8YGdTjg1VhtT/kr0TJExjzJwZO6R907+q8Ue +hNFDza/2XSqxT7H0H1qdM3pIlMiII+W5yKFNUUzVO1/36OXhU6beyJUUMuBS20PW +yS9/lgf7hwrwUz3O4VZv3A6E8+Lv3BcPWR8acNV/CDY9fo74H1VHmpYbESXZJ3+/ +4WXlFsrZvG9cXqZM0HokjULE3LVKdUp8iNohXifhDDZkaRBYgT3NdN9QhcJx/kM= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/caocsp/private/caocsp_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/caocsp/private/caocsp_keypair.pem new file mode 100644 index 000000000..e3ac9b3a4 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/caocsp/private/caocsp_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDWEBVhNB6XDcbC +ffIPmjVWVHqbnqML/zEN20lLmOBkOjx/T0vQqAGAyWhOdju+e9lWjdT9v+Fv0FyI +Bz8FqIOzfgu64Db2HOB1/b44JjMbQpZOYguINu/MFOOXht3CeNMFt03NK1LyERbS +fo/zR4z5Dx7NXvekHGI0A3B0iWu8deMwgsFbZ/PRyoETENjF2CAFbdHnURmsA5Yq +oSH/iC7S6Wd5z+8XtSt8EB9eeT4ImH9Cu4oTFy2aGo3/NsLpwAfqy09yNffy2YbS +q2twK1eCyAKTqgSqADpTIz1hgjIOaDN+XwNSyVPb4yZGiqvg5VRXDePjJLjZaZIK ++71RJYn9AgMBAAECggEACnoECdtaqervMOKoH7Jc7Oo6i/ZCJZqqRHLYjf4f8VfW +USbI35/xXuO8mqZ3uxVlqDJN29NxzZ6lgLTWFUlPlM/U9CL4HaiBJdUy452e/7UN +FS4AQXzq1JKrJuXfYZ63OT7k7Gcz6owCkW/HTNFSKXhfeg6tURdgiQooDVQSdUk6 +xX4gVEK3skozRXf4mrjTaNnFCOk2+sZdqrRn19ZAUGRisv6ECf8/wQlh3+ySfPYV +u+BHQqzntToYP0HUZAO6rezcTVayW25E+AaOqNdNmSqcOX218ohVCzwzFpzIk8LW +jYLyGQBhHHcw+RHJeitcHrDuTTpOZFznQxzHiGH3AwKBgQD91TNHx9Y9jUBkISSi 
+XylSiZEAOjPl4VrhRfI5OUx1l3XTqB/e3xBYLwxEpXjs7m3qyXhCe+rVuIwSjLzc +mLCspPZw/fxdRefWW5B+v1HbHxC3/lBOhqaDfLL6x4A/q3n/itG9X0GjpfvRkdJY +GYOJea/2rJuMsFs3atX160p4cwKBgQDX4/VXJWxxWUJbObwoxxABG9VTZdI6Dsqr +8tgg+7NPqw3PAo5W+XLsGZCSWQfJTD49AHcHBon5IfEDa5srfKsOXFXoiNEdCjIG +zJ9mNtGMokXOWLKgxMoqHz+WnqWgxi9D7QwWWNq5hWnACJUqeqelRMzoNkmr96DX +NloqHREHzwKBgQC0jKnlLOfe8FIU5t5AAKBL7T4Og1fW8+zIwBADVBZmrk1JOBUz +Wkct8okvauQQ46ebkaLQ54OqcZJwv1q3LoS8yLnitUaEseyuNIMbJMr8qaQiu+oz +cOOQM2q7ppw6raYhdoSpxs/Rr4bnEmoj8EH3z26ybyRVdjvrtzppqetWsQKBgQCa +YogGA9siy6PqPMVTm9bUFCVfeEb4Aa/pesYYACbgaAB98uP7SnNmZ3m9TjGFQCKZ +2QVFXuW35Q/HVGIonQRuRpWgroZr7+iKeDXdEIKVwU2OHFvRICk6KhJ9EYJ8EH2o +Y5HrQStY1BElpH2XXRMZ2rN1s6zHb1Pz0whzaUnOfQKBgQCpfJYh1Yzpryb0hkfa +MAL2Rsw+mpYeJ27Bmv+taW5iEVMQr2AEYNJhQx1SjNZOml2mqY6un4UPwhUwqAqg +SOgWNQGD5g6xoM6Hom+nZG03QacCYUOaD6xDmVKTY0LnzVBwspfvrIgLKgZ7IWBx +KlqvY5FJ+NXg3wHLNwgGzkgVPg== +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/System_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/System_bundle.pem new file mode 100644 index 000000000..e89690691 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/System_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7c:43:65:c7:cf:27:e3:83:ae:2f:60:ac:03:e5:f2:b6:22:88:bc:a2 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:37:36 2023 GMT + Not After : Apr 28 19:37:36 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=System + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:a3:21:2f:74:34:c1:1b:41:90:b6:4e:41:72:e0: + 3f:9e:49:94:55:ec:02:4c:dd:14:80:b8:3d:c6:c7: + 47:bb:a5:59:c3:35:86:89:17:08:ce:fe:71:e6:2f: + 9c:c1:db:d2:7e:14:24:da:61:30:3a:e7:6e:b1:e3: + 21:38:81:bc:47:df:b2:7f:1f:60:be:3d:c5:ed:76: + 03:94:e3:c4:b3:3e:bf:f8:43:ba:c2:54:bc:bb:66: + 59:98:a3:f9:aa:e3:10:e8:c3:88:dc:1a:18:6f:dd: + 90:eb:6f:a3:4b:d4:af:34:5c:43:20:d5:5b:e7:98: + a5:7c:7b:a9:15:86:bb:28:bf:ba:e0:bb:f7:1c:08: + c4:26:eb:c1:ac:05:1f:74:4f:05:11:57:e0:12:77: + 17:9e:89:dd:a5:38:ee:cf:cf:67:be:0c:5e:6a:4a: + 74:61:21:79:8e:c3:28:f1:e2:06:00:2d:ea:3a:6d: + e2:a6:25:fd:2d:8b:f5:82:36:91:8a:21:f0:6a:93: + 19:d6:76:08:fd:cd:ee:90:a9:a9:cf:99:30:71:46: + 57:ea:fb:c5:65:4f:7c:86:5c:9d:d7:b4:c3:27:3c: + eb:27:dd:bc:55:76:1f:25:0d:cb:6f:43:9a:9f:ba: + de:54:c1:90:03:9e:e5:0d:d9:cd:84:d4:58:74:63: + be:59 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + A0:FA:B5:24:42:70:DF:E1:BB:E6:10:62:BE:FE:F5:81:13:2F:31:9B + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + email:System@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + ad:00:40:7a:34:ad:07:e9:ed:fa:8f:1f:48:08:79:81:a8:3c: + 90:da:05:95:74:05:51:9c:17:a8:5c:03:09:c8:f8:2c:09:64: + e2:7c:fc:69:e1:c0:5d:8a:d9:f0:f3:e4:cd:2c:5e:43:77:71: + f8:58:20:88:8f:63:e1:b4:86:db:7a:54:df:ce:be:01:e2:55: + 
a2:70:a8:89:64:cf:2a:13:78:91:de:83:ed:d6:74:24:00:ca: + 3d:67:4a:cd:e3:82:b9:56:a3:3a:b4:80:b2:ac:61:e9:75:6c: + 30:1c:81:96:2f:f0:99:b2:7b:73:b5:45:b0:3c:20:ed:54:b3: + 87:37:9f:5e:07:c4:8a:72:94:53:4e:a2:a0:83:bc:fb:61:59: + ff:8c:91:1c:db:ad:7a:e0:12:e3:a3:b1:91:97:d4:c7:ed:02: + 6e:7e:01:d8:d6:d5:6d:81:a2:32:ca:8c:6d:32:91:40:97:e5: + a1:ad:22:7d:af:ab:ce:68:0b:69:52:53:8a:80:dd:f3:9f:a8: + 1f:34:a7:1f:37:58:cb:6c:da:54:cf:cc:0b:67:95:e9:6e:30: + a4:ce:12:c4:5a:e0:d4:92:fb:0b:67:a8:51:ad:dc:4a:d0:ad: + fb:92:77:85:a5:9d:84:ff:99:50:ca:15:4f:d4:30:c8:85:ca: + 95:a0:88:62 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUfENlx88n44OuL2CsA+XytiKIvKIwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTM3MzZaFw0zMzA0MjgxOTM3MzZaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGU3lzdGVtMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +oyEvdDTBG0GQtk5BcuA/nkmUVewCTN0UgLg9xsdHu6VZwzWGiRcIzv5x5i+cwdvS +fhQk2mEwOuduseMhOIG8R9+yfx9gvj3F7XYDlOPEsz6/+EO6wlS8u2ZZmKP5quMQ +6MOI3BoYb92Q62+jS9SvNFxDINVb55ilfHupFYa7KL+64Lv3HAjEJuvBrAUfdE8F +EVfgEncXnondpTjuz89nvgxeakp0YSF5jsMo8eIGAC3qOm3ipiX9LYv1gjaRiiHw +apMZ1nYI/c3ukKmpz5kwcUZX6vvFZU98hlyd17TDJzzrJ928VXYfJQ3Lb0Oan7re +VMGQA57lDdnNhNRYdGO+WQIDAQABo4IBJDCCASAwHQYDVR0OBBYEFKD6tSRCcN/h +u+YQYr7+9YETLzGbMB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWYGo6dMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToxODg4OC9pbnRlcm1lZGlhdGUxX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjE4ODg4 +LzAaBgNVHREEEzARgQ9TeXN0ZW1AdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AK0AQHo0rQfp7fqPH0gIeYGoPJDaBZV0BVGcF6hcAwnI+CwJZOJ8/GnhwF2K2fDz +5M0sXkN3cfhYIIiPY+G0htt6VN/OvgHiVaJwqIlkzyoTeJHeg+3WdCQAyj1nSs3j +grlWozq0gLKsYel1bDAcgZYv8Jmye3O1RbA8IO1Us4c3n14HxIpylFNOoqCDvPth +Wf+MkRzbrXrgEuOjsZGX1MftAm5+AdjW1W2BojLKjG0ykUCX5aGtIn2vq85oC2lS +U4qA3fOfqB80px83WMts2lTPzAtnleluMKTOEsRa4NSS+wtnqFGt3ErQrfuSd4Wl +nYT/mVDKFU/UMMiFypWgiGI= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key 
Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/System_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/System_cert.pem new file mode 100644 index 000000000..335485315 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/System_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7c:43:65:c7:cf:27:e3:83:ae:2f:60:ac:03:e5:f2:b6:22:88:bc:a2 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:37:36 2023 GMT + Not After : Apr 28 19:37:36 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=System + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + 
Public-Key: (2048 bit) + Modulus: + 00:a3:21:2f:74:34:c1:1b:41:90:b6:4e:41:72:e0: + 3f:9e:49:94:55:ec:02:4c:dd:14:80:b8:3d:c6:c7: + 47:bb:a5:59:c3:35:86:89:17:08:ce:fe:71:e6:2f: + 9c:c1:db:d2:7e:14:24:da:61:30:3a:e7:6e:b1:e3: + 21:38:81:bc:47:df:b2:7f:1f:60:be:3d:c5:ed:76: + 03:94:e3:c4:b3:3e:bf:f8:43:ba:c2:54:bc:bb:66: + 59:98:a3:f9:aa:e3:10:e8:c3:88:dc:1a:18:6f:dd: + 90:eb:6f:a3:4b:d4:af:34:5c:43:20:d5:5b:e7:98: + a5:7c:7b:a9:15:86:bb:28:bf:ba:e0:bb:f7:1c:08: + c4:26:eb:c1:ac:05:1f:74:4f:05:11:57:e0:12:77: + 17:9e:89:dd:a5:38:ee:cf:cf:67:be:0c:5e:6a:4a: + 74:61:21:79:8e:c3:28:f1:e2:06:00:2d:ea:3a:6d: + e2:a6:25:fd:2d:8b:f5:82:36:91:8a:21:f0:6a:93: + 19:d6:76:08:fd:cd:ee:90:a9:a9:cf:99:30:71:46: + 57:ea:fb:c5:65:4f:7c:86:5c:9d:d7:b4:c3:27:3c: + eb:27:dd:bc:55:76:1f:25:0d:cb:6f:43:9a:9f:ba: + de:54:c1:90:03:9e:e5:0d:d9:cd:84:d4:58:74:63: + be:59 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + A0:FA:B5:24:42:70:DF:E1:BB:E6:10:62:BE:FE:F5:81:13:2F:31:9B + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + email:System@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + ad:00:40:7a:34:ad:07:e9:ed:fa:8f:1f:48:08:79:81:a8:3c: + 90:da:05:95:74:05:51:9c:17:a8:5c:03:09:c8:f8:2c:09:64: + e2:7c:fc:69:e1:c0:5d:8a:d9:f0:f3:e4:cd:2c:5e:43:77:71: + f8:58:20:88:8f:63:e1:b4:86:db:7a:54:df:ce:be:01:e2:55: + a2:70:a8:89:64:cf:2a:13:78:91:de:83:ed:d6:74:24:00:ca: + 3d:67:4a:cd:e3:82:b9:56:a3:3a:b4:80:b2:ac:61:e9:75:6c: + 30:1c:81:96:2f:f0:99:b2:7b:73:b5:45:b0:3c:20:ed:54:b3: + 87:37:9f:5e:07:c4:8a:72:94:53:4e:a2:a0:83:bc:fb:61:59: + ff:8c:91:1c:db:ad:7a:e0:12:e3:a3:b1:91:97:d4:c7:ed:02: + 6e:7e:01:d8:d6:d5:6d:81:a2:32:ca:8c:6d:32:91:40:97:e5: + a1:ad:22:7d:af:ab:ce:68:0b:69:52:53:8a:80:dd:f3:9f:a8: + 1f:34:a7:1f:37:58:cb:6c:da:54:cf:cc:0b:67:95:e9:6e:30: + a4:ce:12:c4:5a:e0:d4:92:fb:0b:67:a8:51:ad:dc:4a:d0:ad: + fb:92:77:85:a5:9d:84:ff:99:50:ca:15:4f:d4:30:c8:85:ca: + 95:a0:88:62 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUfENlx88n44OuL2CsA+XytiKIvKIwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTM3MzZaFw0zMzA0MjgxOTM3MzZaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGU3lzdGVtMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +oyEvdDTBG0GQtk5BcuA/nkmUVewCTN0UgLg9xsdHu6VZwzWGiRcIzv5x5i+cwdvS +fhQk2mEwOuduseMhOIG8R9+yfx9gvj3F7XYDlOPEsz6/+EO6wlS8u2ZZmKP5quMQ +6MOI3BoYb92Q62+jS9SvNFxDINVb55ilfHupFYa7KL+64Lv3HAjEJuvBrAUfdE8F +EVfgEncXnondpTjuz89nvgxeakp0YSF5jsMo8eIGAC3qOm3ipiX9LYv1gjaRiiHw +apMZ1nYI/c3ukKmpz5kwcUZX6vvFZU98hlyd17TDJzzrJ928VXYfJQ3Lb0Oan7re +VMGQA57lDdnNhNRYdGO+WQIDAQABo4IBJDCCASAwHQYDVR0OBBYEFKD6tSRCcN/h +u+YQYr7+9YETLzGbMB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWYGo6dMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToxODg4OC9pbnRlcm1lZGlhdGUxX2NybC5kZXIwMwYI 
+KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjE4ODg4 +LzAaBgNVHREEEzARgQ9TeXN0ZW1AdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AK0AQHo0rQfp7fqPH0gIeYGoPJDaBZV0BVGcF6hcAwnI+CwJZOJ8/GnhwF2K2fDz +5M0sXkN3cfhYIIiPY+G0htt6VN/OvgHiVaJwqIlkzyoTeJHeg+3WdCQAyj1nSs3j +grlWozq0gLKsYel1bDAcgZYv8Jmye3O1RbA8IO1Us4c3n14HxIpylFNOoqCDvPth +Wf+MkRzbrXrgEuOjsZGX1MftAm5+AdjW1W2BojLKjG0ykUCX5aGtIn2vq85oC2lS +U4qA3fOfqB80px83WMts2lTPzAtnleluMKTOEsRa4NSS+wtnqFGt3ErQrfuSd4Wl +nYT/mVDKFU/UMMiFypWgiGI= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem new file mode 100644 index 000000000..a27daa1f0 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 5c:a1:af:d5:7c:bb:16:ef:c2:c7:e6:53:fc:94:1a:ed:24:bb:b4:17 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:37:36 2023 GMT + Not After : Apr 28 19:37:36 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserA1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b4:eb:22:e2:c4:ba:7f:33:aa:57:ab:13:f1:69: + 09:98:28:3c:7d:a7:e2:41:2a:28:2f:f9:85:a1:6c: + 94:ee:0a:eb:4d:01:4c:28:7c:9d:05:4d:d8:10:7f: + b7:cf:13:c2:a6:de:11:0c:97:38:97:cd:6d:11:fd: + 16:76:c0:eb:5a:b7:7b:17:13:45:9d:4b:00:4f:26: + c5:b1:9b:67:93:2c:d6:d5:33:37:e1:50:1d:7b:0d: + be:8c:cb:bd:29:99:8f:54:f6:7e:04:84:82:2a:28: + ee:71:3e:8d:5f:72:b2:6a:77:6b:47:3e:ba:4d:b3: + e2:96:14:71:0a:1e:26:16:8f:6c:1b:07:2a:ac:15: + 89:1e:88:63:c3:81:3b:91:e9:f3:43:1b:f0:ec:08: + 24:96:46:27:21:2a:56:25:2c:b6:cc:d9:02:70:77: + 9d:e4:7c:44:8c:93:04:85:a3:09:0a:8e:f5:e7:21: + fa:bd:56:28:b7:52:20:09:ec:9a:c4:d4:d7:8a:19: + 4e:7a:10:e9:b2:10:36:68:ce:ce:78:8b:79:3f:6f: + 70:3b:75:6d:70:59:3a:c9:85:a8:f8:23:d4:ab:44: + c2:ae:f5:1c:6e:38:11:e1:5f:cc:8f:e2:43:f5:b3: + 0e:09:17:b3:c6:ee:47:fb:39:c4:58:62:ba:e3:a8: + c5:ef + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 70:55:CA:CA:A5:8F:4D:73:39:47:E2:97:A3:1F:F6:3E:33:C9:7A:BF + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + email:UserA1@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 99:81:61:3a:f1:c2:de:05:ad:ab:f3:fd:e0:d5:97:5b:fe:b2: + fa:e2:5f:ab:41:9d:71:1d:10:54:0b:bc:b5:c9:8d:26:91:a9: + 45:71:51:14:61:a7:3c:ef:1d:f7:db:71:2f:1f:c1:d7:80:96: + 03:5d:0d:69:81:fa:be:ca:f7:56:70:7b:89:ca:8f:b6:16:ee: + 4a:83:fc:70:2e:4b:0c:50:ba:c6:06:5e:58:bb:25:d6:19:40: + 82:b4:18:57:16:5f:f2:98:3e:5d:9d:72:7a:8f:20:de:25:c2: + 06:a7:46:b2:cc:4c:f9:da:a7:43:f5:a0:92:e4:e2:05:49:43: + 9d:58:9f:20:5d:e2:88:77:f1:10:0c:f5:fc:a2:85:b6:41:0a: + 1a:12:75:1e:47:3b:b3:4f:c9:45:71:99:b6:14:e9:6b:7d:7a: + 98:ee:82:dd:59:f6:af:fa:a5:d1:1c:24:db:66:e7:82:bb:53: + 70:4f:27:96:dc:19:c0:9e:2d:df:da:00:2f:c3:22:9e:71:9c: + 
b3:89:da:0a:79:c3:f6:e3:9b:ca:b7:db:b6:5c:8f:e9:29:cb: + d0:9c:e3:0e:0f:7c:2c:b5:b0:36:a9:13:38:d2:8e:6f:6a:6c: + 0a:7f:3f:dd:af:b1:e2:ea:c6:de:1d:b0:97:c9:36:1d:85:81: + aa:42:9f:53 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUXKGv1Xy7Fu/Cx+ZT/JQa7SS7tBcwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTM3MzZaFw0zMzA0MjgxOTM3MzZaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +tOsi4sS6fzOqV6sT8WkJmCg8fafiQSooL/mFoWyU7grrTQFMKHydBU3YEH+3zxPC +pt4RDJc4l81tEf0WdsDrWrd7FxNFnUsATybFsZtnkyzW1TM34VAdew2+jMu9KZmP +VPZ+BISCKijucT6NX3KyandrRz66TbPilhRxCh4mFo9sGwcqrBWJHohjw4E7kenz +Qxvw7AgklkYnISpWJSy2zNkCcHed5HxEjJMEhaMJCo715yH6vVYot1IgCeyaxNTX +ihlOehDpshA2aM7OeIt5P29wO3VtcFk6yYWo+CPUq0TCrvUcbjgR4V/Mj+JD9bMO +CRezxu5H+znEWGK646jF7wIDAQABo4IBJDCCASAwHQYDVR0OBBYEFHBVysqlj01z +OUfil6Mf9j4zyXq/MB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWYGo6dMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToxODg4OC9pbnRlcm1lZGlhdGUxX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjE4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQTFAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AJmBYTrxwt4Fravz/eDVl1v+svriX6tBnXEdEFQLvLXJjSaRqUVxURRhpzzvHffb +cS8fwdeAlgNdDWmB+r7K91Zwe4nKj7YW7kqD/HAuSwxQusYGXli7JdYZQIK0GFcW +X/KYPl2dcnqPIN4lwganRrLMTPnap0P1oJLk4gVJQ51YnyBd4oh38RAM9fyihbZB +ChoSdR5HO7NPyUVxmbYU6Wt9epjugt1Z9q/6pdEcJNtm54K7U3BPJ5bcGcCeLd/a +AC/DIp5xnLOJ2gp5w/bjm8q327Zcj+kpy9Cc4w4PfCy1sDapEzjSjm9qbAp/P92v +seLqxt4dsJfJNh2FgapCn1M= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + 
Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem new file mode 100644 index 000000000..a2c5078bc --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 5c:a1:af:d5:7c:bb:16:ef:c2:c7:e6:53:fc:94:1a:ed:24:bb:b4:17 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:37:36 2023 GMT + Not After : Apr 28 19:37:36 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserA1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b4:eb:22:e2:c4:ba:7f:33:aa:57:ab:13:f1:69: + 09:98:28:3c:7d:a7:e2:41:2a:28:2f:f9:85:a1:6c: + 94:ee:0a:eb:4d:01:4c:28:7c:9d:05:4d:d8:10:7f: + b7:cf:13:c2:a6:de:11:0c:97:38:97:cd:6d:11:fd: + 16:76:c0:eb:5a:b7:7b:17:13:45:9d:4b:00:4f:26: + c5:b1:9b:67:93:2c:d6:d5:33:37:e1:50:1d:7b:0d: + be:8c:cb:bd:29:99:8f:54:f6:7e:04:84:82:2a:28: + 
ee:71:3e:8d:5f:72:b2:6a:77:6b:47:3e:ba:4d:b3: + e2:96:14:71:0a:1e:26:16:8f:6c:1b:07:2a:ac:15: + 89:1e:88:63:c3:81:3b:91:e9:f3:43:1b:f0:ec:08: + 24:96:46:27:21:2a:56:25:2c:b6:cc:d9:02:70:77: + 9d:e4:7c:44:8c:93:04:85:a3:09:0a:8e:f5:e7:21: + fa:bd:56:28:b7:52:20:09:ec:9a:c4:d4:d7:8a:19: + 4e:7a:10:e9:b2:10:36:68:ce:ce:78:8b:79:3f:6f: + 70:3b:75:6d:70:59:3a:c9:85:a8:f8:23:d4:ab:44: + c2:ae:f5:1c:6e:38:11:e1:5f:cc:8f:e2:43:f5:b3: + 0e:09:17:b3:c6:ee:47:fb:39:c4:58:62:ba:e3:a8: + c5:ef + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 70:55:CA:CA:A5:8F:4D:73:39:47:E2:97:A3:1F:F6:3E:33:C9:7A:BF + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + email:UserA1@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 99:81:61:3a:f1:c2:de:05:ad:ab:f3:fd:e0:d5:97:5b:fe:b2: + fa:e2:5f:ab:41:9d:71:1d:10:54:0b:bc:b5:c9:8d:26:91:a9: + 45:71:51:14:61:a7:3c:ef:1d:f7:db:71:2f:1f:c1:d7:80:96: + 03:5d:0d:69:81:fa:be:ca:f7:56:70:7b:89:ca:8f:b6:16:ee: + 4a:83:fc:70:2e:4b:0c:50:ba:c6:06:5e:58:bb:25:d6:19:40: + 82:b4:18:57:16:5f:f2:98:3e:5d:9d:72:7a:8f:20:de:25:c2: + 06:a7:46:b2:cc:4c:f9:da:a7:43:f5:a0:92:e4:e2:05:49:43: + 9d:58:9f:20:5d:e2:88:77:f1:10:0c:f5:fc:a2:85:b6:41:0a: + 1a:12:75:1e:47:3b:b3:4f:c9:45:71:99:b6:14:e9:6b:7d:7a: + 98:ee:82:dd:59:f6:af:fa:a5:d1:1c:24:db:66:e7:82:bb:53: + 70:4f:27:96:dc:19:c0:9e:2d:df:da:00:2f:c3:22:9e:71:9c: + b3:89:da:0a:79:c3:f6:e3:9b:ca:b7:db:b6:5c:8f:e9:29:cb: + d0:9c:e3:0e:0f:7c:2c:b5:b0:36:a9:13:38:d2:8e:6f:6a:6c: + 0a:7f:3f:dd:af:b1:e2:ea:c6:de:1d:b0:97:c9:36:1d:85:81: + aa:42:9f:53 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUXKGv1Xy7Fu/Cx+ZT/JQa7SS7tBcwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTM3MzZaFw0zMzA0MjgxOTM3MzZaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +tOsi4sS6fzOqV6sT8WkJmCg8fafiQSooL/mFoWyU7grrTQFMKHydBU3YEH+3zxPC +pt4RDJc4l81tEf0WdsDrWrd7FxNFnUsATybFsZtnkyzW1TM34VAdew2+jMu9KZmP +VPZ+BISCKijucT6NX3KyandrRz66TbPilhRxCh4mFo9sGwcqrBWJHohjw4E7kenz +Qxvw7AgklkYnISpWJSy2zNkCcHed5HxEjJMEhaMJCo715yH6vVYot1IgCeyaxNTX +ihlOehDpshA2aM7OeIt5P29wO3VtcFk6yYWo+CPUq0TCrvUcbjgR4V/Mj+JD9bMO +CRezxu5H+znEWGK646jF7wIDAQABo4IBJDCCASAwHQYDVR0OBBYEFHBVysqlj01z +OUfil6Mf9j4zyXq/MB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWYGo6dMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToxODg4OC9pbnRlcm1lZGlhdGUxX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjE4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQTFAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AJmBYTrxwt4Fravz/eDVl1v+svriX6tBnXEdEFQLvLXJjSaRqUVxURRhpzzvHffb +cS8fwdeAlgNdDWmB+r7K91Zwe4nKj7YW7kqD/HAuSwxQusYGXli7JdYZQIK0GFcW +X/KYPl2dcnqPIN4lwganRrLMTPnap0P1oJLk4gVJQ51YnyBd4oh38RAM9fyihbZB 
+ChoSdR5HO7NPyUVxmbYU6Wt9epjugt1Z9q/6pdEcJNtm54K7U3BPJ5bcGcCeLd/a +AC/DIp5xnLOJ2gp5w/bjm8q327Zcj+kpy9Cc4w4PfCy1sDapEzjSjm9qbAp/P92v +seLqxt4dsJfJNh2FgapCn1M= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_bundle.pem new file mode 100644 index 000000000..a181550a0 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7a:3d:fa:5b:9b:df:69:55:6e:9c:53:4c:fc:86:75:65:bc:78:4c:24 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:37:36 2023 GMT + Not After : Apr 28 19:37:36 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserA2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:a6:7c:40:80:2b:44:00:33:11:c6:c2:9d:67:3e: + 87:8e:7e:40:d3:f5:d3:27:b6:7d:18:3c:c0:86:ac: + 96:3a:ad:d8:c3:cb:ab:72:5e:4c:b7:24:45:da:c7: + a8:cc:74:b8:21:75:62:9e:81:88:96:54:6e:db:f9: + 8c:2f:4c:97:0d:ce:21:42:2f:92:57:7f:34:2b:02: + 43:4c:22:ae:14:ca:fc:b2:2c:d0:67:0e:52:e0:6d: + 61:96:a6:3b:cc:4f:6a:d6:ef:45:9c:74:92:25:6c: + 0a:10:62:1b:22:2b:11:6b:d1:52:4d:da:8d:c3:4a: + e6:74:a7:1b:1e:ef:8a:f4:96:88:02:0d:b7:57:35: + 9f:a3:ff:a2:2c:b7:0e:27:4e:79:2f:cf:0c:f1:91: + 0e:bf:01:d7:a2:71:2c:b7:0e:4b:7e:50:91:89:71: + c2:17:aa:cb:29:80:9e:d7:2b:fa:33:41:e8:82:d1: + 3a:97:3d:6c:de:66:9b:b4:ea:1a:eb:94:be:6e:c0: + 66:e8:77:3d:72:d5:5c:a5:e8:ab:3b:33:f4:b3:c2: + 26:49:bc:08:55:cf:16:b6:12:22:91:fe:c1:5a:b2: + d7:77:e3:f4:47:bc:c4:77:6b:f5:7f:c3:e8:48:99: + b9:a8:ea:b1:ae:e6:cc:3a:12:fa:4d:2f:5f:0f:a8: + fd:8d + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B8:8E:4F:76:F1:F8:3C:A5:23:C5:8F:A1:2E:64:3E:48:53:02:CD:6B + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + email:UserA2@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 7c:1b:ae:98:16:42:f3:b2:a6:66:e9:a4:4f:61:04:a8:23:d5: + 55:ea:d4:68:b5:98:fd:66:ff:10:dc:54:b7:01:78:4f:fc:e1: + 75:e8:09:6d:ad:ac:57:b0:33:41:26:3d:ac:b0:17:46:c4:6f: + 5b:c7:fa:ad:d2:94:13:ef:5e:bb:f5:ad:2d:39:85:d3:af:ff: + 56:8e:f6:d1:20:12:03:86:cd:e8:ad:38:49:30:fb:98:de:3a: + 5f:61:5a:08:37:a9:c3:10:ed:a3:60:3c:46:68:30:d8:4a:ac: + 5d:eb:fd:d9:5d:90:b1:f0:b8:a8:68:5e:c8:41:6f:de:eb:a1: + cc:33:98:2d:06:17:26:c4:24:bf:62:82:a9:13:04:71:3e:6e: + ca:20:cf:5c:c5:47:67:f5:db:2e:56:60:4c:52:0c:4e:59:16: + da:6a:e3:b2:e4:cb:d6:65:26:df:26:2e:e0:f4:11:b1:36:92: + 7c:ab:c3:c3:97:a5:06:26:54:5c:c1:35:a1:2f:e5:0f:2f:91: + 2d:cd:c5:dd:a7:f2:4c:e1:4d:0d:5c:bd:25:4f:c8:52:79:c2: + 29:78:ef:88:10:43:a4:c4:df:97:48:22:09:db:48:19:85:01: + 48:39:28:20:69:1d:31:b5:4f:97:e0:ea:38:6d:e0:98:4b:78: + a4:b7:fd:c2 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUej36W5vfaVVunFNM/IZ1Zbx4TCQwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx 
+ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTM3MzZaFw0zMzA0MjgxOTM3MzZaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckEyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +pnxAgCtEADMRxsKdZz6Hjn5A0/XTJ7Z9GDzAhqyWOq3Yw8urcl5MtyRF2seozHS4 +IXVinoGIllRu2/mML0yXDc4hQi+SV380KwJDTCKuFMr8sizQZw5S4G1hlqY7zE9q +1u9FnHSSJWwKEGIbIisRa9FSTdqNw0rmdKcbHu+K9JaIAg23VzWfo/+iLLcOJ055 +L88M8ZEOvwHXonEstw5LflCRiXHCF6rLKYCe1yv6M0HogtE6lz1s3mabtOoa65S+ +bsBm6Hc9ctVcpeirOzP0s8ImSbwIVc8WthIikf7BWrLXd+P0R7zEd2v1f8PoSJm5 +qOqxrubMOhL6TS9fD6j9jQIDAQABo4IBJDCCASAwHQYDVR0OBBYEFLiOT3bx+Dyl +I8WPoS5kPkhTAs1rMB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWYGo6dMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToxODg4OC9pbnRlcm1lZGlhdGUxX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjE4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQTJAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AHwbrpgWQvOypmbppE9hBKgj1VXq1Gi1mP1m/xDcVLcBeE/84XXoCW2trFewM0Em +PaywF0bEb1vH+q3SlBPvXrv1rS05hdOv/1aO9tEgEgOGzeitOEkw+5jeOl9hWgg3 +qcMQ7aNgPEZoMNhKrF3r/dldkLHwuKhoXshBb97rocwzmC0GFybEJL9igqkTBHE+ +bsogz1zFR2f12y5WYExSDE5ZFtpq47Lky9ZlJt8mLuD0EbE2knyrw8OXpQYmVFzB +NaEv5Q8vkS3Nxd2n8kzhTQ1cvSVPyFJ5wil474gQQ6TE35dIIgnbSBmFAUg5KCBp +HTG1T5fg6jht4JhLeKS3/cI= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 
40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_cert.pem new file mode 100644 index 000000000..19b70a48c --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/UserA2_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7a:3d:fa:5b:9b:df:69:55:6e:9c:53:4c:fc:86:75:65:bc:78:4c:24 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:37:36 2023 GMT + Not After : Apr 28 19:37:36 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserA2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:a6:7c:40:80:2b:44:00:33:11:c6:c2:9d:67:3e: + 87:8e:7e:40:d3:f5:d3:27:b6:7d:18:3c:c0:86:ac: + 96:3a:ad:d8:c3:cb:ab:72:5e:4c:b7:24:45:da:c7: + a8:cc:74:b8:21:75:62:9e:81:88:96:54:6e:db:f9: + 8c:2f:4c:97:0d:ce:21:42:2f:92:57:7f:34:2b:02: + 43:4c:22:ae:14:ca:fc:b2:2c:d0:67:0e:52:e0:6d: + 61:96:a6:3b:cc:4f:6a:d6:ef:45:9c:74:92:25:6c: + 0a:10:62:1b:22:2b:11:6b:d1:52:4d:da:8d:c3:4a: + e6:74:a7:1b:1e:ef:8a:f4:96:88:02:0d:b7:57:35: + 9f:a3:ff:a2:2c:b7:0e:27:4e:79:2f:cf:0c:f1:91: + 0e:bf:01:d7:a2:71:2c:b7:0e:4b:7e:50:91:89:71: + c2:17:aa:cb:29:80:9e:d7:2b:fa:33:41:e8:82:d1: + 3a:97:3d:6c:de:66:9b:b4:ea:1a:eb:94:be:6e:c0: + 
66:e8:77:3d:72:d5:5c:a5:e8:ab:3b:33:f4:b3:c2: + 26:49:bc:08:55:cf:16:b6:12:22:91:fe:c1:5a:b2: + d7:77:e3:f4:47:bc:c4:77:6b:f5:7f:c3:e8:48:99: + b9:a8:ea:b1:ae:e6:cc:3a:12:fa:4d:2f:5f:0f:a8: + fd:8d + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B8:8E:4F:76:F1:F8:3C:A5:23:C5:8F:A1:2E:64:3E:48:53:02:CD:6B + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + email:UserA2@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 7c:1b:ae:98:16:42:f3:b2:a6:66:e9:a4:4f:61:04:a8:23:d5: + 55:ea:d4:68:b5:98:fd:66:ff:10:dc:54:b7:01:78:4f:fc:e1: + 75:e8:09:6d:ad:ac:57:b0:33:41:26:3d:ac:b0:17:46:c4:6f: + 5b:c7:fa:ad:d2:94:13:ef:5e:bb:f5:ad:2d:39:85:d3:af:ff: + 56:8e:f6:d1:20:12:03:86:cd:e8:ad:38:49:30:fb:98:de:3a: + 5f:61:5a:08:37:a9:c3:10:ed:a3:60:3c:46:68:30:d8:4a:ac: + 5d:eb:fd:d9:5d:90:b1:f0:b8:a8:68:5e:c8:41:6f:de:eb:a1: + cc:33:98:2d:06:17:26:c4:24:bf:62:82:a9:13:04:71:3e:6e: + ca:20:cf:5c:c5:47:67:f5:db:2e:56:60:4c:52:0c:4e:59:16: + da:6a:e3:b2:e4:cb:d6:65:26:df:26:2e:e0:f4:11:b1:36:92: + 7c:ab:c3:c3:97:a5:06:26:54:5c:c1:35:a1:2f:e5:0f:2f:91: + 2d:cd:c5:dd:a7:f2:4c:e1:4d:0d:5c:bd:25:4f:c8:52:79:c2: + 29:78:ef:88:10:43:a4:c4:df:97:48:22:09:db:48:19:85:01: + 48:39:28:20:69:1d:31:b5:4f:97:e0:ea:38:6d:e0:98:4b:78: + a4:b7:fd:c2 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUej36W5vfaVVunFNM/IZ1Zbx4TCQwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTM3MzZaFw0zMzA0MjgxOTM3MzZaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckEyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +pnxAgCtEADMRxsKdZz6Hjn5A0/XTJ7Z9GDzAhqyWOq3Yw8urcl5MtyRF2seozHS4 +IXVinoGIllRu2/mML0yXDc4hQi+SV380KwJDTCKuFMr8sizQZw5S4G1hlqY7zE9q +1u9FnHSSJWwKEGIbIisRa9FSTdqNw0rmdKcbHu+K9JaIAg23VzWfo/+iLLcOJ055 +L88M8ZEOvwHXonEstw5LflCRiXHCF6rLKYCe1yv6M0HogtE6lz1s3mabtOoa65S+ +bsBm6Hc9ctVcpeirOzP0s8ImSbwIVc8WthIikf7BWrLXd+P0R7zEd2v1f8PoSJm5 +qOqxrubMOhL6TS9fD6j9jQIDAQABo4IBJDCCASAwHQYDVR0OBBYEFLiOT3bx+Dyl +I8WPoS5kPkhTAs1rMB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWYGo6dMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToxODg4OC9pbnRlcm1lZGlhdGUxX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjE4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQTJAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AHwbrpgWQvOypmbppE9hBKgj1VXq1Gi1mP1m/xDcVLcBeE/84XXoCW2trFewM0Em +PaywF0bEb1vH+q3SlBPvXrv1rS05hdOv/1aO9tEgEgOGzeitOEkw+5jeOl9hWgg3 +qcMQ7aNgPEZoMNhKrF3r/dldkLHwuKhoXshBb97rocwzmC0GFybEJL9igqkTBHE+ +bsogz1zFR2f12y5WYExSDE5ZFtpq47Lky9ZlJt8mLuD0EbE2knyrw8OXpQYmVFzB +NaEv5Q8vkS3Nxd2n8kzhTQ1cvSVPyFJ5wil474gQQ6TE35dIIgnbSBmFAUg5KCBp +HTG1T5fg6jht4JhLeKS3/cI= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/certfile.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/certfile.pem new file mode 100644 
index 000000000..719d7516e --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/certfile.pem @@ -0,0 +1,175 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu 
+hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 27:5e:cf:7e:be:aa:02:b9:a9:c7:42:30:43:fe:0e:80:05:91:dd:0b + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 18:57:57 2023 GMT + Not After : Apr 28 18:57:57 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e2:21:6b:9f:ef:48:b9:de:22:fb:5b:37:09:68: + c7:b5:92:57:52:24:ef:85:00:e8:71:85:4d:0f:5b: + 8c:c6:e7:4f:19:f6:e3:0b:70:a3:41:7e:71:d4:0f: + d6:fd:f2:1a:ca:aa:57:91:76:9a:b2:82:62:60:ce: + f2:00:2e:d4:bc:58:d3:60:30:42:a6:28:b2:50:7b: + 58:01:9f:fb:0a:65:b0:40:d6:7c:e2:b7:da:8d:19: + d9:a5:51:d2:46:7e:14:46:ab:fa:df:ce:fe:84:08: + 98:63:46:1d:4d:8a:77:57:67:da:16:8b:32:0c:7c: + 41:e2:a5:ec:ee:7d:20:28:eb:03:5f:f5:e6:05:d8: + 8b:96:78:6f:ae:29:9a:50:f7:dc:96:31:86:81:b1: + 78:e8:eb:ef:5d:bb:ed:42:ec:94:c6:54:46:ec:05: + 6f:1b:0c:36:24:c6:a8:06:7e:5c:56:b8:43:3b:11: + f4:06:0a:05:15:19:3b:1f:c8:67:31:eb:3b:5b:2a: + 15:0a:7b:f9:6b:e4:10:ee:44:be:19:d8:db:44:01: + fa:3a:56:f5:6c:4e:f3:60:aa:e4:cd:b2:ad:77:07: + 45:ef:f1:d7:f5:fa:52:84:5c:03:4e:72:e0:a9:91: + c5:d9:d6:0a:84:33:98:31:f2:02:5b:3f:10:15:65: + 76:d7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 22:79:1a:b9:5d:fa:f5:c9:a3:88:22:c4:92:e6:64:6d:ce:a5: + ae:2e:69:48:6a:9e:d5:11:c5:bb:b0:de:38:1b:5b:04:85:60: + d6:64:14:ed:c2:62:02:7d:ad:d2:17:ad:ef:40:27:2b:50:59: + 4a:ff:88:c6:b3:16:5c:55:30:d9:23:bd:4f:0f:34:b7:7b:ed: + 7a:e1:f3:39:35:e9:18:6d:70:b1:2b:2a:e2:e5:cd:a1:54:8a: + f9:f4:95:81:29:84:3f:95:2f:48:e0:35:3e:d9:cb:84:4d:3d: + 3e:3c:0e:8d:24:42:5f:19:e6:06:a5:87:ae:ba:af:07:02:e7: + 6a:83:0a:89:d4:a4:38:ce:05:6e:f6:15:f1:7a:53:bb:50:28: + 89:51:3f:f2:54:f1:d3:c4:28:07:a1:3e:55:e5:84:b8:df:58: + af:c3:e7:81:c2:08:9c:35:e4:c4:86:75:a8:17:99:2c:a6:7f: + 46:30:9b:23:55:c5:d8:e2:6a:e4:08:a1:8b:dc:bc:5b:86:95: + 4a:79:fe:a6:93:3d:1a:5b:10:9a:2f:6a:45:2f:5d:c9:fa:95: + 2e:66:eb:52:df:88:a7:5f:42:8f:5f:46:07:79:8b:a7:49:82: + d3:81:c6:3e:c2:5a:15:c4:83:69:30:49:4d:6e:ea:05:1e:d8: + dc:29:ac:17 +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUJ17Pfr6qArmpx0IwQ/4OgAWR3QswDQYJKoZIhvcNAQEL 
+BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE4 +NTc1N1oXDTMzMDQyODE4NTc1N1owUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdS +b290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4iFrn+9Iud4i ++1s3CWjHtZJXUiTvhQDocYVND1uMxudPGfbjC3CjQX5x1A/W/fIayqpXkXaasoJi +YM7yAC7UvFjTYDBCpiiyUHtYAZ/7CmWwQNZ84rfajRnZpVHSRn4URqv6387+hAiY +Y0YdTYp3V2faFosyDHxB4qXs7n0gKOsDX/XmBdiLlnhvrimaUPfcljGGgbF46Ovv +XbvtQuyUxlRG7AVvGww2JMaoBn5cVrhDOxH0BgoFFRk7H8hnMes7WyoVCnv5a+QQ +7kS+GdjbRAH6Olb1bE7zYKrkzbKtdwdF7/HX9fpShFwDTnLgqZHF2dYKhDOYMfIC +Wz8QFWV21wIDAQABo4GZMIGWMB0GA1UdDgQWBBTDEkK6qdhN4MM+utdHQaYJL220 +4TAfBgNVHSMEGDAWgBTDEkK6qdhN4MM+utdHQaYJL2204TAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBhjAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vMTI3 +LjAuMC4xOjg4ODgvcm9vdF9jcmwuZGVyMA0GCSqGSIb3DQEBCwUAA4IBAQAieRq5 +Xfr1yaOIIsSS5mRtzqWuLmlIap7VEcW7sN44G1sEhWDWZBTtwmICfa3SF63vQCcr +UFlK/4jGsxZcVTDZI71PDzS3e+164fM5NekYbXCxKyri5c2hVIr59JWBKYQ/lS9I +4DU+2cuETT0+PA6NJEJfGeYGpYeuuq8HAudqgwqJ1KQ4zgVu9hXxelO7UCiJUT/y +VPHTxCgHoT5V5YS431ivw+eBwgicNeTEhnWoF5kspn9GMJsjVcXY4mrkCKGL3Lxb +hpVKef6mkz0aWxCaL2pFL13J+pUuZutS34inX0KPX0YHeYunSYLTgcY+wloVxINp +MElNbuoFHtjcKawX +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/private/System_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/private/System_keypair.pem new file mode 100644 index 000000000..cc779bece --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/private/System_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCjIS90NMEbQZC2 +TkFy4D+eSZRV7AJM3RSAuD3Gx0e7pVnDNYaJFwjO/nHmL5zB29J+FCTaYTA6526x +4yE4gbxH37J/H2C+PcXtdgOU48SzPr/4Q7rCVLy7ZlmYo/mq4xDow4jcGhhv3ZDr +b6NL1K80XEMg1VvnmKV8e6kVhrsov7rgu/ccCMQm68GsBR90TwURV+ASdxeeid2l +OO7Pz2e+DF5qSnRhIXmOwyjx4gYALeo6beKmJf0ti/WCNpGKIfBqkxnWdgj9ze6Q +qanPmTBxRlfq+8VlT3yGXJ3XtMMnPOsn3bxVdh8lDctvQ5qfut5UwZADnuUN2c2E +1Fh0Y75ZAgMBAAECggEAGJh8EGwU0pB56nbVmOW1Sd8jsanGNgMeYIMG83Xf+6uk +Y1GqcXiK4DTOhQuYOcV0UQSmAtQlAriawNDzVRMAiaCxh8e6HSzwrws8YoJOCc2U +AbFqkvrWQvYdW62bive1+LZkp/T6SsGQJGNebmRIr18a0vRAaWSjTOfTOFbqWKwD +640JDw2KmJmba6JtOaEL4QWrvbugTNwh3OEHugBVTCiRTdruVpCpLSxW1yZEpwB2 +BmxQxHvbtIjiOmHuNrsh21jzi7IEx+TFawJ0EV6Wm9XCbjX62XETraILg5bWVGv7 +X+TIDE2JBCC9GZMm9Qj1EfCojRmKfxopv7sA1yBYRQKBgQC5K5NzQzk14G64tyvW +61BteydWlBzFbgiMjYUq9wqgf2WbDVONBUB5x3MOArOmYuJOy0Xbt+pF3pF8wrkl +hMt/hZqKzDtdDWo3+PnNFgcWmB+T76Jei9khIk3D9ENcGaNwGAS3l8sxqXNLVVBJ +u5qHKeKFreXSra7xlXOuw5IMbQKBgQDhh1QqpQMloXOPCQWWzPtrEa8BRGNMFQTU +yZFHeetQjjX5opxxMbbXU/wNz/dRgdfe2VLVo9e4dtQbzCKenuwWeivuDxd4YOsF +Von9XDOzVWoXuP01MxDcU+sRoBLwWbpCWMe7r4Ny98C78+/5GssvkFUo+hd2vPo6 +U20pVZfuHQKBgGYD1eZooLpH/XgSojpzxgmrEc8nJnq21krpJPa4x8gIp+e2fdNx +k0YEViTf5C3EyL10S/Zy6sS3jBvaA7rh4GNPLgdN4V6wp1ZS+vy8KAeQo8US/rds +AKG6jnFovzucfGijMuYa4L1ph7V3ORaGHupcbwoK9lUNjxZVqjgcUvg5AoGBANOU +zpWjcaxgJ7XNVP0BGe59DJ43tqCuJ3YqFK3l56oPgPvOXs6jQVIKbLHYpcJF+mwL +nvbnW36nnJ7niKMfnYYI4CXa6r34zwSXB6Y2Vhqsy3euCX9bhTnvUN2cO6hZxbBw +8hFWvA+j96FdXYlqZa0dz4c9+b1f1bHaitL4hizRAoGAVlH2lJr6s+ZmzkDY7D+Y +6YKyjXaxhHBIqB2oLK1KuxGMiQnRADs9UOC4x2PQPOfemjVTJ3eN3rwxdqSh+Y2v +K+RejHBJzbd4JIv0QRxpPAm9sezaNEHa7ss387cLZEBEYUI9HkIuPunKX+2lHITn +WpVRyzYjVkFUUcRe3DyTlh8= +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem new file mode 100644 index 000000000..1f4a6df18 
--- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC06yLixLp/M6pX +qxPxaQmYKDx9p+JBKigv+YWhbJTuCutNAUwofJ0FTdgQf7fPE8Km3hEMlziXzW0R +/RZ2wOtat3sXE0WdSwBPJsWxm2eTLNbVMzfhUB17Db6My70pmY9U9n4EhIIqKO5x +Po1fcrJqd2tHPrpNs+KWFHEKHiYWj2wbByqsFYkeiGPDgTuR6fNDG/DsCCSWRich +KlYlLLbM2QJwd53kfESMkwSFowkKjvXnIfq9Vii3UiAJ7JrE1NeKGU56EOmyEDZo +zs54i3k/b3A7dW1wWTrJhaj4I9SrRMKu9RxuOBHhX8yP4kP1sw4JF7PG7kf7OcRY +YrrjqMXvAgMBAAECggEATFRQOaCKlpQzsB0rotSQCbQgIVutZ5Tjs6nwqTRoeS3+ +LFT5zrMUhGJdYEiiQimyHDjgtJEwfUtcUxSWX6/xHCsBMbEd08kK7loLWm2Ye02V +rgmX7+WfKoWX+UsUGfMBt/TvIfTN/f+a6ghcGQMJJ0YO6tYaQCI+3NbvAjfKFgXi +nWWZA+ipjh+Nu3YhVAy/uMInMi0qGWmpomU1yS+04E3OQksKYc3OmER7zFwbmNbF +0LanWlLURUeHIS1BY+V4yXw6yBJaCDUpVA37mfLqRQshGtGjmWLtMt/AuSFokwHd +yewoORlpVkZnE4Igv1JDggFdEI5lZ4PTmOjEXfntYQKBgQDH6sBr24OMUceNWyvf +k03pqUaoiJkivAcUI/krfY7mdSLkiqs+UPuRrikGbvKT3R+iJVbTB4dXzGG6nzBc +es7xwvzDGNHHXe0KAFhyIXwNMZmLGTmsNVfnKPAQ1BfKG9MtD5ck2gI1L1DkpaRz +X57YONvG05HYmY7TaV2VOKK1iQKBgQDnq/zW6P9WHpIHBjZRN0V4yFl1dMfn2VwZ +c3QWBd+kTwVBBhlJlqYeRIt4kmwExPnd3OX8Y7N18RttIc+k4dZgTA4w8G3xzvgk +0sHgf3EBbrkUuS23BJ2IPIb4LmWckH6+KJkvBrlZOfoLBj8uxQwz/wmWA2IoQgKv +CvDNr6G5twKBgECWSgZOjAhoX1T+0ITRvUkxJB/MydSb9JmAKb7wOJuh2l0Fo99l +IHFnV9+5Nmuo89BZydwxwXsPD7/QMDqgfn1C5pBNU3Damnsxs2FkCgTlMlrrEmPd +dAG9ixmUu/7S0H3tXIJOYIo4OCU2kpOnn9TxQafRsHvO2ILatp5ABukpAoGBALgP +KJ4GF3bwaswx302/P+6qHoj28yv8wPNnir9Eg14jeeUjV0vj6K77fmOY0UEozeu6 +6O4QuC/oEwYtaq9wzcVMJ6oyGueWrAd1eptGJR4iPeF9DhjuDcqDbCgZlJlDI68o +yitWiEOfkEzZ9bDO1NcqtQ7+OSoK597yLkb8Vt0ZAoGAN2dHPkTiNFlbzCefv/EP +A4xQUAUiwfQ9ZlhMtD9Tlea8cMAD901rxy52YrgCvBPxw3HmKG2H0NOpa7BwrgA8 +uODxi6xBRExRhvaqZe1aP1xn4XKw2VVsMlIlJQj2Wmuxeknfm9R1sfRD797c4nuN +ntLUOPAWtDkLoJLrTd9EqFk= +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA2_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA2_keypair.pem new file mode 100644 index 000000000..8485c7cf7 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client1/private/UserA2_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCmfECAK0QAMxHG +wp1nPoeOfkDT9dMntn0YPMCGrJY6rdjDy6tyXky3JEXax6jMdLghdWKegYiWVG7b ++YwvTJcNziFCL5JXfzQrAkNMIq4UyvyyLNBnDlLgbWGWpjvMT2rW70WcdJIlbAoQ +YhsiKxFr0VJN2o3DSuZ0pxse74r0logCDbdXNZ+j/6Istw4nTnkvzwzxkQ6/Adei +cSy3Dkt+UJGJccIXqsspgJ7XK/ozQeiC0TqXPWzeZpu06hrrlL5uwGbodz1y1Vyl +6Ks7M/SzwiZJvAhVzxa2EiKR/sFastd34/RHvMR3a/V/w+hImbmo6rGu5sw6EvpN +L18PqP2NAgMBAAECggEAHsf4UPou326RydLvsUgRXhofuFDKEpyd8l5BJmVAfWbp +HgJJF6Mxwea196ZUokCuTplae33tmAXSXV99OL2LbCUBZzBOeVjud0k60hfTYcrJ +/9NjULqIPjBbC7R+d97zHPwuPagb4UlhbvgElkOqO+n+sqBG96WgiE7hJ84YPfJO +Y+Is9vRbESkMVK1TH5PxfDE0Yu/i2vm/Fv+Ekgqe+GiZgbDw+L98D7ZX1xb8W2Ix +WnM2Skd22pit1ftpixuHbdHcPX2NNdhFKy8r/ZYK0SCLwkb8yJhDLQF8Q01YXd6q +FHtuE+MGXsr7dkcqYtc2QvigJdHs72WCjZwcpA+vgQKBgQC9qt3AIXPjPckhTEEK +97tg0zqFVPHyhiy23qsKJ/egMIhYESQngLOPcQ0Q/bG5OJqe5sx31rmKQ368QUSX +lIPG9WrRxCh3BTo7nOOEmAh4uGnKtvDJbTRP56fPQhkKlDywua8vKs0moUdcact7 +jjXYxXSPGEqHQrjPkuurPJvc7QKBgQDgtd4/kYGM9R2ltSLWm/TZnE6LM1EtBWrA +HNAYV7WxxKdUvTtxBIXDKer0RAbDKHIoZ6HI3lon5siuBVtIFoq2VLxS1jm3rEJv +qV6USxxDnEkbLla6Jzmd5eqFPZErWfNqmdXP1sqC8fs5q1PUJXNtEIdJulXQnHP2 +5lJxq8ovIQKBgQC1HBerg0YZ08HfHeVuB6jRiGH1N2vhXeYMqQtCI2/9ctp+3b9c +STUs35LOirHOYBKlcVYFiPCa6mB2ewx4gcRjk61wqJLLNB6rFeDbmCFexRmgDJhY +fwLY2igPbNpkk7BwQJ7bt082eAKgaBV54g3g9IucqGFiT4ASFgUb+kAK8QKBgFYJ 
+rJgAWW8kJv7clQNA4YY0j+pCctFfIpl+LrszUhFHr54Fem3ygljQgvKV3VT59oO7 +8jkb0b83YR0oVeQLJX9cgGLjPWQzI5jna5wyChdlDqTGoFRUUn4/mwT7JstHfKkT +T8dtgUqT5lIVZFp1IHXg/zveiZ7/WHNvip+VXCuhAoGAE3aA/rPBYEHJFLHaSgcR +E+ggLP3HjQPN6347NXBZSaWSXqXtdLDlgnnysRnEd+JHSDI3lNuFa6d6nY7x7Mc0 +Bn54Tf3KLLHcyUrwQTCjY230212gYGqWXMgeaTPJRtl4K0PchWzKzZ1m9RQAZHOQ +OaBsh0IA+LCDTmsPsbzh6U4= +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_bundle.pem new file mode 100644 index 000000000..c09aef2d9 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3a:d5:76:e0:a4:4b:67:ba:da:f2:9b:15:09:4c:ff:54:58:1d:e9:92 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: May 1 19:40:31 2023 GMT + Not After : Apr 28 19:40:31 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserB1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ba:19:65:ab:3f:2e:f2:7a:93:ea:06:eb:a2:9a: + c5:b9:20:66:2e:74:1b:94:5a:43:1c:8c:22:72:00: + 79:2d:20:18:e3:4a:35:a6:df:8a:58:33:73:2c:28: + 20:e7:d9:85:ec:f5:81:ae:44:44:55:66:65:d6:b5: + 78:71:c4:d8:c2:7b:4c:2d:8b:18:b6:86:fc:50:c0: + 7e:b6:6e:f7:76:c0:30:6c:67:09:53:2d:87:98:d6: + d4:d8:b3:a9:80:45:93:7f:33:3f:41:2a:70:f3:e1: + df:a0:85:64:4b:25:e4:91:e9:e6:c8:c3:a0:3e:b3: + ef:97:1f:ae:9d:44:84:35:26:26:4e:0c:7a:1d:c7: + ef:b6:46:8d:82:b8:b0:18:fb:25:77:04:20:8c:da: + af:fa:9e:a2:b0:67:b6:a6:5b:d7:95:a5:3c:3e:76: + b4:37:4a:48:98:34:96:9d:d2:ff:36:6a:f4:2a:cd: + 85:b3:e3:71:74:0f:e0:25:f1:06:cb:9d:53:fc:b4: + 5d:c4:8d:7a:0b:bd:16:ee:5c:58:21:ad:49:34:9f: + 9e:1b:6d:f6:47:52:1f:a0:74:00:fe:3c:4d:5f:4c: + 5a:23:4a:d5:4c:ff:3f:42:5d:85:df:f6:3b:32:c4: + ca:4b:d0:9d:4b:9e:86:a6:64:44:b8:ae:24:1a:f4: + 66:6b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + EC:AB:7B:4D:CD:62:D6:89:63:69:FE:97:34:5A:96:58:A5:94:A6:D9 + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + email:UserB1@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + a8:78:fa:c2:44:e0:b9:c7:af:d5:cc:b6:b4:2b:3d:74:ae:b8: + d1:e1:22:d0:63:7d:77:97:db:97:2f:f1:f0:ce:e3:9e:5e:e1: + 2a:19:54:00:38:7b:30:0b:8b:95:3a:4b:5d:83:08:80:fe:29: + 85:72:fd:c9:80:6b:c3:fd:a3:00:4f:b5:f2:34:a3:42:54:77: + 77:70:43:40:fe:1f:7a:b7:7f:55:c3:c0:e2:44:d1:95:fb:4c: + eb:f8:39:dd:b6:3d:07:27:39:8e:89:e4:a8:49:fd:02:70:65: + 72:6f:c7:d4:12:57:bd:47:ea:7d:2d:63:b4:fe:81:33:20:3c: + e0:36:a2:60:58:79:5e:ce:6c:ed:7c:97:6e:6b:52:25:8d:73: + bb:ea:b5:8b:1e:d2:97:24:88:59:ea:a4:29:a3:ea:04:45:e1: + 6a:cd:c8:b9:13:44:57:f8:7e:1a:85:34:11:71:f9:10:a4:6f: + 07:d4:7d:21:84:f1:52:6f:f9:e8:36:83:28:32:aa:ad:2a:c3: + fb:98:02:c7:2e:2c:49:08:21:af:fe:15:0e:f3:ce:e7:24:b5: + c8:08:d6:20:e8:8c:24:ce:1f:84:0b:9a:46:07:8c:05:d0:86: + 
04:06:2b:a2:a8:e2:20:c1:1f:ac:07:fc:ac:e0:f5:ee:7a:c6: + 5a:e4:81:74 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUOtV24KRLZ7ra8psVCUz/VFgd6ZIwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA1MDExOTQwMzFaFw0zMzA0MjgxOTQwMzFaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckIxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +uhllqz8u8nqT6gbroprFuSBmLnQblFpDHIwicgB5LSAY40o1pt+KWDNzLCgg59mF +7PWBrkREVWZl1rV4ccTYwntMLYsYtob8UMB+tm73dsAwbGcJUy2HmNbU2LOpgEWT +fzM/QSpw8+HfoIVkSyXkkenmyMOgPrPvlx+unUSENSYmTgx6HcfvtkaNgriwGPsl +dwQgjNqv+p6isGe2plvXlaU8Pna0N0pImDSWndL/Nmr0Ks2Fs+NxdA/gJfEGy51T +/LRdxI16C70W7lxYIa1JNJ+eG232R1IfoHQA/jxNX0xaI0rVTP8/Ql2F3/Y7MsTK +S9CdS56GpmREuK4kGvRmawIDAQABo4IBJDCCASAwHQYDVR0OBBYEFOyre03NYtaJ +Y2n+lzRallillKbZMB8GA1UdIwQYMBaAFHVV4o7nraXdgD3JMwssold37RWsMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToyODg4OC9pbnRlcm1lZGlhdGUyX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjI4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQjFAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AKh4+sJE4LnHr9XMtrQrPXSuuNHhItBjfXeX25cv8fDO455e4SoZVAA4ezALi5U6 +S12DCID+KYVy/cmAa8P9owBPtfI0o0JUd3dwQ0D+H3q3f1XDwOJE0ZX7TOv4Od22 +PQcnOY6J5KhJ/QJwZXJvx9QSV71H6n0tY7T+gTMgPOA2omBYeV7ObO18l25rUiWN +c7vqtYse0pckiFnqpCmj6gRF4WrNyLkTRFf4fhqFNBFx+RCkbwfUfSGE8VJv+eg2 +gygyqq0qw/uYAscuLEkIIa/+FQ7zzucktcgI1iDojCTOH4QLmkYHjAXQhgQGK6Ko +4iDBH6wH/Kzg9e56xlrkgXQ= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + 
Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_cert.pem new file mode 100644 index 000000000..51050116b --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB1_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3a:d5:76:e0:a4:4b:67:ba:da:f2:9b:15:09:4c:ff:54:58:1d:e9:92 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: May 1 19:40:31 2023 GMT + Not After : Apr 28 19:40:31 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserB1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ba:19:65:ab:3f:2e:f2:7a:93:ea:06:eb:a2:9a: + c5:b9:20:66:2e:74:1b:94:5a:43:1c:8c:22:72:00: + 79:2d:20:18:e3:4a:35:a6:df:8a:58:33:73:2c:28: + 20:e7:d9:85:ec:f5:81:ae:44:44:55:66:65:d6:b5: + 78:71:c4:d8:c2:7b:4c:2d:8b:18:b6:86:fc:50:c0: + 7e:b6:6e:f7:76:c0:30:6c:67:09:53:2d:87:98:d6: + d4:d8:b3:a9:80:45:93:7f:33:3f:41:2a:70:f3:e1: + df:a0:85:64:4b:25:e4:91:e9:e6:c8:c3:a0:3e:b3: + ef:97:1f:ae:9d:44:84:35:26:26:4e:0c:7a:1d:c7: + ef:b6:46:8d:82:b8:b0:18:fb:25:77:04:20:8c:da: 
+ af:fa:9e:a2:b0:67:b6:a6:5b:d7:95:a5:3c:3e:76: + b4:37:4a:48:98:34:96:9d:d2:ff:36:6a:f4:2a:cd: + 85:b3:e3:71:74:0f:e0:25:f1:06:cb:9d:53:fc:b4: + 5d:c4:8d:7a:0b:bd:16:ee:5c:58:21:ad:49:34:9f: + 9e:1b:6d:f6:47:52:1f:a0:74:00:fe:3c:4d:5f:4c: + 5a:23:4a:d5:4c:ff:3f:42:5d:85:df:f6:3b:32:c4: + ca:4b:d0:9d:4b:9e:86:a6:64:44:b8:ae:24:1a:f4: + 66:6b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + EC:AB:7B:4D:CD:62:D6:89:63:69:FE:97:34:5A:96:58:A5:94:A6:D9 + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + email:UserB1@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + a8:78:fa:c2:44:e0:b9:c7:af:d5:cc:b6:b4:2b:3d:74:ae:b8: + d1:e1:22:d0:63:7d:77:97:db:97:2f:f1:f0:ce:e3:9e:5e:e1: + 2a:19:54:00:38:7b:30:0b:8b:95:3a:4b:5d:83:08:80:fe:29: + 85:72:fd:c9:80:6b:c3:fd:a3:00:4f:b5:f2:34:a3:42:54:77: + 77:70:43:40:fe:1f:7a:b7:7f:55:c3:c0:e2:44:d1:95:fb:4c: + eb:f8:39:dd:b6:3d:07:27:39:8e:89:e4:a8:49:fd:02:70:65: + 72:6f:c7:d4:12:57:bd:47:ea:7d:2d:63:b4:fe:81:33:20:3c: + e0:36:a2:60:58:79:5e:ce:6c:ed:7c:97:6e:6b:52:25:8d:73: + bb:ea:b5:8b:1e:d2:97:24:88:59:ea:a4:29:a3:ea:04:45:e1: + 6a:cd:c8:b9:13:44:57:f8:7e:1a:85:34:11:71:f9:10:a4:6f: + 07:d4:7d:21:84:f1:52:6f:f9:e8:36:83:28:32:aa:ad:2a:c3: + fb:98:02:c7:2e:2c:49:08:21:af:fe:15:0e:f3:ce:e7:24:b5: + c8:08:d6:20:e8:8c:24:ce:1f:84:0b:9a:46:07:8c:05:d0:86: + 04:06:2b:a2:a8:e2:20:c1:1f:ac:07:fc:ac:e0:f5:ee:7a:c6: + 5a:e4:81:74 +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUOtV24KRLZ7ra8psVCUz/VFgd6ZIwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA1MDExOTQwMzFaFw0zMzA0MjgxOTQwMzFaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckIxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +uhllqz8u8nqT6gbroprFuSBmLnQblFpDHIwicgB5LSAY40o1pt+KWDNzLCgg59mF +7PWBrkREVWZl1rV4ccTYwntMLYsYtob8UMB+tm73dsAwbGcJUy2HmNbU2LOpgEWT +fzM/QSpw8+HfoIVkSyXkkenmyMOgPrPvlx+unUSENSYmTgx6HcfvtkaNgriwGPsl +dwQgjNqv+p6isGe2plvXlaU8Pna0N0pImDSWndL/Nmr0Ks2Fs+NxdA/gJfEGy51T +/LRdxI16C70W7lxYIa1JNJ+eG232R1IfoHQA/jxNX0xaI0rVTP8/Ql2F3/Y7MsTK +S9CdS56GpmREuK4kGvRmawIDAQABo4IBJDCCASAwHQYDVR0OBBYEFOyre03NYtaJ +Y2n+lzRallillKbZMB8GA1UdIwQYMBaAFHVV4o7nraXdgD3JMwssold37RWsMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToyODg4OC9pbnRlcm1lZGlhdGUyX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjI4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQjFAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AKh4+sJE4LnHr9XMtrQrPXSuuNHhItBjfXeX25cv8fDO455e4SoZVAA4ezALi5U6 +S12DCID+KYVy/cmAa8P9owBPtfI0o0JUd3dwQ0D+H3q3f1XDwOJE0ZX7TOv4Od22 +PQcnOY6J5KhJ/QJwZXJvx9QSV71H6n0tY7T+gTMgPOA2omBYeV7ObO18l25rUiWN +c7vqtYse0pckiFnqpCmj6gRF4WrNyLkTRFf4fhqFNBFx+RCkbwfUfSGE8VJv+eg2 +gygyqq0qw/uYAscuLEkIIa/+FQ7zzucktcgI1iDojCTOH4QLmkYHjAXQhgQGK6Ko +4iDBH6wH/Kzg9e56xlrkgXQ= +-----END CERTIFICATE----- diff --git 
a/test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_bundle.pem new file mode 100644 index 000000000..b28ac13a5 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 1e:dc:a2:b9:fd:aa:6e:73:ae:1c:7d:8d:13:73:d1:cd:16:bb:40:90 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: May 1 19:40:31 2023 GMT + Not After : Apr 28 19:40:31 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserB2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b2:1d:92:83:be:0f:40:5c:b8:34:93:66:28:ea: + d3:85:1e:ec:66:e3:97:d0:fe:a7:2d:2c:89:c4:aa: + e0:ff:62:a2:8b:19:19:8a:1f:bb:a9:24:2f:a8:a1: + 16:95:a7:5b:42:65:2f:03:27:12:ac:44:fb:2f:e0: + 9b:19:52:32:a7:db:83:d0:1a:d6:36:d7:b7:40:0e: + 85:c6:a7:75:5c:d1:71:a9:99:d3:da:2b:70:f9:9e: + 9d:0b:a8:35:bc:3c:7f:24:1e:b5:2e:83:31:07:c9: + 9b:4a:0e:a3:32:36:bd:a6:2c:55:79:f8:71:66:6a: + 2a:8f:f9:f9:67:b0:06:21:e4:2a:02:44:b6:39:84: + 18:7a:00:5e:34:36:f4:61:0d:11:a9:e2:0c:b8:05: + ed:67:97:bc:29:e7:69:ac:48:6e:fb:78:e9:3b:38: + e3:db:09:cb:22:0f:9a:57:1c:cc:06:f1:f7:44:66: + d0:01:c4:c1:14:65:29:e5:cf:19:26:73:c9:8a:5c: + 2b:25:a9:d1:c6:3e:d8:4d:f5:f3:67:c7:23:b9:7b: + 2b:f5:97:28:89:81:99:9d:82:45:21:27:f4:ca:86: + 02:22:2f:26:4b:61:8a:cb:76:fb:b1:7b:4c:42:b6: + 25:e8:3e:cb:ab:2c:60:a7:a3:82:fb:ef:05:59:03: + a5:5b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C6:25:DB:6C:4E:18:89:96:67:30:E8:5F:EC:0C:03:70:A4:4C:07:98 + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + email:UserB2@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 7d:93:8d:17:4b:fe:9e:5d:d0:4e:c3:47:dc:6c:05:1b:10:7f: + 9d:24:75:ea:30:27:c3:b1:26:2c:38:c3:c9:18:ec:21:d2:ef: + 07:b2:d4:f9:2e:a1:a2:1a:a5:68:cb:1a:14:55:7f:82:05:8a: + a3:0d:11:f0:ed:f2:e2:c0:e3:6a:1c:76:42:01:92:68:2b:f7: + 4d:98:ae:7b:02:f1:36:2e:44:67:43:39:8e:08:91:f1:f0:ab: + 9c:84:df:08:80:bf:76:6b:37:3f:e8:70:e0:d6:27:73:e9:bc: + 49:1f:c2:4a:15:51:22:c6:f3:85:52:e3:a6:93:aa:f6:c9:b4: + 96:f2:09:e6:62:53:0e:87:76:fd:7a:38:69:e2:41:54:c5:51: + 6e:cf:bc:1a:7b:0a:ef:c6:6e:be:b5:72:4d:f4:6f:fd:a5:a8: + ba:23:15:80:fa:b6:37:8d:68:d8:3e:36:c5:ae:f6:6c:22:a0: + 00:0d:93:e1:ae:41:9a:d7:35:d0:ab:98:71:1b:6b:8d:da:78: + 65:3c:97:be:9c:9e:d7:32:a1:0c:2b:60:ac:74:18:18:e4:48: + 87:40:dd:bf:eb:0e:27:17:96:a1:aa:32:a9:58:b5:ee:fc:42: + 7e:d7:71:a4:8e:a0:5b:06:6f:f1:85:27:8c:6b:20:df:e0:6b: + 13:5f:cf:4c +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUHtyiuf2qbnOuHH2NE3PRzRa7QJAwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA1MDExOTQwMzFaFw0zMzA0MjgxOTQwMzFaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP 
+MA0GA1UEAwwGVXNlckIyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +sh2Sg74PQFy4NJNmKOrThR7sZuOX0P6nLSyJxKrg/2KiixkZih+7qSQvqKEWladb +QmUvAycSrET7L+CbGVIyp9uD0BrWNte3QA6Fxqd1XNFxqZnT2itw+Z6dC6g1vDx/ +JB61LoMxB8mbSg6jMja9pixVefhxZmoqj/n5Z7AGIeQqAkS2OYQYegBeNDb0YQ0R +qeIMuAXtZ5e8KedprEhu+3jpOzjj2wnLIg+aVxzMBvH3RGbQAcTBFGUp5c8ZJnPJ +ilwrJanRxj7YTfXzZ8cjuXsr9ZcoiYGZnYJFISf0yoYCIi8mS2GKy3b7sXtMQrYl +6D7Lqyxgp6OC++8FWQOlWwIDAQABo4IBJDCCASAwHQYDVR0OBBYEFMYl22xOGImW +ZzDoX+wMA3CkTAeYMB8GA1UdIwQYMBaAFHVV4o7nraXdgD3JMwssold37RWsMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToyODg4OC9pbnRlcm1lZGlhdGUyX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjI4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQjJAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AH2TjRdL/p5d0E7DR9xsBRsQf50kdeowJ8OxJiw4w8kY7CHS7wey1PkuoaIapWjL +GhRVf4IFiqMNEfDt8uLA42ocdkIBkmgr902YrnsC8TYuRGdDOY4IkfHwq5yE3wiA +v3ZrNz/ocODWJ3PpvEkfwkoVUSLG84VS46aTqvbJtJbyCeZiUw6Hdv16OGniQVTF +UW7PvBp7Cu/Gbr61ck30b/2lqLojFYD6tjeNaNg+NsWu9mwioAANk+GuQZrXNdCr +mHEba43aeGU8l76cntcyoQwrYKx0GBjkSIdA3b/rDicXlqGqMqlYte78Qn7XcaSO +oFsGb/GFJ4xrIN/gaxNfz0w= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + 
ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_cert.pem new file mode 100644 index 000000000..a76cdf8c3 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/UserB2_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 1e:dc:a2:b9:fd:aa:6e:73:ae:1c:7d:8d:13:73:d1:cd:16:bb:40:90 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: May 1 19:40:31 2023 GMT + Not After : Apr 28 19:40:31 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=UserB2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b2:1d:92:83:be:0f:40:5c:b8:34:93:66:28:ea: + d3:85:1e:ec:66:e3:97:d0:fe:a7:2d:2c:89:c4:aa: + e0:ff:62:a2:8b:19:19:8a:1f:bb:a9:24:2f:a8:a1: + 16:95:a7:5b:42:65:2f:03:27:12:ac:44:fb:2f:e0: + 9b:19:52:32:a7:db:83:d0:1a:d6:36:d7:b7:40:0e: + 85:c6:a7:75:5c:d1:71:a9:99:d3:da:2b:70:f9:9e: + 9d:0b:a8:35:bc:3c:7f:24:1e:b5:2e:83:31:07:c9: + 9b:4a:0e:a3:32:36:bd:a6:2c:55:79:f8:71:66:6a: + 2a:8f:f9:f9:67:b0:06:21:e4:2a:02:44:b6:39:84: + 18:7a:00:5e:34:36:f4:61:0d:11:a9:e2:0c:b8:05: + ed:67:97:bc:29:e7:69:ac:48:6e:fb:78:e9:3b:38: + e3:db:09:cb:22:0f:9a:57:1c:cc:06:f1:f7:44:66: + d0:01:c4:c1:14:65:29:e5:cf:19:26:73:c9:8a:5c: + 2b:25:a9:d1:c6:3e:d8:4d:f5:f3:67:c7:23:b9:7b: + 2b:f5:97:28:89:81:99:9d:82:45:21:27:f4:ca:86: + 02:22:2f:26:4b:61:8a:cb:76:fb:b1:7b:4c:42:b6: + 25:e8:3e:cb:ab:2c:60:a7:a3:82:fb:ef:05:59:03: + a5:5b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key 
Identifier: + C6:25:DB:6C:4E:18:89:96:67:30:E8:5F:EC:0C:03:70:A4:4C:07:98 + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, S/MIME + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Client Authentication, E-mail Protection + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + email:UserB2@user.net + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 7d:93:8d:17:4b:fe:9e:5d:d0:4e:c3:47:dc:6c:05:1b:10:7f: + 9d:24:75:ea:30:27:c3:b1:26:2c:38:c3:c9:18:ec:21:d2:ef: + 07:b2:d4:f9:2e:a1:a2:1a:a5:68:cb:1a:14:55:7f:82:05:8a: + a3:0d:11:f0:ed:f2:e2:c0:e3:6a:1c:76:42:01:92:68:2b:f7: + 4d:98:ae:7b:02:f1:36:2e:44:67:43:39:8e:08:91:f1:f0:ab: + 9c:84:df:08:80:bf:76:6b:37:3f:e8:70:e0:d6:27:73:e9:bc: + 49:1f:c2:4a:15:51:22:c6:f3:85:52:e3:a6:93:aa:f6:c9:b4: + 96:f2:09:e6:62:53:0e:87:76:fd:7a:38:69:e2:41:54:c5:51: + 6e:cf:bc:1a:7b:0a:ef:c6:6e:be:b5:72:4d:f4:6f:fd:a5:a8: + ba:23:15:80:fa:b6:37:8d:68:d8:3e:36:c5:ae:f6:6c:22:a0: + 00:0d:93:e1:ae:41:9a:d7:35:d0:ab:98:71:1b:6b:8d:da:78: + 65:3c:97:be:9c:9e:d7:32:a1:0c:2b:60:ac:74:18:18:e4:48: + 87:40:dd:bf:eb:0e:27:17:96:a1:aa:32:a9:58:b5:ee:fc:42: + 7e:d7:71:a4:8e:a0:5b:06:6f:f1:85:27:8c:6b:20:df:e0:6b: + 13:5f:cf:4c +-----BEGIN CERTIFICATE----- +MIIEXTCCA0WgAwIBAgIUHtyiuf2qbnOuHH2NE3PRzRa7QJAwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA1MDExOTQwMzFaFw0zMzA0MjgxOTQwMzFaME8xCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEP +MA0GA1UEAwwGVXNlckIyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +sh2Sg74PQFy4NJNmKOrThR7sZuOX0P6nLSyJxKrg/2KiixkZih+7qSQvqKEWladb +QmUvAycSrET7L+CbGVIyp9uD0BrWNte3QA6Fxqd1XNFxqZnT2itw+Z6dC6g1vDx/ +JB61LoMxB8mbSg6jMja9pixVefhxZmoqj/n5Z7AGIeQqAkS2OYQYegBeNDb0YQ0R +qeIMuAXtZ5e8KedprEhu+3jpOzjj2wnLIg+aVxzMBvH3RGbQAcTBFGUp5c8ZJnPJ +ilwrJanRxj7YTfXzZ8cjuXsr9ZcoiYGZnYJFISf0yoYCIi8mS2GKy3b7sXtMQrYl +6D7Lqyxgp6OC++8FWQOlWwIDAQABo4IBJDCCASAwHQYDVR0OBBYEFMYl22xOGImW +ZzDoX+wMA3CkTAeYMB8GA1UdIwQYMBaAFHVV4o7nraXdgD3JMwssold37RWsMAwG +A1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwIF4DAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwPQYDVR0fBDYwNDAyoDCgLoYs +aHR0cDovLzEyNy4wLjAuMToyODg4OC9pbnRlcm1lZGlhdGUyX2NybC5kZXIwMwYI +KwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjI4ODg4 +LzAaBgNVHREEEzARgQ9Vc2VyQjJAdXNlci5uZXQwDQYJKoZIhvcNAQELBQADggEB +AH2TjRdL/p5d0E7DR9xsBRsQf50kdeowJ8OxJiw4w8kY7CHS7wey1PkuoaIapWjL +GhRVf4IFiqMNEfDt8uLA42ocdkIBkmgr902YrnsC8TYuRGdDOY4IkfHwq5yE3wiA +v3ZrNz/ocODWJ3PpvEkfwkoVUSLG84VS46aTqvbJtJbyCeZiUw6Hdv16OGniQVTF +UW7PvBp7Cu/Gbr61ck30b/2lqLojFYD6tjeNaNg+NsWu9mwioAANk+GuQZrXNdCr +mHEba43aeGU8l76cntcyoQwrYKx0GBjkSIdA3b/rDicXlqGqMqlYte78Qn7XcaSO +oFsGb/GFJ4xrIN/gaxNfz0w= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client2/certfile.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/certfile.pem new file mode 100644 index 000000000..a25efa0b7 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/certfile.pem @@ -0,0 +1,175 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: 
sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag 
+JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 27:5e:cf:7e:be:aa:02:b9:a9:c7:42:30:43:fe:0e:80:05:91:dd:0b + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 18:57:57 2023 GMT + Not After : Apr 28 18:57:57 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e2:21:6b:9f:ef:48:b9:de:22:fb:5b:37:09:68: + c7:b5:92:57:52:24:ef:85:00:e8:71:85:4d:0f:5b: + 8c:c6:e7:4f:19:f6:e3:0b:70:a3:41:7e:71:d4:0f: + d6:fd:f2:1a:ca:aa:57:91:76:9a:b2:82:62:60:ce: + f2:00:2e:d4:bc:58:d3:60:30:42:a6:28:b2:50:7b: + 58:01:9f:fb:0a:65:b0:40:d6:7c:e2:b7:da:8d:19: + d9:a5:51:d2:46:7e:14:46:ab:fa:df:ce:fe:84:08: + 98:63:46:1d:4d:8a:77:57:67:da:16:8b:32:0c:7c: + 41:e2:a5:ec:ee:7d:20:28:eb:03:5f:f5:e6:05:d8: + 8b:96:78:6f:ae:29:9a:50:f7:dc:96:31:86:81:b1: + 78:e8:eb:ef:5d:bb:ed:42:ec:94:c6:54:46:ec:05: + 6f:1b:0c:36:24:c6:a8:06:7e:5c:56:b8:43:3b:11: + f4:06:0a:05:15:19:3b:1f:c8:67:31:eb:3b:5b:2a: + 15:0a:7b:f9:6b:e4:10:ee:44:be:19:d8:db:44:01: + fa:3a:56:f5:6c:4e:f3:60:aa:e4:cd:b2:ad:77:07: + 45:ef:f1:d7:f5:fa:52:84:5c:03:4e:72:e0:a9:91: + c5:d9:d6:0a:84:33:98:31:f2:02:5b:3f:10:15:65: + 76:d7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 22:79:1a:b9:5d:fa:f5:c9:a3:88:22:c4:92:e6:64:6d:ce:a5: + ae:2e:69:48:6a:9e:d5:11:c5:bb:b0:de:38:1b:5b:04:85:60: + d6:64:14:ed:c2:62:02:7d:ad:d2:17:ad:ef:40:27:2b:50:59: + 4a:ff:88:c6:b3:16:5c:55:30:d9:23:bd:4f:0f:34:b7:7b:ed: + 7a:e1:f3:39:35:e9:18:6d:70:b1:2b:2a:e2:e5:cd:a1:54:8a: + f9:f4:95:81:29:84:3f:95:2f:48:e0:35:3e:d9:cb:84:4d:3d: + 3e:3c:0e:8d:24:42:5f:19:e6:06:a5:87:ae:ba:af:07:02:e7: + 6a:83:0a:89:d4:a4:38:ce:05:6e:f6:15:f1:7a:53:bb:50:28: + 89:51:3f:f2:54:f1:d3:c4:28:07:a1:3e:55:e5:84:b8:df:58: + af:c3:e7:81:c2:08:9c:35:e4:c4:86:75:a8:17:99:2c:a6:7f: + 46:30:9b:23:55:c5:d8:e2:6a:e4:08:a1:8b:dc:bc:5b:86:95: + 4a:79:fe:a6:93:3d:1a:5b:10:9a:2f:6a:45:2f:5d:c9:fa:95: + 2e:66:eb:52:df:88:a7:5f:42:8f:5f:46:07:79:8b:a7:49:82: + d3:81:c6:3e:c2:5a:15:c4:83:69:30:49:4d:6e:ea:05:1e:d8: + dc:29:ac:17 +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUJ17Pfr6qArmpx0IwQ/4OgAWR3QswDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE4 +NTc1N1oXDTMzMDQyODE4NTc1N1owUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdS 
+b290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4iFrn+9Iud4i ++1s3CWjHtZJXUiTvhQDocYVND1uMxudPGfbjC3CjQX5x1A/W/fIayqpXkXaasoJi +YM7yAC7UvFjTYDBCpiiyUHtYAZ/7CmWwQNZ84rfajRnZpVHSRn4URqv6387+hAiY +Y0YdTYp3V2faFosyDHxB4qXs7n0gKOsDX/XmBdiLlnhvrimaUPfcljGGgbF46Ovv +XbvtQuyUxlRG7AVvGww2JMaoBn5cVrhDOxH0BgoFFRk7H8hnMes7WyoVCnv5a+QQ +7kS+GdjbRAH6Olb1bE7zYKrkzbKtdwdF7/HX9fpShFwDTnLgqZHF2dYKhDOYMfIC +Wz8QFWV21wIDAQABo4GZMIGWMB0GA1UdDgQWBBTDEkK6qdhN4MM+utdHQaYJL220 +4TAfBgNVHSMEGDAWgBTDEkK6qdhN4MM+utdHQaYJL2204TAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBhjAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vMTI3 +LjAuMC4xOjg4ODgvcm9vdF9jcmwuZGVyMA0GCSqGSIb3DQEBCwUAA4IBAQAieRq5 +Xfr1yaOIIsSS5mRtzqWuLmlIap7VEcW7sN44G1sEhWDWZBTtwmICfa3SF63vQCcr +UFlK/4jGsxZcVTDZI71PDzS3e+164fM5NekYbXCxKyri5c2hVIr59JWBKYQ/lS9I +4DU+2cuETT0+PA6NJEJfGeYGpYeuuq8HAudqgwqJ1KQ4zgVu9hXxelO7UCiJUT/y +VPHTxCgHoT5V5YS431ivw+eBwgicNeTEhnWoF5kspn9GMJsjVcXY4mrkCKGL3Lxb +hpVKef6mkz0aWxCaL2pFL13J+pUuZutS34inX0KPX0YHeYunSYLTgcY+wloVxINp +MElNbuoFHtjcKawX +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB1_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB1_keypair.pem new file mode 100644 index 000000000..1b2df180c --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB1_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6GWWrPy7yepPq +BuuimsW5IGYudBuUWkMcjCJyAHktIBjjSjWm34pYM3MsKCDn2YXs9YGuRERVZmXW +tXhxxNjCe0wtixi2hvxQwH62bvd2wDBsZwlTLYeY1tTYs6mARZN/Mz9BKnDz4d+g +hWRLJeSR6ebIw6A+s++XH66dRIQ1JiZODHodx++2Ro2CuLAY+yV3BCCM2q/6nqKw +Z7amW9eVpTw+drQ3SkiYNJad0v82avQqzYWz43F0D+Al8QbLnVP8tF3EjXoLvRbu +XFghrUk0n54bbfZHUh+gdAD+PE1fTFojStVM/z9CXYXf9jsyxMpL0J1LnoamZES4 +riQa9GZrAgMBAAECggEAAVnSLX+MajFnQj3our9FMrZSGx4bbAhQz9dndUWc1HT4 +d4AgPFpAfqpof6vVycHx2jSnILhuseJykSGzwoHgynrVpI82T6f9EzhRmkLbK1Y5 +6t6jC9uwXDvv37RgYcW02o1avD8VdHtN+qXtO4Db22P1p7zeA6LzSscmmLjf4QcY +15O5DFUsVD6jfjI+edTKY4OgqblwD/t5EqApBI/KhAypSRD/NDzKdtHZO+K3eJW0 +apznw5wrzPVX1xk4p+1LnM5nLBRnwECqRyzlmxjX3rJr7tVVWqOkTHs807wK+7AW +o9rujmS/J8I86BtZdj938VGVyuyqhJndANF8rOh6nQKBgQD09ZFmj/SMIeJIa2Xj +MiK1JMU1rcr2h8NxYhQqZV/sj8TD+Sm/ljCDDClqyo5wAvBdIkFO689sIDEFT1W1 +vUOnE8xa4kkoSf4TVADiGAt4aLHiPiRAoX0aPqgBSy9IcXg7p/iG5qFLp72CNEFg +3vM5vgjX+xio42Hqdo6+ruE1pwKBgQDCfK4KpR2BAv6dbuGNF8qZHWkgBDpSSlug +WMEZe6c9l44EAIHgJNr4nBviVZTZAHD+H5qSC8STQ6Y4ccOZYnG4dGxAztKYnX9Z +T6R+zOkisK+Zhq9noj8veBwS6F2fGTL7cagBkj2q3SveagGtutkV6kOKUw5uu8dI +GnSxaiNpnQKBgQDrzURlVWgUST3ZdsECvq1YcIgCj0TUooYKLF67HREE2LSR7dU5 +XytdyyRHb6tDuiCFlscFYMwwCqEFuoQISaPJPq62QiQoS2nwUynyezD3fNjXr/gX +2xxhWjVB4Y0nkEssKhp8SaC1AkjUANd6l8PNLti2iDkJwrDsEaqBdjjG+wKBgAVM +Eg12K9SMuVSeZYRLRphfBbL6ioAdSFuYr0G7bXWvAA452U+6kUA+OEA05oX2jh1N +zQ73RRZhvFBDQPmXhdNpUF1/hJrlh0dudOODP0JTn6TF11cyQxhO5CzbqVkg/ZN9 +p/7K9eUGeyBmsL8DnNAM/mPxGS6I7MeY+N6wLmC9AoGBAPL97OOwtkfCqCXBzIua +eNFIPvW8cKEM1ggxUPGar36TuaKnDt8bdGL3/ZEAD28XMGCbUwo99WrW0J4r9b95 +Rrs1FzUW9iVIqB+4W35lMfSbFOC/2GsSUf95ANT4wihu2QbVQU7iqjXw+w8ZN9Vx +Qkiwv6M/K0lzm6Q1H1pb7urx +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB2_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB2_keypair.pem new file mode 100644 index 000000000..587c4544a --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/client2/private/UserB2_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCyHZKDvg9AXLg0 
+k2Yo6tOFHuxm45fQ/qctLInEquD/YqKLGRmKH7upJC+ooRaVp1tCZS8DJxKsRPsv +4JsZUjKn24PQGtY217dADoXGp3Vc0XGpmdPaK3D5np0LqDW8PH8kHrUugzEHyZtK +DqMyNr2mLFV5+HFmaiqP+flnsAYh5CoCRLY5hBh6AF40NvRhDRGp4gy4Be1nl7wp +52msSG77eOk7OOPbCcsiD5pXHMwG8fdEZtABxMEUZSnlzxkmc8mKXCslqdHGPthN +9fNnxyO5eyv1lyiJgZmdgkUhJ/TKhgIiLyZLYYrLdvuxe0xCtiXoPsurLGCno4L7 +7wVZA6VbAgMBAAECggEACzGbuulEMPd1DwetATtNZTbHBOoMe3vVj0A7dEiIXokG +zc2tl10Td26EVEBFvTpI5YiaqwzElYNMTo2M7TjizvTynZyGusPnisl6SoWoh0U5 +2HWIAHkSKCAww1RbGL+HbEuO5Wy3R7FMC0C6PuQPP3Bo+swVnqn1s6wf88U/zWml +Nthu0uQSj+pxW4tK/p7IoUVBnSqKExODDLG4LpO3meSaZIr36wC6bJZ8w8lZfRBy +DkPJu9NNknL6qSoVGozLzgtg1//yCkU+LX0OcDgTNeup5DlA08jglQY8p3Xo3FPn +evofoPvDnku4H1gCXT/djERRSlPdcGPEcy7xMQx12QKBgQDqdoL8hkp/DUzoKZyM +u2Vud5E1jULal3SmRB1XFzqxEiFsAT6UBH2feVweBOKTjLBqIuC+teQ+JgC5TsYP +CGbclQG/XBTYzOPfn3bBJWS4j7Jd68uXDQvkM9+RroFVaCXn75UGWEMqcbtgTNyU +wUrAVgfTtz07iHf2oUy+IreW7wKBgQDCegdlOojhn4juC+B5ROJHXzwI1qEznpJa +ftI7RERUbDFRIaucwvI6y95nduIRORO1bzpBhHZzJDPNBhZZya9wkaLElXktgi1Z +IwF6eb3m/FtOxx7DtI9daCVsuZsoPEw08NJq6UYQqeauaJ3LM5rDSMX0DN3V//2m +7tULbZn4VQKBgQCT4dwMWsdyC3mOlXBgc3IuksvL8yVPqmew1xWKcORb+wuJi99k +jNCPXYR0irA+UGaVCxqmLyOe72lVeBIEOVBnoLRRdkrP06uGyJWmjWdR4ZCnHKp0 +w43UicNhp6d7rwz5lWtxbQowIzwEKXaXfLMhTSHyr4i3nAPOUz6MTmltkQKBgB6z +ePtoFDfaIZnC0jsSvs4ZoLace3JUtDIJF1M34bmaIub188uZkvfpO0EGKYYihpP7 +7SxupuxiaLMTJPAjwMh6lUGHf0vJ4zLRLeiR04Llj9yN3rNyi7dpO49AddgSPM2W +vwEVtnPm/n3GEjMEAIiXsnhml5azBO4XghZ9xPLJAoGBALctm1sK8MdawZ+cnc1i +4P3VP2/nzGeODF29MbJefYrg0hlKHZSfKsWMKg3Dk9jDUplwsVjK5oBgN1vg/zOV +ysTtyn1q/RBbe96lYkPHzdYPWDD5Rg80/t0n6jItTOQr6QCshDLrMB3bruIQz7V9 +6PPhzvdQu3v3e07wrKDa1F3t +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem new file mode 100644 index 000000000..53786eb14 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem @@ -0,0 +1,89 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, 
pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/intermediate1/private/intermediate1_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/intermediate1/private/intermediate1_keypair.pem new file mode 100644 index 000000000..6c04954d8 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/intermediate1/private/intermediate1_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQC8xoQtwqtdBddl +qOIVdNjy8VURRZOWTKXcy0T19BR+RgJZ6K54WWkhWPcWOLnCwmDYdquhOboLowMX +5KHLXRoMYnEkZLAA8G9MrwhijNxP4NfUVSzbNvypqtdYJ+SZy9wp2eo1FssuvgSy +glj05VwH2xKO4zyaXpBLxaPUIZZf4Y/3y57b4BCgbKIeMBdsMp97Q6Sf02szGxjN +pK0zSKOYsCvIInQXcdjxZCFV4TO8f3Rfpaaim1gv2+3HweU2LoYmrcb+uACFbnzt +/UrGoNmyP069+ghSyF0xE4a9P+x62DoV4nGv7ACIfqbo4Z2rV1qKH/jiTSlYU3kl +8J7ZGEAnAgMBAAECgf9Now4nMXr/fdU8+hNvCMPnuMbV5ewWCN2bzEa04K1D09BI +Tmm78MCVGwGoRoeNJBr5fdTPMMoJ/yVrG+W34iSvzqgnT4rJ/KqlA6CTwsiPyFay 
+RgxRQHCpVuLwp8ClyQ0wu26XQlrgJ480trAoUQdj6pC3V+ICdk90R/j0RW5JtsSu +e0ML3jNA9C4OgKlt2ia/MLqriaHXOf30EPONvtyqyKeGUFL7Un4eYKh4euRFEEMb +MKngNonefDCIdYA1wVFa3wT8bNBbpuHl3ghkokv6VpdHIVn9wC1l6HY5nPRjgmo7 +sguRI1bRa2TFkOIVwZjCJTyfANyQw14pRS6rxIkCgYEAwzSYHRpJlPHAD7wi3tls +bw7cBF9Q1P9PYKmVD9fAjx6eOjzDVOCdpGDijEkYoQoX1yYK3JaS8Vvp8V1wZ5Uh +HTTr6Y5uS6CPh37wGTJc9XhXdJpeN67fEOBZGU04FUlASVFeCiV3Ga6YX0HQ/yKd +VSc2JMX9mzxZjwhKRHmCEr0CgYEA95FFAxPxPNzYU3yHIdBlQWB1Z6AUFn+D4FgF +xeFOGmul1E+0PnPH78IlYanMjhhJ1nkc6X71rdX4ylonB/x6ldUpghWW9VWrqRGG +76S010aaZgOinwVE7+eeoelsIuma2W0QDwWrUT+RAsJBvZpGx1keo1qZEAaocs9V +R2lvHrMCgYBNMTMl7wtB9wd4MXGoploW4M1ofTi9wehl1Sm5BhyDfBwd84FawygT +pKxxxUYUCKW80rJg4Lpi73HnnIeirnpVzmOsDELZbTjU4AGaNSxFdb0/wvuXEXPs +fIs/UiXnZPwjAiYp5P7gDQb8RE6dVdbZoZPrns/W31qbETAtO8+QEQKBgQDgA710 +yYjSz+uXr+j/OflFrSjPedRzfzMvv7aJlhP8aEgH049/q3jRhNYah3Enaub1gWYe +Ctn4UNPtFqKW4WlzRw1mPm741Gqec9Or6VgSLDrt8IAocLYud2HdlMBa3xNVhxCu +5yxcOq7W1jxyerVtEUFeA07ZZ4zpRp8eHVOFbQKBgGJGU7xoJWO9P17SUGNfmSEF +6VIYFX6orA1Fi/kAJiqiFf98T4jnUWnL8LXVckt9FNw6KQqBCB6JuKXBFVkG2Bkr +f5IIhziTuDVpdLQSf0Z2i59TspgYjiKs4WEN3N0HGtCXfbyPO6Tt08d4icxL5Myt +W84T6Uof3+QQaqQnGvBE +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/intermediate2/intermediate2_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/intermediate2/intermediate2_cert.pem new file mode 100644 index 000000000..4ca1762a0 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/intermediate2/intermediate2_cert.pem @@ -0,0 +1,89 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 
34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/intermediate2/private/intermediate2_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/intermediate2/private/intermediate2_keypair.pem new file mode 100644 index 000000000..91e2908cd --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/intermediate2/private/intermediate2_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDaX/8d940anprz +K2iPwQwzBkEAyT7kGuHgcGr1L63f8+mZ7cXXqpMTN/9HqvPFife3rTpH5ZxOn4zi +Qe2kfJ2IMq71ioSfDBigs/6O3CqIavUvnIaS+ntus1p4Z1MLIWwNbIAaDh7uBsTS +5yTG5XS+Hi4XVSvlnwugWMz+v1M399yViPR3plm0uHyiS7dqZ6qE3Cnx+deJBU0L +84stUplX7W8Rnq8oo2FEwuxuf589C9z3GW0UiqW4tikCNJC0lsHLp0JGl8+NWf0X +saYnp3uKR2/6AyQcEiXuNNZc2kWYIzDhSMma3zeqG3Bssg+VOdZtPiUgqAcsSFcM +mVLLiQhBAgMBAAECggEABFew29ByrKMXSzsjgOpSmwgmjkSyPLiBIeSyZ85LK58u +18oH62Y/tvvf1nXCk7zO4YbvGANrpI+dLlmnx2PYAR+a5ZSb1wrXSYjSyNX9fYl8 +9zWqYm1bO4QTCj5pwximzKyJ7pq1yD93tgb1LwRcmjRA7+NYdGBBi66AYxd8aOo6 +QB7JoME+hzYAWB+foCOAPGAxYe7EFCPkPEyz08oxRCvDua0xa0+tWkU77MhUSCu+ +/uSq/Og9C9TfzCX0W91TNDnq8VeXbLDJoPNzgfSWIeYxSw/X5dUkYU8N2LuPLQOO +84Xv5UqU9YV22TEjg22YAL8/GMZ160K1xzXnQb1LPQKBgQDs/jOBp9NFiFlcNbJ8 +MKdfv+sktQR7onGehOaz/dEFEKiHNO8UmSlkAk+aZoTmYXKgIT4uytKRSOfZUWSl +kY64sKJ7KTvVq/Dzm4KsyH8VgYYQ3OrNbqSCSK7DiOiKJxQ+Jhm2+a+io16B8ZbM +RXLoaQ5+8oET6BgM5R6IMe4iFQKBgQDr44q7I7nhQdNz4h7OJ4HZ8X8WmQG7YpSX 
+EMLb5sX5wymfPi5uUDTcthUw7dpleO9js5iT93fB6+rd5yyiDPIes/dWjqULprvR +zIIr0u+cyt1TRxrNSa6dz/dJO3t/g/fTPKeM9j7ON4RvEGW4LPA+PbEUU0Q6xfSq +OZ0sZSXUfQKBgQDh8+r/rxbLsJgiRkAKEAlETSLQOJYxmkthq6yZ52ElxyAm6N0Z +cn34EAv9VclYLYiwC4HR8yaXxj7m/6dKBGFizWXcrw+RRQHSAW6xdedUhc1gvoBP +pTHL1ahqXVn4fhHav1C9F4nRMpmkosX3tC8+Twu3FVbjt+FWSgy2JYS5kQKBgD5B +6u6jaj7Skc2HA5xjfvkXrPQ44+UiCpeoW9WQHfZilQyra7O/xYPvJr6oODkJ5xzI +XN/Is7nh2zY/+l62zfxegUw+D794fR/NOxn37TfTrwB4xtEhvk12gwy3/0tTeEgv +PQWORFtG+dQaXs5yReIXhDIaG+rrLjzzQdFizM49AoGBAOulUGVDBpUFDVJn8S5r +bqss/PXW+5xj5g8b9/tzBuyfL0NJ9p3q6EWlELPRTX3zXuVRYjSe9cBUm5FXPxP2 +s1TsGUILjSw21dOtodahvXRDN3Uw2ALQy1MTDy8xLhr9Le+e6xF1T2muzg0vDT6L +VXAYfY5NPUOiPaYAj792oZk/ +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/misc/misconfig_TestServer1_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/misc/misconfig_TestServer1_bundle.pem new file mode 100644 index 000000000..c3d1d2c96 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/misc/misconfig_TestServer1_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:c4:82:66:f8:5d:a6:b6:c7:66:e1:b2:01:3f:e0:72:fc:72:61:33 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:33:37 2023 GMT + Not After : Apr 28 19:33:37 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:af:26:5c:50:c0:fa:62:b5:fd:3d:c1:9e:26:51: + 58:62:04:37:b0:b5:6a:9b:6a:e3:22:3c:cd:ee:3c: + e7:8b:d3:e2:4c:08:1a:4d:63:c1:81:20:f4:53:a5: + 5d:2f:d2:71:d8:af:e3:26:95:b4:27:14:46:7f:e2: + 0a:73:12:a7:0e:ff:99:5a:29:f5:d0:65:96:b1:d1: + 96:7f:0c:43:b8:71:f2:4b:21:e1:97:6c:1b:01:e5: + 38:1a:39:44:72:d5:19:20:87:fe:90:4f:3b:97:f2: + 7d:bd:57:97:4d:9d:56:50:89:5b:79:29:7a:3a:13: + 97:08:61:c2:0c:a6:02:49:c9:8a:41:ab:8e:9f:25: + c9:33:18:f8:92:64:58:04:cc:a3:9d:cf:d4:d2:bd: + 20:ab:8b:9d:55:df:fb:5b:23:ac:95:12:fa:6f:07: + 93:3f:0e:03:86:c4:9b:25:06:21:9b:03:96:32:b8: + e0:0f:63:e2:1d:34:d1:41:35:19:09:c1:a0:dc:26: + b9:c8:66:fa:87:67:22:6e:0c:a6:e7:0f:24:64:b1: + 4f:84:05:ef:ad:8e:1b:f2:f4:38:87:d3:e3:48:a5: + 82:e0:66:89:1d:92:9a:59:67:a4:1d:03:6f:4d:a5: + fb:3b:c0:0b:73:a7:ab:8f:b4:10:25:8e:69:42:76: + 82:5f + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 43:16:E6:03:AF:37:B2:7B:BD:B3:C8:A2:9C:95:D7:FA:32:F8:9E:6F + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + a3:87:9f:05:e4:38:61:f7:c4:5b:17:13:4b:2c:9d:a2:4d:e6: + ad:93:54:c5:a3:00:27:0b:5c:45:c5:bd:f8:b6:a7:5a:2a:ec: + dc:9b:59:8a:c7:59:e7:b9:86:f7:27:be:45:0d:d9:86:76:cf: + 00:71:ad:aa:cc:73:50:8c:68:63:b0:e2:3a:59:dd:85:fa:0d: + f0:82:51:05:79:e6:d5:0e:0b:bb:ed:23:65:8f:d0:8b:01:df: + 86:74:bc:3a:22:90:e4:59:44:91:d5:44:d8:21:4d:4e:10:72: + 
0a:12:2e:4a:20:5f:15:e7:16:0b:6f:76:f3:04:1f:da:44:50: + 3b:c3:b3:0f:fa:05:cf:6e:64:9c:65:e2:0d:38:28:31:c3:c3: + b6:66:ef:80:d3:c4:5f:e9:f9:01:e9:ce:e6:99:46:a0:9d:ce: + 90:63:77:d2:85:21:d7:88:32:55:38:fe:10:07:69:cd:c8:06: + b7:6f:49:98:bf:cd:be:4f:ab:44:ea:78:af:ab:01:c8:3e:fa: + d9:54:bc:59:28:db:03:9b:1c:ee:e4:c3:ed:f3:97:30:c6:40: + 33:76:84:40:b2:b8:4d:b4:ca:a9:2d:d1:4d:17:92:ea:c0:c9: + cb:f6:b1:d7:d3:c7:e6:75:15:00:ff:c7:d9:54:63:27:19:5c: + 96:a5:e5:d9 +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUPMSCZvhdprbHZuGyAT/gcvxyYTMwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTMzMzdaFw0zMzA0MjgxOTMzMzdaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCvJlxQwPpitf09wZ4mUVhiBDewtWqbauMiPM3uPOeL0+JMCBpNY8GBIPRT +pV0v0nHYr+MmlbQnFEZ/4gpzEqcO/5laKfXQZZax0ZZ/DEO4cfJLIeGXbBsB5Tga +OURy1Rkgh/6QTzuX8n29V5dNnVZQiVt5KXo6E5cIYcIMpgJJyYpBq46fJckzGPiS +ZFgEzKOdz9TSvSCri51V3/tbI6yVEvpvB5M/DgOGxJslBiGbA5YyuOAPY+IdNNFB +NRkJwaDcJrnIZvqHZyJuDKbnDyRksU+EBe+tjhvy9DiH0+NIpYLgZokdkppZZ6Qd +A29Npfs7wAtzp6uPtBAljmlCdoJfAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUQxbm +A683snu9s8iinJXX+jL4nm8wHwYDVR0jBBgwFoAUtZFuT2S3FoR2+bS+mc5glZga +jp0wDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjE4ODg4L2ludGVybWVkaWF0ZTFfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +MTg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAo4efBeQ4YffEWxcTSyydok3mrZNUxaMAJwtcRcW9+LanWirs3JtZisdZ +57mG9ye+RQ3ZhnbPAHGtqsxzUIxoY7DiOlndhfoN8IJRBXnm1Q4Lu+0jZY/QiwHf +hnS8OiKQ5FlEkdVE2CFNThByChIuSiBfFecWC2928wQf2kRQO8OzD/oFz25knGXi +DTgoMcPDtmbvgNPEX+n5AenO5plGoJ3OkGN30oUh14gyVTj+EAdpzcgGt29JmL/N +vk+rROp4r6sByD762VS8WSjbA5sc7uTD7fOXMMZAM3aEQLK4TbTKqS3RTReS6sDJ +y/ax19PH5nUVAP/H2VRjJxlclqXl2Q== +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key 
Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config1_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config1_bundle.pem new file mode 100644 index 000000000..f632ad54a --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config1_bundle.pem @@ -0,0 +1,264 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 
00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 
+J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 27:5e:cf:7e:be:aa:02:b9:a9:c7:42:30:43:fe:0e:80:05:91:dd:0b + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 18:57:57 2023 GMT + Not After : Apr 28 18:57:57 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e2:21:6b:9f:ef:48:b9:de:22:fb:5b:37:09:68: + c7:b5:92:57:52:24:ef:85:00:e8:71:85:4d:0f:5b: + 8c:c6:e7:4f:19:f6:e3:0b:70:a3:41:7e:71:d4:0f: + d6:fd:f2:1a:ca:aa:57:91:76:9a:b2:82:62:60:ce: + f2:00:2e:d4:bc:58:d3:60:30:42:a6:28:b2:50:7b: + 58:01:9f:fb:0a:65:b0:40:d6:7c:e2:b7:da:8d:19: + d9:a5:51:d2:46:7e:14:46:ab:fa:df:ce:fe:84:08: + 98:63:46:1d:4d:8a:77:57:67:da:16:8b:32:0c:7c: + 41:e2:a5:ec:ee:7d:20:28:eb:03:5f:f5:e6:05:d8: + 8b:96:78:6f:ae:29:9a:50:f7:dc:96:31:86:81:b1: + 78:e8:eb:ef:5d:bb:ed:42:ec:94:c6:54:46:ec:05: + 6f:1b:0c:36:24:c6:a8:06:7e:5c:56:b8:43:3b:11: + f4:06:0a:05:15:19:3b:1f:c8:67:31:eb:3b:5b:2a: + 15:0a:7b:f9:6b:e4:10:ee:44:be:19:d8:db:44:01: + fa:3a:56:f5:6c:4e:f3:60:aa:e4:cd:b2:ad:77:07: + 45:ef:f1:d7:f5:fa:52:84:5c:03:4e:72:e0:a9:91: + c5:d9:d6:0a:84:33:98:31:f2:02:5b:3f:10:15:65: + 76:d7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 22:79:1a:b9:5d:fa:f5:c9:a3:88:22:c4:92:e6:64:6d:ce:a5: + ae:2e:69:48:6a:9e:d5:11:c5:bb:b0:de:38:1b:5b:04:85:60: + d6:64:14:ed:c2:62:02:7d:ad:d2:17:ad:ef:40:27:2b:50:59: + 4a:ff:88:c6:b3:16:5c:55:30:d9:23:bd:4f:0f:34:b7:7b:ed: + 7a:e1:f3:39:35:e9:18:6d:70:b1:2b:2a:e2:e5:cd:a1:54:8a: + f9:f4:95:81:29:84:3f:95:2f:48:e0:35:3e:d9:cb:84:4d:3d: + 3e:3c:0e:8d:24:42:5f:19:e6:06:a5:87:ae:ba:af:07:02:e7: + 6a:83:0a:89:d4:a4:38:ce:05:6e:f6:15:f1:7a:53:bb:50:28: + 89:51:3f:f2:54:f1:d3:c4:28:07:a1:3e:55:e5:84:b8:df:58: + af:c3:e7:81:c2:08:9c:35:e4:c4:86:75:a8:17:99:2c:a6:7f: + 46:30:9b:23:55:c5:d8:e2:6a:e4:08:a1:8b:dc:bc:5b:86:95: + 4a:79:fe:a6:93:3d:1a:5b:10:9a:2f:6a:45:2f:5d:c9:fa:95: + 2e:66:eb:52:df:88:a7:5f:42:8f:5f:46:07:79:8b:a7:49:82: + d3:81:c6:3e:c2:5a:15:c4:83:69:30:49:4d:6e:ea:05:1e:d8: + dc:29:ac:17 +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUJ17Pfr6qArmpx0IwQ/4OgAWR3QswDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE4 +NTc1N1oXDTMzMDQyODE4NTc1N1owUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdS +b290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4iFrn+9Iud4i ++1s3CWjHtZJXUiTvhQDocYVND1uMxudPGfbjC3CjQX5x1A/W/fIayqpXkXaasoJi +YM7yAC7UvFjTYDBCpiiyUHtYAZ/7CmWwQNZ84rfajRnZpVHSRn4URqv6387+hAiY +Y0YdTYp3V2faFosyDHxB4qXs7n0gKOsDX/XmBdiLlnhvrimaUPfcljGGgbF46Ovv +XbvtQuyUxlRG7AVvGww2JMaoBn5cVrhDOxH0BgoFFRk7H8hnMes7WyoVCnv5a+QQ 
+7kS+GdjbRAH6Olb1bE7zYKrkzbKtdwdF7/HX9fpShFwDTnLgqZHF2dYKhDOYMfIC +Wz8QFWV21wIDAQABo4GZMIGWMB0GA1UdDgQWBBTDEkK6qdhN4MM+utdHQaYJL220 +4TAfBgNVHSMEGDAWgBTDEkK6qdhN4MM+utdHQaYJL2204TAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBhjAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vMTI3 +LjAuMC4xOjg4ODgvcm9vdF9jcmwuZGVyMA0GCSqGSIb3DQEBCwUAA4IBAQAieRq5 +Xfr1yaOIIsSS5mRtzqWuLmlIap7VEcW7sN44G1sEhWDWZBTtwmICfa3SF63vQCcr +UFlK/4jGsxZcVTDZI71PDzS3e+164fM5NekYbXCxKyri5c2hVIr59JWBKYQ/lS9I +4DU+2cuETT0+PA6NJEJfGeYGpYeuuq8HAudqgwqJ1KQ4zgVu9hXxelO7UCiJUT/y +VPHTxCgHoT5V5YS431ivw+eBwgicNeTEhnWoF5kspn9GMJsjVcXY4mrkCKGL3Lxb +hpVKef6mkz0aWxCaL2pFL13J+pUuZutS34inX0KPX0YHeYunSYLTgcY+wloVxINp +MElNbuoFHtjcKawX +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL 
+BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config2_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config2_bundle.pem new file mode 100644 index 000000000..fb390ca11 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config2_bundle.pem @@ -0,0 +1,264 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: 
sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 
3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 27:5e:cf:7e:be:aa:02:b9:a9:c7:42:30:43:fe:0e:80:05:91:dd:0b + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 18:57:57 2023 GMT + Not After : Apr 28 18:57:57 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e2:21:6b:9f:ef:48:b9:de:22:fb:5b:37:09:68: + 
c7:b5:92:57:52:24:ef:85:00:e8:71:85:4d:0f:5b: + 8c:c6:e7:4f:19:f6:e3:0b:70:a3:41:7e:71:d4:0f: + d6:fd:f2:1a:ca:aa:57:91:76:9a:b2:82:62:60:ce: + f2:00:2e:d4:bc:58:d3:60:30:42:a6:28:b2:50:7b: + 58:01:9f:fb:0a:65:b0:40:d6:7c:e2:b7:da:8d:19: + d9:a5:51:d2:46:7e:14:46:ab:fa:df:ce:fe:84:08: + 98:63:46:1d:4d:8a:77:57:67:da:16:8b:32:0c:7c: + 41:e2:a5:ec:ee:7d:20:28:eb:03:5f:f5:e6:05:d8: + 8b:96:78:6f:ae:29:9a:50:f7:dc:96:31:86:81:b1: + 78:e8:eb:ef:5d:bb:ed:42:ec:94:c6:54:46:ec:05: + 6f:1b:0c:36:24:c6:a8:06:7e:5c:56:b8:43:3b:11: + f4:06:0a:05:15:19:3b:1f:c8:67:31:eb:3b:5b:2a: + 15:0a:7b:f9:6b:e4:10:ee:44:be:19:d8:db:44:01: + fa:3a:56:f5:6c:4e:f3:60:aa:e4:cd:b2:ad:77:07: + 45:ef:f1:d7:f5:fa:52:84:5c:03:4e:72:e0:a9:91: + c5:d9:d6:0a:84:33:98:31:f2:02:5b:3f:10:15:65: + 76:d7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 22:79:1a:b9:5d:fa:f5:c9:a3:88:22:c4:92:e6:64:6d:ce:a5: + ae:2e:69:48:6a:9e:d5:11:c5:bb:b0:de:38:1b:5b:04:85:60: + d6:64:14:ed:c2:62:02:7d:ad:d2:17:ad:ef:40:27:2b:50:59: + 4a:ff:88:c6:b3:16:5c:55:30:d9:23:bd:4f:0f:34:b7:7b:ed: + 7a:e1:f3:39:35:e9:18:6d:70:b1:2b:2a:e2:e5:cd:a1:54:8a: + f9:f4:95:81:29:84:3f:95:2f:48:e0:35:3e:d9:cb:84:4d:3d: + 3e:3c:0e:8d:24:42:5f:19:e6:06:a5:87:ae:ba:af:07:02:e7: + 6a:83:0a:89:d4:a4:38:ce:05:6e:f6:15:f1:7a:53:bb:50:28: + 89:51:3f:f2:54:f1:d3:c4:28:07:a1:3e:55:e5:84:b8:df:58: + af:c3:e7:81:c2:08:9c:35:e4:c4:86:75:a8:17:99:2c:a6:7f: + 46:30:9b:23:55:c5:d8:e2:6a:e4:08:a1:8b:dc:bc:5b:86:95: + 4a:79:fe:a6:93:3d:1a:5b:10:9a:2f:6a:45:2f:5d:c9:fa:95: + 2e:66:eb:52:df:88:a7:5f:42:8f:5f:46:07:79:8b:a7:49:82: + d3:81:c6:3e:c2:5a:15:c4:83:69:30:49:4d:6e:ea:05:1e:d8: + dc:29:ac:17 +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUJ17Pfr6qArmpx0IwQ/4OgAWR3QswDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE4 +NTc1N1oXDTMzMDQyODE4NTc1N1owUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdS +b290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4iFrn+9Iud4i ++1s3CWjHtZJXUiTvhQDocYVND1uMxudPGfbjC3CjQX5x1A/W/fIayqpXkXaasoJi +YM7yAC7UvFjTYDBCpiiyUHtYAZ/7CmWwQNZ84rfajRnZpVHSRn4URqv6387+hAiY +Y0YdTYp3V2faFosyDHxB4qXs7n0gKOsDX/XmBdiLlnhvrimaUPfcljGGgbF46Ovv +XbvtQuyUxlRG7AVvGww2JMaoBn5cVrhDOxH0BgoFFRk7H8hnMes7WyoVCnv5a+QQ +7kS+GdjbRAH6Olb1bE7zYKrkzbKtdwdF7/HX9fpShFwDTnLgqZHF2dYKhDOYMfIC +Wz8QFWV21wIDAQABo4GZMIGWMB0GA1UdDgQWBBTDEkK6qdhN4MM+utdHQaYJL220 +4TAfBgNVHSMEGDAWgBTDEkK6qdhN4MM+utdHQaYJL2204TAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBhjAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vMTI3 +LjAuMC4xOjg4ODgvcm9vdF9jcmwuZGVyMA0GCSqGSIb3DQEBCwUAA4IBAQAieRq5 +Xfr1yaOIIsSS5mRtzqWuLmlIap7VEcW7sN44G1sEhWDWZBTtwmICfa3SF63vQCcr +UFlK/4jGsxZcVTDZI71PDzS3e+164fM5NekYbXCxKyri5c2hVIr59JWBKYQ/lS9I +4DU+2cuETT0+PA6NJEJfGeYGpYeuuq8HAudqgwqJ1KQ4zgVu9hXxelO7UCiJUT/y +VPHTxCgHoT5V5YS431ivw+eBwgicNeTEhnWoF5kspn9GMJsjVcXY4mrkCKGL3Lxb +hpVKef6mkz0aWxCaL2pFL13J+pUuZutS34inX0KPX0YHeYunSYLTgcY+wloVxINp +MElNbuoFHtjcKawX +-----END CERTIFICATE----- diff --git 
a/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config3_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config3_bundle.pem new file mode 100644 index 000000000..2ba91b0d0 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/misc/trust_config3_bundle.pem @@ -0,0 +1,264 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC 
+YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 
8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 27:5e:cf:7e:be:aa:02:b9:a9:c7:42:30:43:fe:0e:80:05:91:dd:0b + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 18:57:57 2023 GMT + Not After : Apr 28 18:57:57 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e2:21:6b:9f:ef:48:b9:de:22:fb:5b:37:09:68: + c7:b5:92:57:52:24:ef:85:00:e8:71:85:4d:0f:5b: + 8c:c6:e7:4f:19:f6:e3:0b:70:a3:41:7e:71:d4:0f: + d6:fd:f2:1a:ca:aa:57:91:76:9a:b2:82:62:60:ce: + f2:00:2e:d4:bc:58:d3:60:30:42:a6:28:b2:50:7b: + 58:01:9f:fb:0a:65:b0:40:d6:7c:e2:b7:da:8d:19: + d9:a5:51:d2:46:7e:14:46:ab:fa:df:ce:fe:84:08: + 98:63:46:1d:4d:8a:77:57:67:da:16:8b:32:0c:7c: + 41:e2:a5:ec:ee:7d:20:28:eb:03:5f:f5:e6:05:d8: + 8b:96:78:6f:ae:29:9a:50:f7:dc:96:31:86:81:b1: + 78:e8:eb:ef:5d:bb:ed:42:ec:94:c6:54:46:ec:05: + 6f:1b:0c:36:24:c6:a8:06:7e:5c:56:b8:43:3b:11: + f4:06:0a:05:15:19:3b:1f:c8:67:31:eb:3b:5b:2a: + 15:0a:7b:f9:6b:e4:10:ee:44:be:19:d8:db:44:01: + fa:3a:56:f5:6c:4e:f3:60:aa:e4:cd:b2:ad:77:07: + 45:ef:f1:d7:f5:fa:52:84:5c:03:4e:72:e0:a9:91: + c5:d9:d6:0a:84:33:98:31:f2:02:5b:3f:10:15:65: + 76:d7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 22:79:1a:b9:5d:fa:f5:c9:a3:88:22:c4:92:e6:64:6d:ce:a5: + 
ae:2e:69:48:6a:9e:d5:11:c5:bb:b0:de:38:1b:5b:04:85:60: + d6:64:14:ed:c2:62:02:7d:ad:d2:17:ad:ef:40:27:2b:50:59: + 4a:ff:88:c6:b3:16:5c:55:30:d9:23:bd:4f:0f:34:b7:7b:ed: + 7a:e1:f3:39:35:e9:18:6d:70:b1:2b:2a:e2:e5:cd:a1:54:8a: + f9:f4:95:81:29:84:3f:95:2f:48:e0:35:3e:d9:cb:84:4d:3d: + 3e:3c:0e:8d:24:42:5f:19:e6:06:a5:87:ae:ba:af:07:02:e7: + 6a:83:0a:89:d4:a4:38:ce:05:6e:f6:15:f1:7a:53:bb:50:28: + 89:51:3f:f2:54:f1:d3:c4:28:07:a1:3e:55:e5:84:b8:df:58: + af:c3:e7:81:c2:08:9c:35:e4:c4:86:75:a8:17:99:2c:a6:7f: + 46:30:9b:23:55:c5:d8:e2:6a:e4:08:a1:8b:dc:bc:5b:86:95: + 4a:79:fe:a6:93:3d:1a:5b:10:9a:2f:6a:45:2f:5d:c9:fa:95: + 2e:66:eb:52:df:88:a7:5f:42:8f:5f:46:07:79:8b:a7:49:82: + d3:81:c6:3e:c2:5a:15:c4:83:69:30:49:4d:6e:ea:05:1e:d8: + dc:29:ac:17 +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUJ17Pfr6qArmpx0IwQ/4OgAWR3QswDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE4 +NTc1N1oXDTMzMDQyODE4NTc1N1owUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdS +b290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4iFrn+9Iud4i ++1s3CWjHtZJXUiTvhQDocYVND1uMxudPGfbjC3CjQX5x1A/W/fIayqpXkXaasoJi +YM7yAC7UvFjTYDBCpiiyUHtYAZ/7CmWwQNZ84rfajRnZpVHSRn4URqv6387+hAiY +Y0YdTYp3V2faFosyDHxB4qXs7n0gKOsDX/XmBdiLlnhvrimaUPfcljGGgbF46Ovv +XbvtQuyUxlRG7AVvGww2JMaoBn5cVrhDOxH0BgoFFRk7H8hnMes7WyoVCnv5a+QQ +7kS+GdjbRAH6Olb1bE7zYKrkzbKtdwdF7/HX9fpShFwDTnLgqZHF2dYKhDOYMfIC +Wz8QFWV21wIDAQABo4GZMIGWMB0GA1UdDgQWBBTDEkK6qdhN4MM+utdHQaYJL220 +4TAfBgNVHSMEGDAWgBTDEkK6qdhN4MM+utdHQaYJL2204TAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBhjAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vMTI3 +LjAuMC4xOjg4ODgvcm9vdF9jcmwuZGVyMA0GCSqGSIb3DQEBCwUAA4IBAQAieRq5 +Xfr1yaOIIsSS5mRtzqWuLmlIap7VEcW7sN44G1sEhWDWZBTtwmICfa3SF63vQCcr +UFlK/4jGsxZcVTDZI71PDzS3e+164fM5NekYbXCxKyri5c2hVIr59JWBKYQ/lS9I +4DU+2cuETT0+PA6NJEJfGeYGpYeuuq8HAudqgwqJ1KQ4zgVu9hXxelO7UCiJUT/y +VPHTxCgHoT5V5YS431ivw+eBwgicNeTEhnWoF5kspn9GMJsjVcXY4mrkCKGL3Lxb +hpVKef6mkz0aWxCaL2pFL13J+pUuZutS34inX0KPX0YHeYunSYLTgcY+wloVxINp +MElNbuoFHtjcKawX +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_bundle.pem new file mode 100644 index 000000000..760eb22ec --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_bundle.pem @@ -0,0 +1,181 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 29:e1:52:8d:fd:a5:2a:87:eb:1d:e4:1d:47:6c:e1:8a:58:69:73:ab + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:28:39 2023 GMT + Not After : Apr 28 19:28:39 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=OCSP Responder + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:a3:0c:ca:eb:80:eb:a1:0e:1e:71:9b:d3:b3:f9: + 65:ce:70:c2:21:06:3c:31:c1:06:7e:a5:a8:4a:e1: + 21:a3:74:54:9f:57:ce:50:d6:c3:29:3c:43:b0:9d: + 3e:54:94:ee:8d:fa:0d:71:6c:df:5e:9e:01:30:79: + 6c:bb:97:5d:af:bb:5b:05:77:72:9f:55:e6:66:45: + f4:e2:c2:cf:7b:0e:58:d6:14:6a:76:29:ac:e3:30: + 28:0d:ee:bd:ca:aa:ae:1f:1e:ef:40:f3:c3:ab:17: + f2:d7:ec:0d:e1:fb:68:9a:09:83:99:11:58:42:94: + f8:0d:d4:9a:6f:9f:3b:e8:56:f0:a9:b7:18:1a:91: + 41:7c:43:e3:db:b1:01:f1:ad:0b:39:d7:65:98:e6: + 15:b0:17:a9:56:6e:fb:84:7a:c0:cc:67:75:fc:f6: + 75:84:31:78:c5:6d:51:8f:d0:19:d3:16:4f:87:ef: + 5b:33:b9:7a:dd:fe:5f:a8:6a:fd:44:54:00:f3:a4: + 
a6:5b:fd:3b:65:38:4f:82:4f:b9:c4:bd:c9:9a:56: + fc:54:f1:58:2f:cb:ee:f4:08:fd:b7:ec:ad:28:08: + 66:9b:f8:78:98:32:db:b1:56:dd:0e:31:ba:c6:e3: + 56:f5:02:2f:fb:76:28:bb:c4:8b:f3:6b:da:aa:1d: + 38:21 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CB:5E:50:60:7A:AB:2F:A9:3B:1E:24:AB:02:42:8D:EC:81:60:48:13 + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Key Usage: critical + Digital Signature + X509v3 Extended Key Usage: critical + OCSP Signing + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 48:65:ce:6d:91:46:30:37:b6:f2:76:c0:42:e3:f5:ee:e9:32: + 0e:46:b5:d5:9d:ac:b0:f2:23:f5:35:a8:1c:61:66:81:c0:0d: + bc:a4:bb:b5:be:47:58:8b:f1:d1:5f:73:83:d2:99:da:3e:a3: + 0b:32:81:96:a4:bd:a8:57:8e:fe:3d:c4:93:57:ef:05:77:60: + c9:88:1c:2e:25:7e:ea:c8:95:8d:a6:4a:73:e5:bb:6c:c4:3b: + 01:03:90:8d:12:f5:69:13:c5:79:87:ae:45:cb:49:c8:90:24: + 39:30:cf:27:ba:31:1e:5f:5b:e0:0f:93:82:66:28:33:dc:e3: + a1:a8:fc:ad:40:d0:48:31:63:fb:a0:6a:13:18:b1:8b:59:bb: + ef:96:f8:83:98:6c:4a:18:37:1a:02:ad:c2:42:1d:7e:1c:dc: + 4a:77:b7:f5:ae:97:3e:17:e8:35:96:85:a0:e4:30:c5:03:0b: + 62:55:13:c1:3f:df:15:1b:c3:45:f7:69:d6:5e:f5:77:fc:4f: + e8:28:3b:3e:f0:2c:20:22:81:72:a3:d6:1b:d1:52:63:86:21: + 22:06:7a:5b:f4:2a:c7:e5:b9:97:ac:1b:56:b5:4c:62:e9:f9: + 6f:49:5f:43:3d:9c:e6:85:3a:f8:c9:4c:33:fd:e9:aa:88:8e: + cf:28:5c:69 +-----BEGIN CERTIFICATE----- +MIIELTCCAxWgAwIBAgIUKeFSjf2lKofrHeQdR2zhilhpc6swDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTI4MzlaFw0zMzA0MjgxOTI4MzlaMFcxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEX +MBUGA1UEAwwOT0NTUCBSZXNwb25kZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCjDMrrgOuhDh5xm9Oz+WXOcMIhBjwxwQZ+pahK4SGjdFSfV85Q1sMp +PEOwnT5UlO6N+g1xbN9engEweWy7l12vu1sFd3KfVeZmRfTiws97DljWFGp2Kazj +MCgN7r3Kqq4fHu9A88OrF/LX7A3h+2iaCYOZEVhClPgN1JpvnzvoVvCptxgakUF8 +Q+PbsQHxrQs512WY5hWwF6lWbvuEesDMZ3X89nWEMXjFbVGP0BnTFk+H71szuXrd +/l+oav1EVADzpKZb/TtlOE+CT7nEvcmaVvxU8Vgvy+70CP237K0oCGab+HiYMtux +Vt0OMbrG41b1Ai/7dii7xIvza9qqHTghAgMBAAGjge0wgeowHQYDVR0OBBYEFMte +UGB6qy+pOx4kqwJCjeyBYEgTMB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWY +Go6dMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBYGA1UdJQEB/wQMMAoG +CCsGAQUFBwMJMD0GA1UdHwQ2MDQwMqAwoC6GLGh0dHA6Ly8xMjcuMC4wLjE6MTg4 +ODgvaW50ZXJtZWRpYXRlMV9jcmwuZGVyMDMGCCsGAQUFBwEBBCcwJTAjBggrBgEF +BQcwAYYXaHR0cDovLzEyNy4wLjAuMToxODg4OC8wDQYJKoZIhvcNAQELBQADggEB +AEhlzm2RRjA3tvJ2wELj9e7pMg5GtdWdrLDyI/U1qBxhZoHADbyku7W+R1iL8dFf +c4PSmdo+owsygZakvahXjv49xJNX7wV3YMmIHC4lfurIlY2mSnPlu2zEOwEDkI0S +9WkTxXmHrkXLSciQJDkwzye6MR5fW+APk4JmKDPc46Go/K1A0EgxY/ugahMYsYtZ +u++W+IOYbEoYNxoCrcJCHX4c3Ep3t/Wulz4X6DWWhaDkMMUDC2JVE8E/3xUbw0X3 +adZe9Xf8T+goOz7wLCAigXKj1hvRUmOGISIGelv0KsfluZesG1a1TGLp+W9JX0M9 +nOaFOvjJTDP96aqIjs8oXGk= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + 
Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr 
+uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_cert.pem new file mode 100644 index 000000000..218a28e9a --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_cert.pem @@ -0,0 +1,92 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 29:e1:52:8d:fd:a5:2a:87:eb:1d:e4:1d:47:6c:e1:8a:58:69:73:ab + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:28:39 2023 GMT + Not After : Apr 28 19:28:39 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=OCSP Responder + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:a3:0c:ca:eb:80:eb:a1:0e:1e:71:9b:d3:b3:f9: + 65:ce:70:c2:21:06:3c:31:c1:06:7e:a5:a8:4a:e1: + 21:a3:74:54:9f:57:ce:50:d6:c3:29:3c:43:b0:9d: + 3e:54:94:ee:8d:fa:0d:71:6c:df:5e:9e:01:30:79: + 6c:bb:97:5d:af:bb:5b:05:77:72:9f:55:e6:66:45: + f4:e2:c2:cf:7b:0e:58:d6:14:6a:76:29:ac:e3:30: + 28:0d:ee:bd:ca:aa:ae:1f:1e:ef:40:f3:c3:ab:17: + f2:d7:ec:0d:e1:fb:68:9a:09:83:99:11:58:42:94: + f8:0d:d4:9a:6f:9f:3b:e8:56:f0:a9:b7:18:1a:91: + 41:7c:43:e3:db:b1:01:f1:ad:0b:39:d7:65:98:e6: + 15:b0:17:a9:56:6e:fb:84:7a:c0:cc:67:75:fc:f6: + 75:84:31:78:c5:6d:51:8f:d0:19:d3:16:4f:87:ef: + 5b:33:b9:7a:dd:fe:5f:a8:6a:fd:44:54:00:f3:a4: + a6:5b:fd:3b:65:38:4f:82:4f:b9:c4:bd:c9:9a:56: + fc:54:f1:58:2f:cb:ee:f4:08:fd:b7:ec:ad:28:08: + 66:9b:f8:78:98:32:db:b1:56:dd:0e:31:ba:c6:e3: + 56:f5:02:2f:fb:76:28:bb:c4:8b:f3:6b:da:aa:1d: + 38:21 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CB:5E:50:60:7A:AB:2F:A9:3B:1E:24:AB:02:42:8D:EC:81:60:48:13 + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Key Usage: critical + Digital Signature + X509v3 Extended Key Usage: critical + OCSP Signing + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 48:65:ce:6d:91:46:30:37:b6:f2:76:c0:42:e3:f5:ee:e9:32: + 0e:46:b5:d5:9d:ac:b0:f2:23:f5:35:a8:1c:61:66:81:c0:0d: + bc:a4:bb:b5:be:47:58:8b:f1:d1:5f:73:83:d2:99:da:3e:a3: + 0b:32:81:96:a4:bd:a8:57:8e:fe:3d:c4:93:57:ef:05:77:60: + c9:88:1c:2e:25:7e:ea:c8:95:8d:a6:4a:73:e5:bb:6c:c4:3b: + 01:03:90:8d:12:f5:69:13:c5:79:87:ae:45:cb:49:c8:90:24: + 39:30:cf:27:ba:31:1e:5f:5b:e0:0f:93:82:66:28:33:dc:e3: + a1:a8:fc:ad:40:d0:48:31:63:fb:a0:6a:13:18:b1:8b:59:bb: + ef:96:f8:83:98:6c:4a:18:37:1a:02:ad:c2:42:1d:7e:1c:dc: + 4a:77:b7:f5:ae:97:3e:17:e8:35:96:85:a0:e4:30:c5:03:0b: + 62:55:13:c1:3f:df:15:1b:c3:45:f7:69:d6:5e:f5:77:fc:4f: + e8:28:3b:3e:f0:2c:20:22:81:72:a3:d6:1b:d1:52:63:86:21: + 22:06:7a:5b:f4:2a:c7:e5:b9:97:ac:1b:56:b5:4c:62:e9:f9: + 6f:49:5f:43:3d:9c:e6:85:3a:f8:c9:4c:33:fd:e9:aa:88:8e: + cf:28:5c:69 +-----BEGIN CERTIFICATE----- +MIIELTCCAxWgAwIBAgIUKeFSjf2lKofrHeQdR2zhilhpc6swDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe 
+Fw0yMzA1MDExOTI4MzlaFw0zMzA0MjgxOTI4MzlaMFcxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEX +MBUGA1UEAwwOT0NTUCBSZXNwb25kZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCjDMrrgOuhDh5xm9Oz+WXOcMIhBjwxwQZ+pahK4SGjdFSfV85Q1sMp +PEOwnT5UlO6N+g1xbN9engEweWy7l12vu1sFd3KfVeZmRfTiws97DljWFGp2Kazj +MCgN7r3Kqq4fHu9A88OrF/LX7A3h+2iaCYOZEVhClPgN1JpvnzvoVvCptxgakUF8 +Q+PbsQHxrQs512WY5hWwF6lWbvuEesDMZ3X89nWEMXjFbVGP0BnTFk+H71szuXrd +/l+oav1EVADzpKZb/TtlOE+CT7nEvcmaVvxU8Vgvy+70CP237K0oCGab+HiYMtux +Vt0OMbrG41b1Ai/7dii7xIvza9qqHTghAgMBAAGjge0wgeowHQYDVR0OBBYEFMte +UGB6qy+pOx4kqwJCjeyBYEgTMB8GA1UdIwQYMBaAFLWRbk9ktxaEdvm0vpnOYJWY +Go6dMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgeAMBYGA1UdJQEB/wQMMAoG +CCsGAQUFBwMJMD0GA1UdHwQ2MDQwMqAwoC6GLGh0dHA6Ly8xMjcuMC4wLjE6MTg4 +ODgvaW50ZXJtZWRpYXRlMV9jcmwuZGVyMDMGCCsGAQUFBwEBBCcwJTAjBggrBgEF +BQcwAYYXaHR0cDovLzEyNy4wLjAuMToxODg4OC8wDQYJKoZIhvcNAQELBQADggEB +AEhlzm2RRjA3tvJ2wELj9e7pMg5GtdWdrLDyI/U1qBxhZoHADbyku7W+R1iL8dFf +c4PSmdo+owsygZakvahXjv49xJNX7wV3YMmIHC4lfurIlY2mSnPlu2zEOwEDkI0S +9WkTxXmHrkXLSciQJDkwzye6MR5fW+APk4JmKDPc46Go/K1A0EgxY/ugahMYsYtZ +u++W+IOYbEoYNxoCrcJCHX4c3Ep3t/Wulz4X6DWWhaDkMMUDC2JVE8E/3xUbw0X3 +adZe9Xf8T+goOz7wLCAigXKj1hvRUmOGISIGelv0KsfluZesG1a1TGLp+W9JX0M9 +nOaFOvjJTDP96aqIjs8oXGk= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/ocsp1/private/ocsp1_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/ocsp1/private/ocsp1_keypair.pem new file mode 100644 index 000000000..13b6dbe96 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/ocsp1/private/ocsp1_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCjDMrrgOuhDh5x +m9Oz+WXOcMIhBjwxwQZ+pahK4SGjdFSfV85Q1sMpPEOwnT5UlO6N+g1xbN9engEw +eWy7l12vu1sFd3KfVeZmRfTiws97DljWFGp2KazjMCgN7r3Kqq4fHu9A88OrF/LX +7A3h+2iaCYOZEVhClPgN1JpvnzvoVvCptxgakUF8Q+PbsQHxrQs512WY5hWwF6lW +bvuEesDMZ3X89nWEMXjFbVGP0BnTFk+H71szuXrd/l+oav1EVADzpKZb/TtlOE+C +T7nEvcmaVvxU8Vgvy+70CP237K0oCGab+HiYMtuxVt0OMbrG41b1Ai/7dii7xIvz +a9qqHTghAgMBAAECggEAE++sPxPuG6zzhX4hakvYCiAo6GtQAGBi6CjetTsmRwti +DnKoyCMeTUQwXZ+4X5SvP35f1urSPAozSIdMR3qoSqSsqjQy+G8DIyWyHejmgBwe +uhxYcRbC7Ct29k8m9ykb7bO1WtqDZf/hYkvbXbKFFXKM2/IuOcPnuZ8xe+z7IPsQ +ODHnrQs45wQyi2i2/+AbvEJjb3bb3oS8MfoZfvO8F06ejTOmv/ATZSxX0T6ppCPj +HdmKqKDXlYQNA/LQeM4cs2FaQH170R1vGHppDjcs2ezqElB7/HKfKWeEn0Eytu9E +eWw9tZteisnzfqEvDMgOM2eWwAzfIhXSQYMWlVBicQKBgQC6MPaLd4r82BBMj7qx +ChdBxB7LXptvx/q3SrMjZ6GKmrGdXMbsos50XexajktBqkXfUMa8hGqmlciN5xL1 ++w//p7oSzb3VorOyHVXZpc8p79eUeX8ONcwySOYwO+CpqFBBDlvPn1OuPnlUL1pv +IgCMT66flWJxRklDMIJsHr+iWQKBgQDgLq3I2cj4q+3121ECPXKLt+VCHUY0aygc +tl6lvQw61UnmyLQ+k53/MmyPGGCxIFr18DsoKeWYwt3kWTW0MCDrQuO6PZkB268v +gdsmN3nhAKiR0gUwJDrFjpPWr0GAhw9LE7HqpvkQ3fG5YSnXTUibhm6smHg7dzVL +ER+QJ+Y7CQKBgHIDN4WRjy9jEx/+x0BPwIwKDx1TcnURjQoeGPHuLHJWZbrJrBoN +W8TQGsIc7iJopN6pdPjNUQ1vHN8gB3FO6q4PRBbtm3gtaEICSqa7LM8uSeFmQJIw +CTklgKc6k0jwgyxDIZ9SnghNwzf0wzjYJmPFC1Y3QI/CjWwyUTrp3UkJAoGBANHc +IKcS6MWQ/RPYGP+F0kLlBWJc0Smk3knylquES3yPybyXSdQCkDcjVuilo25soXn1 +RwuUHPBiCyIGOPXS0B4r4c6odyF8K4THhQVDjX6KBUNsXZrxb2scy1x/d0wAItrf +NwA5CpM1kWE+idKY8E1XDSfZG0Rfla4N+4QRNb8xAoGAQrVe80TpPpzDH846xaPF +BAhjKz7dRrUQ1n7ZI6yw8I5bU2ky9jSF3PRsDismM23aNbYhIgjcYv0tG0LMlWLV +2eIrU9OoA7aepDPozyhEkENaWqYXX/T8AjD+Kaw7XJnt/NX8eS0RF2qgDA/HEwWw +uf1ecRqpjZ9cxNGLZ+/pOkM= +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_bundle.pem new file mode 100644 index 000000000..2d3f2d020 --- /dev/null +++ 
b/test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_bundle.pem @@ -0,0 +1,181 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7b:97:35:73:2b:2b:5f:74:c6:43:83:8f:ae:65:5b:a0:f5:f4:ff:1f + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: May 1 19:29:28 2023 GMT + Not After : Apr 28 19:29:28 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=OCSP Responder 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b8:98:3d:03:4d:5e:b2:66:5e:51:3b:f9:3d:f2: + 7a:24:6b:70:5c:2f:7a:05:b2:51:77:62:45:e7:33: + 75:77:db:31:6f:2d:13:32:cd:d3:a0:03:84:ee:f9: + 2b:81:9d:e5:c9:ba:e2:25:c9:a7:18:2b:fd:f1:95: + ad:d3:46:90:d9:7b:7f:39:2d:85:b4:70:7c:72:44: + 99:fb:df:9f:22:4c:81:77:35:bb:fe:41:7f:86:f5: + c7:29:53:7c:ee:d4:cc:09:54:fa:cc:b1:4d:4b:c2: + c7:c7:3e:1a:13:59:66:36:31:ae:60:1b:6a:05:b0: + 5b:64:96:77:9d:74:cc:42:6e:13:d1:21:83:94:8e: + 6c:4c:d8:42:57:94:17:ff:26:d4:d1:2f:64:58:b5: + 47:1a:22:38:69:bf:c0:5a:9c:c3:88:01:0a:1d:f7: + d8:68:88:7c:57:5d:44:c4:71:d0:66:8d:1c:39:e0: + af:e8:f7:ce:51:60:7c:1d:b7:d5:e7:b5:3e:6a:a5: + 2b:46:c3:4e:b9:ef:de:bd:a6:be:e2:66:79:a9:6a: + 0d:c1:b2:e7:5e:03:9d:de:dd:41:b9:c9:80:2c:bd: + 6d:1f:09:5f:4e:25:e7:ac:ff:23:47:8f:5f:74:69: + be:81:42:5c:e6:1a:f7:65:1f:eb:a1:d0:69:6f:be: + 7e:89 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + E4:4D:EE:6A:A3:30:91:37:3E:5C:1D:BD:26:96:5F:FF:DB:D3:E2:15 + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Key Usage: critical + Digital Signature + X509v3 Extended Key Usage: critical + OCSP Signing + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 6c:d6:fa:8f:6f:c9:0a:99:0b:ee:6c:27:1f:75:52:b8:82:33: + 41:fe:01:a1:f8:c5:24:4e:9e:3b:e2:89:0f:01:2b:8e:c4:76: + fb:d9:75:5a:b2:9c:e0:36:8d:fd:90:9f:28:92:1b:a3:74:fd: + c5:39:28:51:06:ab:95:f7:64:95:e8:7b:d9:97:35:33:97:05: + 38:87:e6:e6:d7:a5:0b:a1:11:0c:b7:8b:76:b8:a9:46:33:ba: + 50:b3:3b:96:90:65:4b:ea:14:20:c9:f7:0d:8d:5e:89:c6:78: + e3:0b:4f:d2:db:10:46:8a:c4:81:6f:20:13:30:83:a8:45:4d: + 2b:ef:f0:ce:18:a7:96:fc:b9:67:79:e9:a9:f0:2f:b2:33:1c: + 83:cf:a3:4b:df:fd:c5:58:ae:87:83:d9:be:22:85:58:41:f5: + a0:a2:2d:56:98:40:12:78:c5:43:b0:50:34:0f:6c:0b:52:ad: + 68:e1:7a:9e:c1:54:58:bf:b4:f1:c5:3b:bf:97:e4:f9:44:09: + f5:c7:67:7d:dc:3d:ea:a9:9f:0f:3a:aa:9c:4a:c1:ef:a1:52: + 25:e4:57:22:d6:af:c6:c9:c8:02:91:4b:ec:a2:d6:ba:b5:bf: + ed:22:7c:b2:71:6c:78:f4:ba:e4:b9:b7:1f:11:65:d4:4f:77: + 4d:ef:b5:43 +-----BEGIN CERTIFICATE----- +MIIELzCCAxegAwIBAgIUe5c1cysrX3TGQ4OPrmVboPX0/x8wDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA1MDExOTI5MjhaFw0zMzA0MjgxOTI5MjhaMFkxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEZ +MBcGA1UEAwwQT0NTUCBSZXNwb25kZXIgMjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBALiYPQNNXrJmXlE7+T3yeiRrcFwvegWyUXdiReczdXfbMW8tEzLN +06ADhO75K4Gd5cm64iXJpxgr/fGVrdNGkNl7fzkthbRwfHJEmfvfnyJMgXc1u/5B +f4b1xylTfO7UzAlU+syxTUvCx8c+GhNZZjYxrmAbagWwW2SWd510zEJuE9Ehg5SO +bEzYQleUF/8m1NEvZFi1RxoiOGm/wFqcw4gBCh332GiIfFddRMRx0GaNHDngr+j3 
+zlFgfB231ee1PmqlK0bDTrnv3r2mvuJmealqDcGy514Dnd7dQbnJgCy9bR8JX04l +56z/I0ePX3RpvoFCXOYa92Uf66HQaW++fokCAwEAAaOB7TCB6jAdBgNVHQ4EFgQU +5E3uaqMwkTc+XB29JpZf/9vT4hUwHwYDVR0jBBgwFoAUdVXijuetpd2APckzCyyi +V3ftFawwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww +CgYIKwYBBQUHAwkwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDovLzEyNy4wLjAuMToy +ODg4OC9pbnRlcm1lZGlhdGUyX2NybC5kZXIwMwYIKwYBBQUHAQEEJzAlMCMGCCsG +AQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjI4ODg4LzANBgkqhkiG9w0BAQsFAAOC +AQEAbNb6j2/JCpkL7mwnH3VSuIIzQf4BofjFJE6eO+KJDwErjsR2+9l1WrKc4DaN +/ZCfKJIbo3T9xTkoUQarlfdkleh72Zc1M5cFOIfm5telC6ERDLeLdripRjO6ULM7 +lpBlS+oUIMn3DY1eicZ44wtP0tsQRorEgW8gEzCDqEVNK+/wzhinlvy5Z3npqfAv +sjMcg8+jS9/9xViuh4PZviKFWEH1oKItVphAEnjFQ7BQNA9sC1KtaOF6nsFUWL+0 +8cU7v5fk+UQJ9cdnfdw96qmfDzqqnErB76FSJeRXItavxsnIApFL7KLWurW/7SJ8 +snFsePS65Lm3HxFl1E93Te+1Qw== +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + 
f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_cert.pem new file mode 100644 index 000000000..1f26c3843 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/ocsp2/ocsp2_cert.pem @@ -0,0 +1,92 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 7b:97:35:73:2b:2b:5f:74:c6:43:83:8f:ae:65:5b:a0:f5:f4:ff:1f + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: May 1 19:29:28 2023 GMT + Not After : Apr 28 19:29:28 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=OCSP Responder 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:b8:98:3d:03:4d:5e:b2:66:5e:51:3b:f9:3d:f2: + 7a:24:6b:70:5c:2f:7a:05:b2:51:77:62:45:e7:33: + 75:77:db:31:6f:2d:13:32:cd:d3:a0:03:84:ee:f9: + 2b:81:9d:e5:c9:ba:e2:25:c9:a7:18:2b:fd:f1:95: + ad:d3:46:90:d9:7b:7f:39:2d:85:b4:70:7c:72:44: + 99:fb:df:9f:22:4c:81:77:35:bb:fe:41:7f:86:f5: + c7:29:53:7c:ee:d4:cc:09:54:fa:cc:b1:4d:4b:c2: + c7:c7:3e:1a:13:59:66:36:31:ae:60:1b:6a:05:b0: + 5b:64:96:77:9d:74:cc:42:6e:13:d1:21:83:94:8e: + 6c:4c:d8:42:57:94:17:ff:26:d4:d1:2f:64:58:b5: + 47:1a:22:38:69:bf:c0:5a:9c:c3:88:01:0a:1d:f7: + d8:68:88:7c:57:5d:44:c4:71:d0:66:8d:1c:39:e0: + af:e8:f7:ce:51:60:7c:1d:b7:d5:e7:b5:3e:6a:a5: + 2b:46:c3:4e:b9:ef:de:bd:a6:be:e2:66:79:a9:6a: + 0d:c1:b2:e7:5e:03:9d:de:dd:41:b9:c9:80:2c:bd: + 6d:1f:09:5f:4e:25:e7:ac:ff:23:47:8f:5f:74:69: + be:81:42:5c:e6:1a:f7:65:1f:eb:a1:d0:69:6f:be: + 7e:89 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + E4:4D:EE:6A:A3:30:91:37:3E:5C:1D:BD:26:96:5F:FF:DB:D3:E2:15 + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Key Usage: critical + Digital Signature + X509v3 Extended Key Usage: critical + OCSP Signing + X509v3 CRL Distribution Points: + Full Name: + 
URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 6c:d6:fa:8f:6f:c9:0a:99:0b:ee:6c:27:1f:75:52:b8:82:33: + 41:fe:01:a1:f8:c5:24:4e:9e:3b:e2:89:0f:01:2b:8e:c4:76: + fb:d9:75:5a:b2:9c:e0:36:8d:fd:90:9f:28:92:1b:a3:74:fd: + c5:39:28:51:06:ab:95:f7:64:95:e8:7b:d9:97:35:33:97:05: + 38:87:e6:e6:d7:a5:0b:a1:11:0c:b7:8b:76:b8:a9:46:33:ba: + 50:b3:3b:96:90:65:4b:ea:14:20:c9:f7:0d:8d:5e:89:c6:78: + e3:0b:4f:d2:db:10:46:8a:c4:81:6f:20:13:30:83:a8:45:4d: + 2b:ef:f0:ce:18:a7:96:fc:b9:67:79:e9:a9:f0:2f:b2:33:1c: + 83:cf:a3:4b:df:fd:c5:58:ae:87:83:d9:be:22:85:58:41:f5: + a0:a2:2d:56:98:40:12:78:c5:43:b0:50:34:0f:6c:0b:52:ad: + 68:e1:7a:9e:c1:54:58:bf:b4:f1:c5:3b:bf:97:e4:f9:44:09: + f5:c7:67:7d:dc:3d:ea:a9:9f:0f:3a:aa:9c:4a:c1:ef:a1:52: + 25:e4:57:22:d6:af:c6:c9:c8:02:91:4b:ec:a2:d6:ba:b5:bf: + ed:22:7c:b2:71:6c:78:f4:ba:e4:b9:b7:1f:11:65:d4:4f:77: + 4d:ef:b5:43 +-----BEGIN CERTIFICATE----- +MIIELzCCAxegAwIBAgIUe5c1cysrX3TGQ4OPrmVboPX0/x8wDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA1MDExOTI5MjhaFw0zMzA0MjgxOTI5MjhaMFkxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEZ +MBcGA1UEAwwQT0NTUCBSZXNwb25kZXIgMjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBALiYPQNNXrJmXlE7+T3yeiRrcFwvegWyUXdiReczdXfbMW8tEzLN +06ADhO75K4Gd5cm64iXJpxgr/fGVrdNGkNl7fzkthbRwfHJEmfvfnyJMgXc1u/5B +f4b1xylTfO7UzAlU+syxTUvCx8c+GhNZZjYxrmAbagWwW2SWd510zEJuE9Ehg5SO +bEzYQleUF/8m1NEvZFi1RxoiOGm/wFqcw4gBCh332GiIfFddRMRx0GaNHDngr+j3 +zlFgfB231ee1PmqlK0bDTrnv3r2mvuJmealqDcGy514Dnd7dQbnJgCy9bR8JX04l +56z/I0ePX3RpvoFCXOYa92Uf66HQaW++fokCAwEAAaOB7TCB6jAdBgNVHQ4EFgQU +5E3uaqMwkTc+XB29JpZf/9vT4hUwHwYDVR0jBBgwFoAUdVXijuetpd2APckzCyyi +V3ftFawwDAYDVR0TAQH/BAIwADAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww +CgYIKwYBBQUHAwkwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDovLzEyNy4wLjAuMToy +ODg4OC9pbnRlcm1lZGlhdGUyX2NybC5kZXIwMwYIKwYBBQUHAQEEJzAlMCMGCCsG +AQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjI4ODg4LzANBgkqhkiG9w0BAQsFAAOC +AQEAbNb6j2/JCpkL7mwnH3VSuIIzQf4BofjFJE6eO+KJDwErjsR2+9l1WrKc4DaN +/ZCfKJIbo3T9xTkoUQarlfdkleh72Zc1M5cFOIfm5telC6ERDLeLdripRjO6ULM7 +lpBlS+oUIMn3DY1eicZ44wtP0tsQRorEgW8gEzCDqEVNK+/wzhinlvy5Z3npqfAv +sjMcg8+jS9/9xViuh4PZviKFWEH1oKItVphAEnjFQ7BQNA9sC1KtaOF6nsFUWL+0 +8cU7v5fk+UQJ9cdnfdw96qmfDzqqnErB76FSJeRXItavxsnIApFL7KLWurW/7SJ8 +snFsePS65Lm3HxFl1E93Te+1Qw== +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/ocsp2/private/ocsp2_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/ocsp2/private/ocsp2_keypair.pem new file mode 100644 index 000000000..ad13f6f80 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/ocsp2/private/ocsp2_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4mD0DTV6yZl5R +O/k98noka3BcL3oFslF3YkXnM3V32zFvLRMyzdOgA4Tu+SuBneXJuuIlyacYK/3x +la3TRpDZe385LYW0cHxyRJn7358iTIF3Nbv+QX+G9ccpU3zu1MwJVPrMsU1LwsfH +PhoTWWY2Ma5gG2oFsFtklneddMxCbhPRIYOUjmxM2EJXlBf/JtTRL2RYtUcaIjhp +v8BanMOIAQod99hoiHxXXUTEcdBmjRw54K/o985RYHwdt9XntT5qpStGw0657969 +pr7iZnmpag3BsudeA53e3UG5yYAsvW0fCV9OJees/yNHj190ab6BQlzmGvdlH+uh +0Glvvn6JAgMBAAECggEAIlCyruV4ICPljqZefASSbijG12w/+8UdXdsX8ZXgVWqa +8vbnJb+bgpiE4sPRMaQ/rlOebLXi6RxsdbeEe80XakaJ7QAoZdWvXLKiCW+VrpOY +UafcjbRxV45i+qy5gdBvKaDxipG/M8E+0CwcPtKUrKhpqRYPjIUvSDCshcnLmuF3 +zztB/4VyVEUUaM0pEqSZhxSyraRmGARvF1iOSu1npe3AzWTrrjrSkbk6fi4GyECL 
+If0EQ1ZD+ZXQ6tcGDyNtmPox7lPMZOgwLJZ5zISXZ6QBjn0JvSzE+e4z0IFinLgx +q5yBz2BhJEN8OBcs3J2N/ivQetWil64YbrbK6WbocQKBgQD/b4uHOuJVVifjIf6/ +kJ0UHhki4Q2Fj164royDigyyzaZmMzrlReZ5rAQLk8wGqw2hI+9gYoYBYqHm71kd +WrwLS1TVZJ6x8TBh0sYOG2CPndqIjWFx9Wjjf1xNknwYdIoEdAAKZ/M1E71V0tZb ++Ampl+lHPnKqYRSCd7gbYBU/TQKBgQC5AKGJusjRRRRWQqQ0RdJuxusZrtAAUd7l +wOGMC0zVQSSvUuegFtWEaZUbByhCARtYp8o4rT6Fw9yOvMaMNcfd8tV5nYVHDsrw +MurPhPitgI0/LdVvkAOO4fgPZHIXV9GbUDGq4uqB61daBSLQg1JjtzG8GvlGiYZl +mKOWEXjWLQKBgQC3nHHaehxZpT20yin5f7U50czVwppaqE05Sdcdcq1gFe2Hx0mN +pypdyaV6wPnGzUxVyaP3T7rt4f1pKCGRtTg4kiTf450jYbEakEzntQw7EAgXYjFq +njKQXWt3I1XqqlLPkqa41DIBtDfEKnMF1wzzCIyaNqxsBq6cffwsSWvcfQKBgF/y +UNUCd0X5Yqu+EjU+BP4I0kNWo2+XBlf36cHc1nM/Psxi3dfsH751l6wV0S4yLsGS ++9DbILL1On0YsIxlFAwq9cYGCOoqZNugPKF1oBcztY2PssMSWJYQ4brx6C3tELtR +IwEygFby/DGmukCT6vXmO7gH8UJA7t/gAu9Ajn/dAoGAI/Ejqb7HborIvCw/p+kB +JkPIhTUuT5XonDm8h6KHWUESPikS7SMeRM/4V+AL/Y5MiiCBfjh3tCOup/16x6GQ +4z6FvcIaYusxKup+afQaDyv1Phv5/mr74liLhC5Qp9EGU2FZrMZwG3EZjSn/0IE+ +dBJeWNtNHiFPcyTzYMMhDBw= +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/root/private/root_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/root/private/root_keypair.pem new file mode 100644 index 000000000..dd6f2fb05 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/root/private/root_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDiIWuf70i53iL7 +WzcJaMe1kldSJO+FAOhxhU0PW4zG508Z9uMLcKNBfnHUD9b98hrKqleRdpqygmJg +zvIALtS8WNNgMEKmKLJQe1gBn/sKZbBA1nzit9qNGdmlUdJGfhRGq/rfzv6ECJhj +Rh1NindXZ9oWizIMfEHipezufSAo6wNf9eYF2IuWeG+uKZpQ99yWMYaBsXjo6+9d +u+1C7JTGVEbsBW8bDDYkxqgGflxWuEM7EfQGCgUVGTsfyGcx6ztbKhUKe/lr5BDu +RL4Z2NtEAfo6VvVsTvNgquTNsq13B0Xv8df1+lKEXANOcuCpkcXZ1gqEM5gx8gJb +PxAVZXbXAgMBAAECggEAW6YC6i+/cIFs+SW3cStT4a29kU/h+axsCPJnUIWg0U6X +WyUaUR0mNZmrRbDjyEmS/Te7xPtmaFn6yFSndVaFpw5zIQV+RbyxxHexK/tscgLT +w/uKYxLz04M6GExIpoRb8Gash3/r3JRlOrsEjlRD2RuAoulob/H+e/8Wv3PcEGio +R8jwCj5DEnWiMxDzgtxsVgR4OeRYqg3zKjWrLALEYoRbFTVncCVA40OnmGJZ3+E5 ++3OOX6p9y/nY36888345yuwiCOTdNwQVaCXnLDZlAIVpB8QmjXVB35RSs+r2H5SF +p/KRbZ/JNKdNrbTKfJyvbnIpyTAtJB9OkhyiR9AegQKBgQDkKAplyZ6ChT3l53nn +4ngFi/nSTfrfJepmA5lVJk1Wxk0a4W++HxJkdKY2sUP7WuQ1xaPdcHxKzfp2HQE5 +L95jObU5dtY64QD4q0xqOw1ISDQi1euqZEmZziupEgPcMtw4sAVhHohzvTWo6a8o +fGMSkLTd+2303xgBCZo2I/hZVwKBgQD9uha6pQmCg4Oi2i/38Vm3ByArnQvgkELC +eGBBJrCE8pSm+ToPtgL+gSuA8PlWzIYlIf0UVOJFyws8GkUFm6n0nUlN0NmK8Rhm +Bg4IvasxdRgtySJzZO7ipAqGIaWJIBi1Vj4/rnAVggkadbQgyw+eCZNc5Pg3D9MV +TJ7d/xHegQKBgQCprGVfITuyUSihKy3rlu4vIdPd5IQnI2lYCGElg+CMIdkBnpmd +SDpDXsSlc9rcuNFyc9LTQW4Nq3USFavtPX4jSK1PWOMk0mQIiku/zL6p/JhZN8GU +7BQYP80UZQNd5K0Fs1Gs0ioj+JhJT9AlSavcCKWZV/yD2M1fKCb5EHMG7QKBgQDV +SvtSeeytp8sgOtU6VMz7fOUBZOsYI43Ll5ArFNAtYxOt7jNuA68urf2ZTnn9Cr/2 +NUVgMx9oVpEiPF8roLlV5mc6IEjQcW72TT69AF0KnYnu63enlADxy78BFQXoaW/7 ++P0pYYXdvsvST4JWUv3U9+3GmMFE4GutKxUeQA+QgQKBgQCauejVixhfKcmkM9nn +MGLSOUuFyd9HpQk3efxylphFNjpohk+k3fVKXBhmE4BDXbSlYUmMemm27tuQ/I6Z +bWOjGl57ZbCgJ7LdXLanJhyJJ6cSmkX8+oD+fwPMrD8yaAfh37MdTnriZKIDMXp2 +7HtfLcz0evmbW06b/dReyvcqyQ== +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/root/root_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/root/root_cert.pem new file mode 100644 index 000000000..f4658e142 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/root/root_cert.pem @@ -0,0 +1,86 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 27:5e:cf:7e:be:aa:02:b9:a9:c7:42:30:43:fe:0e:80:05:91:dd:0b + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, 
O=Testnats, CN=Root CA + Validity + Not Before: May 1 18:57:57 2023 GMT + Not After : Apr 28 18:57:57 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e2:21:6b:9f:ef:48:b9:de:22:fb:5b:37:09:68: + c7:b5:92:57:52:24:ef:85:00:e8:71:85:4d:0f:5b: + 8c:c6:e7:4f:19:f6:e3:0b:70:a3:41:7e:71:d4:0f: + d6:fd:f2:1a:ca:aa:57:91:76:9a:b2:82:62:60:ce: + f2:00:2e:d4:bc:58:d3:60:30:42:a6:28:b2:50:7b: + 58:01:9f:fb:0a:65:b0:40:d6:7c:e2:b7:da:8d:19: + d9:a5:51:d2:46:7e:14:46:ab:fa:df:ce:fe:84:08: + 98:63:46:1d:4d:8a:77:57:67:da:16:8b:32:0c:7c: + 41:e2:a5:ec:ee:7d:20:28:eb:03:5f:f5:e6:05:d8: + 8b:96:78:6f:ae:29:9a:50:f7:dc:96:31:86:81:b1: + 78:e8:eb:ef:5d:bb:ed:42:ec:94:c6:54:46:ec:05: + 6f:1b:0c:36:24:c6:a8:06:7e:5c:56:b8:43:3b:11: + f4:06:0a:05:15:19:3b:1f:c8:67:31:eb:3b:5b:2a: + 15:0a:7b:f9:6b:e4:10:ee:44:be:19:d8:db:44:01: + fa:3a:56:f5:6c:4e:f3:60:aa:e4:cd:b2:ad:77:07: + 45:ef:f1:d7:f5:fa:52:84:5c:03:4e:72:e0:a9:91: + c5:d9:d6:0a:84:33:98:31:f2:02:5b:3f:10:15:65: + 76:d7 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 22:79:1a:b9:5d:fa:f5:c9:a3:88:22:c4:92:e6:64:6d:ce:a5: + ae:2e:69:48:6a:9e:d5:11:c5:bb:b0:de:38:1b:5b:04:85:60: + d6:64:14:ed:c2:62:02:7d:ad:d2:17:ad:ef:40:27:2b:50:59: + 4a:ff:88:c6:b3:16:5c:55:30:d9:23:bd:4f:0f:34:b7:7b:ed: + 7a:e1:f3:39:35:e9:18:6d:70:b1:2b:2a:e2:e5:cd:a1:54:8a: + f9:f4:95:81:29:84:3f:95:2f:48:e0:35:3e:d9:cb:84:4d:3d: + 3e:3c:0e:8d:24:42:5f:19:e6:06:a5:87:ae:ba:af:07:02:e7: + 6a:83:0a:89:d4:a4:38:ce:05:6e:f6:15:f1:7a:53:bb:50:28: + 89:51:3f:f2:54:f1:d3:c4:28:07:a1:3e:55:e5:84:b8:df:58: + af:c3:e7:81:c2:08:9c:35:e4:c4:86:75:a8:17:99:2c:a6:7f: + 46:30:9b:23:55:c5:d8:e2:6a:e4:08:a1:8b:dc:bc:5b:86:95: + 4a:79:fe:a6:93:3d:1a:5b:10:9a:2f:6a:45:2f:5d:c9:fa:95: + 2e:66:eb:52:df:88:a7:5f:42:8f:5f:46:07:79:8b:a7:49:82: + d3:81:c6:3e:c2:5a:15:c4:83:69:30:49:4d:6e:ea:05:1e:d8: + dc:29:ac:17 +-----BEGIN CERTIFICATE----- +MIIDyDCCArCgAwIBAgIUJ17Pfr6qArmpx0IwQ/4OgAWR3QswDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE4 +NTc1N1oXDTMzMDQyODE4NTc1N1owUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdS +b290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4iFrn+9Iud4i ++1s3CWjHtZJXUiTvhQDocYVND1uMxudPGfbjC3CjQX5x1A/W/fIayqpXkXaasoJi +YM7yAC7UvFjTYDBCpiiyUHtYAZ/7CmWwQNZ84rfajRnZpVHSRn4URqv6387+hAiY +Y0YdTYp3V2faFosyDHxB4qXs7n0gKOsDX/XmBdiLlnhvrimaUPfcljGGgbF46Ovv +XbvtQuyUxlRG7AVvGww2JMaoBn5cVrhDOxH0BgoFFRk7H8hnMes7WyoVCnv5a+QQ +7kS+GdjbRAH6Olb1bE7zYKrkzbKtdwdF7/HX9fpShFwDTnLgqZHF2dYKhDOYMfIC +Wz8QFWV21wIDAQABo4GZMIGWMB0GA1UdDgQWBBTDEkK6qdhN4MM+utdHQaYJL220 +4TAfBgNVHSMEGDAWgBTDEkK6qdhN4MM+utdHQaYJL2204TAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBhjAzBgNVHR8ELDAqMCigJqAkhiJodHRwOi8vMTI3 +LjAuMC4xOjg4ODgvcm9vdF9jcmwuZGVyMA0GCSqGSIb3DQEBCwUAA4IBAQAieRq5 +Xfr1yaOIIsSS5mRtzqWuLmlIap7VEcW7sN44G1sEhWDWZBTtwmICfa3SF63vQCcr 
+UFlK/4jGsxZcVTDZI71PDzS3e+164fM5NekYbXCxKyri5c2hVIr59JWBKYQ/lS9I +4DU+2cuETT0+PA6NJEJfGeYGpYeuuq8HAudqgwqJ1KQ4zgVu9hXxelO7UCiJUT/y +VPHTxCgHoT5V5YS431ivw+eBwgicNeTEhnWoF5kspn9GMJsjVcXY4mrkCKGL3Lxb +hpVKef6mkz0aWxCaL2pFL13J+pUuZutS34inX0KPX0YHeYunSYLTgcY+wloVxINp +MElNbuoFHtjcKawX +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem new file mode 100644 index 000000000..544e3d444 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:c4:82:66:f8:5d:a6:b6:c7:66:e1:b2:01:3f:e0:72:fc:72:61:33 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:33:37 2023 GMT + Not After : Apr 28 19:33:37 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:af:26:5c:50:c0:fa:62:b5:fd:3d:c1:9e:26:51: + 58:62:04:37:b0:b5:6a:9b:6a:e3:22:3c:cd:ee:3c: + e7:8b:d3:e2:4c:08:1a:4d:63:c1:81:20:f4:53:a5: + 5d:2f:d2:71:d8:af:e3:26:95:b4:27:14:46:7f:e2: + 0a:73:12:a7:0e:ff:99:5a:29:f5:d0:65:96:b1:d1: + 96:7f:0c:43:b8:71:f2:4b:21:e1:97:6c:1b:01:e5: + 38:1a:39:44:72:d5:19:20:87:fe:90:4f:3b:97:f2: + 7d:bd:57:97:4d:9d:56:50:89:5b:79:29:7a:3a:13: + 97:08:61:c2:0c:a6:02:49:c9:8a:41:ab:8e:9f:25: + c9:33:18:f8:92:64:58:04:cc:a3:9d:cf:d4:d2:bd: + 20:ab:8b:9d:55:df:fb:5b:23:ac:95:12:fa:6f:07: + 93:3f:0e:03:86:c4:9b:25:06:21:9b:03:96:32:b8: + e0:0f:63:e2:1d:34:d1:41:35:19:09:c1:a0:dc:26: + b9:c8:66:fa:87:67:22:6e:0c:a6:e7:0f:24:64:b1: + 4f:84:05:ef:ad:8e:1b:f2:f4:38:87:d3:e3:48:a5: + 82:e0:66:89:1d:92:9a:59:67:a4:1d:03:6f:4d:a5: + fb:3b:c0:0b:73:a7:ab:8f:b4:10:25:8e:69:42:76: + 82:5f + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 43:16:E6:03:AF:37:B2:7B:BD:B3:C8:A2:9C:95:D7:FA:32:F8:9E:6F + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + a3:87:9f:05:e4:38:61:f7:c4:5b:17:13:4b:2c:9d:a2:4d:e6: + ad:93:54:c5:a3:00:27:0b:5c:45:c5:bd:f8:b6:a7:5a:2a:ec: + dc:9b:59:8a:c7:59:e7:b9:86:f7:27:be:45:0d:d9:86:76:cf: + 00:71:ad:aa:cc:73:50:8c:68:63:b0:e2:3a:59:dd:85:fa:0d: + f0:82:51:05:79:e6:d5:0e:0b:bb:ed:23:65:8f:d0:8b:01:df: + 86:74:bc:3a:22:90:e4:59:44:91:d5:44:d8:21:4d:4e:10:72: + 0a:12:2e:4a:20:5f:15:e7:16:0b:6f:76:f3:04:1f:da:44:50: + 3b:c3:b3:0f:fa:05:cf:6e:64:9c:65:e2:0d:38:28:31:c3:c3: + b6:66:ef:80:d3:c4:5f:e9:f9:01:e9:ce:e6:99:46:a0:9d:ce: + 90:63:77:d2:85:21:d7:88:32:55:38:fe:10:07:69:cd:c8:06: + b7:6f:49:98:bf:cd:be:4f:ab:44:ea:78:af:ab:01:c8:3e:fa: + d9:54:bc:59:28:db:03:9b:1c:ee:e4:c3:ed:f3:97:30:c6:40: + 33:76:84:40:b2:b8:4d:b4:ca:a9:2d:d1:4d:17:92:ea:c0:c9: + cb:f6:b1:d7:d3:c7:e6:75:15:00:ff:c7:d9:54:63:27:19:5c: + 96:a5:e5:d9 
+-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUPMSCZvhdprbHZuGyAT/gcvxyYTMwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTMzMzdaFw0zMzA0MjgxOTMzMzdaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCvJlxQwPpitf09wZ4mUVhiBDewtWqbauMiPM3uPOeL0+JMCBpNY8GBIPRT +pV0v0nHYr+MmlbQnFEZ/4gpzEqcO/5laKfXQZZax0ZZ/DEO4cfJLIeGXbBsB5Tga +OURy1Rkgh/6QTzuX8n29V5dNnVZQiVt5KXo6E5cIYcIMpgJJyYpBq46fJckzGPiS +ZFgEzKOdz9TSvSCri51V3/tbI6yVEvpvB5M/DgOGxJslBiGbA5YyuOAPY+IdNNFB +NRkJwaDcJrnIZvqHZyJuDKbnDyRksU+EBe+tjhvy9DiH0+NIpYLgZokdkppZZ6Qd +A29Npfs7wAtzp6uPtBAljmlCdoJfAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUQxbm +A683snu9s8iinJXX+jL4nm8wHwYDVR0jBBgwFoAUtZFuT2S3FoR2+bS+mc5glZga +jp0wDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjE4ODg4L2ludGVybWVkaWF0ZTFfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +MTg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAo4efBeQ4YffEWxcTSyydok3mrZNUxaMAJwtcRcW9+LanWirs3JtZisdZ +57mG9ye+RQ3ZhnbPAHGtqsxzUIxoY7DiOlndhfoN8IJRBXnm1Q4Lu+0jZY/QiwHf +hnS8OiKQ5FlEkdVE2CFNThByChIuSiBfFecWC2928wQf2kRQO8OzD/oFz25knGXi +DTgoMcPDtmbvgNPEX+n5AenO5plGoJ3OkGN30oUh14gyVTj+EAdpzcgGt29JmL/N +vk+rROp4r6sByD762VS8WSjbA5sc7uTD7fOXMMZAM3aEQLK4TbTKqS3RTReS6sDJ +y/ax19PH5nUVAP/H2VRjJxlclqXl2Q== +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 
b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem new file mode 100644 index 000000000..ef73af87d --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:c4:82:66:f8:5d:a6:b6:c7:66:e1:b2:01:3f:e0:72:fc:72:61:33 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:33:37 2023 GMT + Not After : Apr 28 19:33:37 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:af:26:5c:50:c0:fa:62:b5:fd:3d:c1:9e:26:51: + 58:62:04:37:b0:b5:6a:9b:6a:e3:22:3c:cd:ee:3c: + e7:8b:d3:e2:4c:08:1a:4d:63:c1:81:20:f4:53:a5: + 5d:2f:d2:71:d8:af:e3:26:95:b4:27:14:46:7f:e2: + 0a:73:12:a7:0e:ff:99:5a:29:f5:d0:65:96:b1:d1: + 96:7f:0c:43:b8:71:f2:4b:21:e1:97:6c:1b:01:e5: + 38:1a:39:44:72:d5:19:20:87:fe:90:4f:3b:97:f2: + 7d:bd:57:97:4d:9d:56:50:89:5b:79:29:7a:3a:13: + 97:08:61:c2:0c:a6:02:49:c9:8a:41:ab:8e:9f:25: + c9:33:18:f8:92:64:58:04:cc:a3:9d:cf:d4:d2:bd: 
+ 20:ab:8b:9d:55:df:fb:5b:23:ac:95:12:fa:6f:07: + 93:3f:0e:03:86:c4:9b:25:06:21:9b:03:96:32:b8: + e0:0f:63:e2:1d:34:d1:41:35:19:09:c1:a0:dc:26: + b9:c8:66:fa:87:67:22:6e:0c:a6:e7:0f:24:64:b1: + 4f:84:05:ef:ad:8e:1b:f2:f4:38:87:d3:e3:48:a5: + 82:e0:66:89:1d:92:9a:59:67:a4:1d:03:6f:4d:a5: + fb:3b:c0:0b:73:a7:ab:8f:b4:10:25:8e:69:42:76: + 82:5f + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 43:16:E6:03:AF:37:B2:7B:BD:B3:C8:A2:9C:95:D7:FA:32:F8:9E:6F + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + a3:87:9f:05:e4:38:61:f7:c4:5b:17:13:4b:2c:9d:a2:4d:e6: + ad:93:54:c5:a3:00:27:0b:5c:45:c5:bd:f8:b6:a7:5a:2a:ec: + dc:9b:59:8a:c7:59:e7:b9:86:f7:27:be:45:0d:d9:86:76:cf: + 00:71:ad:aa:cc:73:50:8c:68:63:b0:e2:3a:59:dd:85:fa:0d: + f0:82:51:05:79:e6:d5:0e:0b:bb:ed:23:65:8f:d0:8b:01:df: + 86:74:bc:3a:22:90:e4:59:44:91:d5:44:d8:21:4d:4e:10:72: + 0a:12:2e:4a:20:5f:15:e7:16:0b:6f:76:f3:04:1f:da:44:50: + 3b:c3:b3:0f:fa:05:cf:6e:64:9c:65:e2:0d:38:28:31:c3:c3: + b6:66:ef:80:d3:c4:5f:e9:f9:01:e9:ce:e6:99:46:a0:9d:ce: + 90:63:77:d2:85:21:d7:88:32:55:38:fe:10:07:69:cd:c8:06: + b7:6f:49:98:bf:cd:be:4f:ab:44:ea:78:af:ab:01:c8:3e:fa: + d9:54:bc:59:28:db:03:9b:1c:ee:e4:c3:ed:f3:97:30:c6:40: + 33:76:84:40:b2:b8:4d:b4:ca:a9:2d:d1:4d:17:92:ea:c0:c9: + cb:f6:b1:d7:d3:c7:e6:75:15:00:ff:c7:d9:54:63:27:19:5c: + 96:a5:e5:d9 +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUPMSCZvhdprbHZuGyAT/gcvxyYTMwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTMzMzdaFw0zMzA0MjgxOTMzMzdaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCvJlxQwPpitf09wZ4mUVhiBDewtWqbauMiPM3uPOeL0+JMCBpNY8GBIPRT +pV0v0nHYr+MmlbQnFEZ/4gpzEqcO/5laKfXQZZax0ZZ/DEO4cfJLIeGXbBsB5Tga +OURy1Rkgh/6QTzuX8n29V5dNnVZQiVt5KXo6E5cIYcIMpgJJyYpBq46fJckzGPiS +ZFgEzKOdz9TSvSCri51V3/tbI6yVEvpvB5M/DgOGxJslBiGbA5YyuOAPY+IdNNFB +NRkJwaDcJrnIZvqHZyJuDKbnDyRksU+EBe+tjhvy9DiH0+NIpYLgZokdkppZZ6Qd +A29Npfs7wAtzp6uPtBAljmlCdoJfAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUQxbm +A683snu9s8iinJXX+jL4nm8wHwYDVR0jBBgwFoAUtZFuT2S3FoR2+bS+mc5glZga +jp0wDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjE4ODg4L2ludGVybWVkaWF0ZTFfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +MTg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAo4efBeQ4YffEWxcTSyydok3mrZNUxaMAJwtcRcW9+LanWirs3JtZisdZ +57mG9ye+RQ3ZhnbPAHGtqsxzUIxoY7DiOlndhfoN8IJRBXnm1Q4Lu+0jZY/QiwHf +hnS8OiKQ5FlEkdVE2CFNThByChIuSiBfFecWC2928wQf2kRQO8OzD/oFz25knGXi +DTgoMcPDtmbvgNPEX+n5AenO5plGoJ3OkGN30oUh14gyVTj+EAdpzcgGt29JmL/N +vk+rROp4r6sByD762VS8WSjbA5sc7uTD7fOXMMZAM3aEQLK4TbTKqS3RTReS6sDJ +y/ax19PH5nUVAP/H2VRjJxlclqXl2Q== 
+-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_bundle.pem new file mode 100644 index 000000000..aacb9be1d --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 2e:91:da:29:59:ff:c4:64:bf:02:bc:27:bb:e3:35:4e:5b:36:f7:91 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:33:53 2023 GMT + Not After : Apr 28 19:33:53 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ac:48:ce:a7:b2:ad:7a:68:01:55:3f:86:20:7e: + bb:26:e6:88:f3:ae:04:15:7d:d9:64:98:85:bc:eb: + bd:d8:0a:c7:26:c4:8e:27:56:8c:a8:9f:51:37:a9: + ec:8a:dc:af:27:05:0c:f5:c0:19:b1:2c:0d:56:66: + 7b:7e:b1:8f:ab:34:61:56:37:a8:ab:51:d6:1d:e6: + a7:56:b2:51:72:57:9b:c5:87:84:6c:ef:e6:18:d4: + 45:b8:ef:52:72:11:02:81:61:f2:36:63:25:18:31: + 7f:c7:91:89:c3:b0:73:13:f0:26:1f:a1:4f:8c:ff: + 94:1c:75:a6:be:38:7d:81:06:33:dd:7b:86:81:c5: + 1f:d2:5d:f6:ea:3f:9f:ab:fb:e7:97:3c:72:ea:b3: + 83:ab:49:88:ac:a9:4b:81:db:fa:e3:bf:79:d9:6e: + 90:bf:8f:68:d8:05:f8:52:ad:98:41:29:e0:2a:18: + 98:b6:b2:61:78:02:02:52:85:02:e0:63:f4:a0:55: + 80:c9:66:8b:ac:4f:8b:36:f4:56:8f:cf:bd:67:86: + 72:92:0b:f9:73:7b:05:cc:3d:91:ed:ed:4f:f0:8f: + 36:99:e5:51:7f:ee:9e:fb:e5:5c:d0:39:a2:f5:51: + 06:92:3c:ad:cc:59:9d:0a:81:50:26:30:01:e9:f4: + b1:e9 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CD:65:B9:5C:48:35:F7:1E:85:6E:94:50:78:72:BB:3F:F7:BC:22:A6 + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 6f:de:f3:92:b2:8b:57:61:7a:b9:06:49:3e:af:e0:1c:3a:d4: + 42:52:fe:d0:7d:97:8a:9b:d0:6d:b9:f3:e6:8b:2a:40:ce:aa: + ed:bb:ce:21:e8:ae:32:9d:eb:5a:00:e0:c1:3a:d7:40:74:1b: + 43:e4:43:f0:61:bf:40:06:75:52:1b:b9:f4:b5:32:55:94:f5: + 84:98:90:cc:27:92:91:b7:3d:8e:f1:12:bf:37:1a:8a:50:41: + 3a:14:0c:cf:93:fe:57:97:7b:fe:af:b9:c0:c2:d6:bb:20:e4: + 0a:6f:12:0b:60:a6:cc:59:46:db:99:db:61:71:d3:a7:f5:a1: + d0:d6:81:87:57:a3:dd:b6:e1:ab:2f:4f:b6:51:21:ec:a6:95: + df:d3:ab:e5:a1:67:a3:ba:b1:b9:71:39:a1:3b:db:5e:c5:6f: + b1:34:27:ae:6d:f6:67:4c:7d:7c:6d:12:37:6f:b5:0b:5a:85: + aa:5d:fd:03:de:59:b5:20:7a:ea:84:a0:a5:75:60:12:12:08: + 77:0e:46:d6:fa:57:fa:b1:43:42:54:38:d7:66:67:cd:fc:b6: + f9:4c:fe:99:71:2b:d5:a6:13:2f:2e:f0:a3:9e:fc:47:03:31: + 79:38:e3:50:8a:de:81:97:80:9e:46:71:5c:9f:e5:de:0c:49: + fc:f5:61:1c +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIULpHaKVn/xGS/Arwnu+M1Tls295EwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe 
+Fw0yMzA1MDExOTMzNTNaFw0zMzA0MjgxOTMzNTNaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCsSM6nsq16aAFVP4Ygfrsm5ojzrgQVfdlkmIW8673YCscmxI4nVoyon1E3 +qeyK3K8nBQz1wBmxLA1WZnt+sY+rNGFWN6irUdYd5qdWslFyV5vFh4Rs7+YY1EW4 +71JyEQKBYfI2YyUYMX/HkYnDsHMT8CYfoU+M/5Qcdaa+OH2BBjPde4aBxR/SXfbq +P5+r++eXPHLqs4OrSYisqUuB2/rjv3nZbpC/j2jYBfhSrZhBKeAqGJi2smF4AgJS +hQLgY/SgVYDJZousT4s29FaPz71nhnKSC/lzewXMPZHt7U/wjzaZ5VF/7p775VzQ +OaL1UQaSPK3MWZ0KgVAmMAHp9LHpAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUzWW5 +XEg19x6FbpRQeHK7P/e8IqYwHwYDVR0jBBgwFoAUtZFuT2S3FoR2+bS+mc5glZga +jp0wDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjE4ODg4L2ludGVybWVkaWF0ZTFfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +MTg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAb97zkrKLV2F6uQZJPq/gHDrUQlL+0H2XipvQbbnz5osqQM6q7bvOIeiu +Mp3rWgDgwTrXQHQbQ+RD8GG/QAZ1Uhu59LUyVZT1hJiQzCeSkbc9jvESvzcailBB +OhQMz5P+V5d7/q+5wMLWuyDkCm8SC2CmzFlG25nbYXHTp/Wh0NaBh1ej3bbhqy9P +tlEh7KaV39Or5aFno7qxuXE5oTvbXsVvsTQnrm32Z0x9fG0SN2+1C1qFql39A95Z +tSB66oSgpXVgEhIIdw5G1vpX+rFDQlQ412Znzfy2+Uz+mXEr1aYTLy7wo578RwMx +eTjjUIregZeAnkZxXJ/l3gxJ/PVhHA== +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 55:57:db:45:43:06:ce:52:63:59:b9:5a:26:78:fd:0d:94:68:95:9c + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:15 2023 GMT + Not After : Apr 28 19:01:15 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:bc:c6:84:2d:c2:ab:5d:05:d7:65:a8:e2:15:74: + d8:f2:f1:55:11:45:93:96:4c:a5:dc:cb:44:f5:f4: + 14:7e:46:02:59:e8:ae:78:59:69:21:58:f7:16:38: + b9:c2:c2:60:d8:76:ab:a1:39:ba:0b:a3:03:17:e4: + a1:cb:5d:1a:0c:62:71:24:64:b0:00:f0:6f:4c:af: + 08:62:8c:dc:4f:e0:d7:d4:55:2c:db:36:fc:a9:aa: + d7:58:27:e4:99:cb:dc:29:d9:ea:35:16:cb:2e:be: + 04:b2:82:58:f4:e5:5c:07:db:12:8e:e3:3c:9a:5e: + 90:4b:c5:a3:d4:21:96:5f:e1:8f:f7:cb:9e:db:e0: + 10:a0:6c:a2:1e:30:17:6c:32:9f:7b:43:a4:9f:d3: + 6b:33:1b:18:cd:a4:ad:33:48:a3:98:b0:2b:c8:22: + 74:17:71:d8:f1:64:21:55:e1:33:bc:7f:74:5f:a5: + a6:a2:9b:58:2f:db:ed:c7:c1:e5:36:2e:86:26:ad: + c6:fe:b8:00:85:6e:7c:ed:fd:4a:c6:a0:d9:b2:3f: + 4e:bd:fa:08:52:c8:5d:31:13:86:bd:3f:ec:7a:d8: + 3a:15:e2:71:af:ec:00:88:7e:a6:e8:e1:9d:ab:57: + 5a:8a:1f:f8:e2:4d:29:58:53:79:25:f0:9e:d9:18: + 40:27 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b1:48:16:3b:d7:91:d0:4d:54:09:cb:ab:c7:41:4f:35:12:8b: + a6:e8:84:11:49:a9:04:91:41:25:7c:02:38:b2:19:a0:e9:2e: + d5:d6:7a:26:c1:1a:f8:f1:c6:51:92:68:af:c8:6e:5b:df:28: + 40:b8:99:94:d5:43:7d:e3:68:75:94:26:56:11:21:9e:50:b3: + 
36:7b:f8:5f:33:76:64:71:04:26:2b:bb:2c:83:33:89:ba:74: + c1:e9:9d:eb:c0:86:4b:4d:6f:f8:4d:55:5a:3d:f6:55:95:33: + 0f:b8:f0:53:2b:93:a6:da:8d:5c:1a:e8:30:22:55:67:44:6e: + 17:c4:57:05:0d:ce:fc:61:dd:b1:3c:b0:66:55:f4:42:d0:ce: + 94:7d:6a:82:bd:32:ed:2f:21:ff:c7:70:ff:48:9d:10:4a:71: + be:a8:37:e5:0f:f4:79:1e:7d:a2:f1:6a:6b:2c:e8:03:20:ce: + 80:94:d2:38:80:bc:7e:56:c5:77:62:94:c0:b7:40:11:4d:ba: + 98:4b:2e:52:03:66:68:36:ab:d1:0f:3e:b5:92:a3:95:9d:a4: + ea:d3:8a:14:41:6d:86:24:89:aa:d7:29:20:c8:52:d5:bf:8d: + 3b:09:52:dd:89:8c:2c:85:40:b5:9f:cc:47:63:ca:3a:e0:c9: + 91:5c:43:a9 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUVVfbRUMGzlJjWblaJnj9DZRolZwwDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDExNVoXDTMzMDQyODE5MDExNVowWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALzGhC3Cq10F12Wo4hV02PLxVRFFk5ZMpdzLRPX0FH5GAlnornhZaSFY9xY4ucLC +YNh2q6E5ugujAxfkoctdGgxicSRksADwb0yvCGKM3E/g19RVLNs2/Kmq11gn5JnL +3CnZ6jUWyy6+BLKCWPTlXAfbEo7jPJpekEvFo9Qhll/hj/fLntvgEKBsoh4wF2wy +n3tDpJ/TazMbGM2krTNIo5iwK8gidBdx2PFkIVXhM7x/dF+lpqKbWC/b7cfB5TYu +hiatxv64AIVufO39Ssag2bI/Tr36CFLIXTEThr0/7HrYOhXica/sAIh+pujhnatX +Woof+OJNKVhTeSXwntkYQCcCAwEAAaOB0DCBzTAdBgNVHQ4EFgQUtZFuT2S3FoR2 ++bS+mc5glZgajp0wHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBALFIFjvXkdBNVAnLq8dBTzUSi6bohBFJqQSRQSV8AjiyGaDp +LtXWeibBGvjxxlGSaK/IblvfKEC4mZTVQ33jaHWUJlYRIZ5QszZ7+F8zdmRxBCYr +uyyDM4m6dMHpnevAhktNb/hNVVo99lWVMw+48FMrk6bajVwa6DAiVWdEbhfEVwUN +zvxh3bE8sGZV9ELQzpR9aoK9Mu0vIf/HcP9InRBKcb6oN+UP9HkefaLxamss6AMg +zoCU0jiAvH5WxXdilMC3QBFNuphLLlIDZmg2q9EPPrWSo5WdpOrTihRBbYYkiarX +KSDIUtW/jTsJUt2JjCyFQLWfzEdjyjrgyZFcQ6k= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_cert.pem new file mode 100644 index 000000000..91ddf5657 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server1/TestServer2_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 2e:91:da:29:59:ff:c4:64:bf:02:bc:27:bb:e3:35:4e:5b:36:f7:91 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 1 + Validity + Not Before: May 1 19:33:53 2023 GMT + Not After : Apr 28 19:33:53 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ac:48:ce:a7:b2:ad:7a:68:01:55:3f:86:20:7e: + bb:26:e6:88:f3:ae:04:15:7d:d9:64:98:85:bc:eb: + bd:d8:0a:c7:26:c4:8e:27:56:8c:a8:9f:51:37:a9: + ec:8a:dc:af:27:05:0c:f5:c0:19:b1:2c:0d:56:66: + 7b:7e:b1:8f:ab:34:61:56:37:a8:ab:51:d6:1d:e6: + a7:56:b2:51:72:57:9b:c5:87:84:6c:ef:e6:18:d4: + 45:b8:ef:52:72:11:02:81:61:f2:36:63:25:18:31: + 7f:c7:91:89:c3:b0:73:13:f0:26:1f:a1:4f:8c:ff: + 94:1c:75:a6:be:38:7d:81:06:33:dd:7b:86:81:c5: + 1f:d2:5d:f6:ea:3f:9f:ab:fb:e7:97:3c:72:ea:b3: + 83:ab:49:88:ac:a9:4b:81:db:fa:e3:bf:79:d9:6e: + 90:bf:8f:68:d8:05:f8:52:ad:98:41:29:e0:2a:18: + 98:b6:b2:61:78:02:02:52:85:02:e0:63:f4:a0:55: + 80:c9:66:8b:ac:4f:8b:36:f4:56:8f:cf:bd:67:86: + 
72:92:0b:f9:73:7b:05:cc:3d:91:ed:ed:4f:f0:8f: + 36:99:e5:51:7f:ee:9e:fb:e5:5c:d0:39:a2:f5:51: + 06:92:3c:ad:cc:59:9d:0a:81:50:26:30:01:e9:f4: + b1:e9 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CD:65:B9:5C:48:35:F7:1E:85:6E:94:50:78:72:BB:3F:F7:BC:22:A6 + X509v3 Authority Key Identifier: + B5:91:6E:4F:64:B7:16:84:76:F9:B4:BE:99:CE:60:95:98:1A:8E:9D + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:18888/intermediate1_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:18888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 6f:de:f3:92:b2:8b:57:61:7a:b9:06:49:3e:af:e0:1c:3a:d4: + 42:52:fe:d0:7d:97:8a:9b:d0:6d:b9:f3:e6:8b:2a:40:ce:aa: + ed:bb:ce:21:e8:ae:32:9d:eb:5a:00:e0:c1:3a:d7:40:74:1b: + 43:e4:43:f0:61:bf:40:06:75:52:1b:b9:f4:b5:32:55:94:f5: + 84:98:90:cc:27:92:91:b7:3d:8e:f1:12:bf:37:1a:8a:50:41: + 3a:14:0c:cf:93:fe:57:97:7b:fe:af:b9:c0:c2:d6:bb:20:e4: + 0a:6f:12:0b:60:a6:cc:59:46:db:99:db:61:71:d3:a7:f5:a1: + d0:d6:81:87:57:a3:dd:b6:e1:ab:2f:4f:b6:51:21:ec:a6:95: + df:d3:ab:e5:a1:67:a3:ba:b1:b9:71:39:a1:3b:db:5e:c5:6f: + b1:34:27:ae:6d:f6:67:4c:7d:7c:6d:12:37:6f:b5:0b:5a:85: + aa:5d:fd:03:de:59:b5:20:7a:ea:84:a0:a5:75:60:12:12:08: + 77:0e:46:d6:fa:57:fa:b1:43:42:54:38:d7:66:67:cd:fc:b6: + f9:4c:fe:99:71:2b:d5:a6:13:2f:2e:f0:a3:9e:fc:47:03:31: + 79:38:e3:50:8a:de:81:97:80:9e:46:71:5c:9f:e5:de:0c:49: + fc:f5:61:1c +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIULpHaKVn/xGS/Arwnu+M1Tls295EwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMTAe +Fw0yMzA1MDExOTMzNTNaFw0zMzA0MjgxOTMzNTNaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCsSM6nsq16aAFVP4Ygfrsm5ojzrgQVfdlkmIW8673YCscmxI4nVoyon1E3 +qeyK3K8nBQz1wBmxLA1WZnt+sY+rNGFWN6irUdYd5qdWslFyV5vFh4Rs7+YY1EW4 +71JyEQKBYfI2YyUYMX/HkYnDsHMT8CYfoU+M/5Qcdaa+OH2BBjPde4aBxR/SXfbq +P5+r++eXPHLqs4OrSYisqUuB2/rjv3nZbpC/j2jYBfhSrZhBKeAqGJi2smF4AgJS +hQLgY/SgVYDJZousT4s29FaPz71nhnKSC/lzewXMPZHt7U/wjzaZ5VF/7p775VzQ +OaL1UQaSPK3MWZ0KgVAmMAHp9LHpAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUzWW5 +XEg19x6FbpRQeHK7P/e8IqYwHwYDVR0jBBgwFoAUtZFuT2S3FoR2+bS+mc5glZga +jp0wDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjE4ODg4L2ludGVybWVkaWF0ZTFfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +MTg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAb97zkrKLV2F6uQZJPq/gHDrUQlL+0H2XipvQbbnz5osqQM6q7bvOIeiu +Mp3rWgDgwTrXQHQbQ+RD8GG/QAZ1Uhu59LUyVZT1hJiQzCeSkbc9jvESvzcailBB +OhQMz5P+V5d7/q+5wMLWuyDkCm8SC2CmzFlG25nbYXHTp/Wh0NaBh1ej3bbhqy9P +tlEh7KaV39Or5aFno7qxuXE5oTvbXsVvsTQnrm32Z0x9fG0SN2+1C1qFql39A95Z +tSB66oSgpXVgEhIIdw5G1vpX+rFDQlQ412Znzfy2+Uz+mXEr1aYTLy7wo578RwMx +eTjjUIregZeAnkZxXJ/l3gxJ/PVhHA== +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem 
b/test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem new file mode 100644 index 000000000..2ea703d5a --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCvJlxQwPpitf09 +wZ4mUVhiBDewtWqbauMiPM3uPOeL0+JMCBpNY8GBIPRTpV0v0nHYr+MmlbQnFEZ/ +4gpzEqcO/5laKfXQZZax0ZZ/DEO4cfJLIeGXbBsB5TgaOURy1Rkgh/6QTzuX8n29 +V5dNnVZQiVt5KXo6E5cIYcIMpgJJyYpBq46fJckzGPiSZFgEzKOdz9TSvSCri51V +3/tbI6yVEvpvB5M/DgOGxJslBiGbA5YyuOAPY+IdNNFBNRkJwaDcJrnIZvqHZyJu +DKbnDyRksU+EBe+tjhvy9DiH0+NIpYLgZokdkppZZ6QdA29Npfs7wAtzp6uPtBAl +jmlCdoJfAgMBAAECggEAQLRoOEECfwMCehUUKs20XAl41WQ/7QiQvm4+GXwQgjyV +hkccCGkI7H5TJK+bfHY/LrDTtsZpVmKMJORJvfcvFkBg08lakVFmWWy3L1pFjlcy +DoWGxJzgYVPf5PgxDEcjUDxNU9yhhGHGB/Pa5oZwg7Iqw9kJ2XixPBx5RpjxkXYw +tR8V3IaKq0YRI5lpUfuaofmJnHJnWCMTmawWMxWuTlzlbDDZTHQs8aTDUnwZ26kD +6tYB2Tp3aP3zUE8MQZwOEyhRH1WQeS3kcIWh4UnPyA09g0aTb6YK8qacnTL2CixF +VJpLDtlkQk0TCo06AZkcvWkPTQyFXnVsgkG8rRUlEQKBgQDrTHyf6merJAohUeBV +5IIfoKHWbGc1DXSdmHtCSN9wFGkhCYtfCZ7YaSLjFF7GOvd6mfHJVnIp3aFONqM7 +dk/MZDsAvogO6lU+zgQc+EcKk+e6zyfsUYghy/R3+QKsYtd4SyNDq6cl80MUujjG +pE2b41O57sNCVZgywCCGXvt/ZwKBgQC+jyufgKRIptM+OOhHlKUaxkTDaMHA1KKY +iFPLuLgWmyCYHQq2D6uoCRGnEguEnXtbtOz6SYlMMNfeHtX0SATkdCGae/bh5ibG +uQoWwRMkRkAgl1gyAh7h669pDUiD2gh0q56cS8El7Jgze7NRF4hUyY2mWc5nGhVR +7rHKlOCiSQKBgHBiWevvg5BkaEo91w5vVA9TI7lMkYbvZFGZcNXaBI590TCsZFsC +N1JZ9QXMxu+bXnS6bpehqGmCp/a5dgGCot6WyO+0ETw+hHS45ZIIq7XLqxS4uPLQ +hlrOFXfwAWzg0NVt3ewGYpFnvRR7VX7bHw5j56uY9L4ML+OdjGthlnHlAoGAZAm7 +R/f7xtw1h7POVU22w3CUxtUm6jl2xobDHu7xTYTQvqp4Zg2h+wwPxVqWy171VLaN +tfOG7YWyvbwIbD6mutwwi+5KNFtjve2EW1+u0dtDbRimx1IPrmDRbF/50qZSzBUQ +plKqqmMjn9tvzsGA46oP/+WjksLBsIqTsZsotmkCgYAn8Ap+e6ZNX2uM8Kg7LB+T +hBNGczNOGQX8SpfCeH9eV4VzfpEHn8Fxk+lcI2WpYkandQ8ju2s0mT5OoQ2VjxGT +eql9jMd8MQZTx/aWridt5qG3hsFcx9GILlcXTUqyRH0SFAU7xDO5HzzKP3tiW6BN +YE3GakolPPymOR9q69sT0Q== +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer2_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer2_keypair.pem new file mode 100644 index 000000000..7b76a48d6 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server1/private/TestServer2_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCsSM6nsq16aAFV +P4Ygfrsm5ojzrgQVfdlkmIW8673YCscmxI4nVoyon1E3qeyK3K8nBQz1wBmxLA1W +Znt+sY+rNGFWN6irUdYd5qdWslFyV5vFh4Rs7+YY1EW471JyEQKBYfI2YyUYMX/H +kYnDsHMT8CYfoU+M/5Qcdaa+OH2BBjPde4aBxR/SXfbqP5+r++eXPHLqs4OrSYis +qUuB2/rjv3nZbpC/j2jYBfhSrZhBKeAqGJi2smF4AgJShQLgY/SgVYDJZousT4s2 +9FaPz71nhnKSC/lzewXMPZHt7U/wjzaZ5VF/7p775VzQOaL1UQaSPK3MWZ0KgVAm +MAHp9LHpAgMBAAECggEABbGGbngQX9Un+x8FQesSPgHnGM92wtY6J5Gwn2qhJy6M +VYwwFZ3Nz5pBPbrOY9SRGhPihrdixKOWgWppA8ne0WB4JC26HnGZnFAbAQRVqPbQ +duhd4ILpOpzpkh1K6b+vvU0addXpsUlHJjYZmdy+9tPBkhtwz1xDCFGShrguR0Pa +WTudsee4skdGfw6wMyHEfM4IXXuSfb1hIse1xlnZMPXMMi3ebCqpOy4IzJ4ML7sF +RySdrdAHcWJqOQjPkDTOPCXpthBn3iQ8Fa7Znd0GGLZvdRbq3p10H5LNhMg+LBc7 +oRVQ67qAfQKPHKQMSsR4x2fWo8/hw/QEi3cj6CohYQKBgQDtjDBm7VfbLojZeCMx ++32EZ0bLUTob5qInTKpDbdKcYmxP857LRAglaGu+pkOTnHi6lOjJYSiBDd1+vWm/ +1lgMUjKerI0l5ol5yRHWNDFyeQoh10TqEUbIUqB8E5Vi4gl0DlpnsfEm899rlfhP +dmi1rNpc/C7ZK8Zpt7l4eLbqYQKBgQC5qs+K01WwjtrjoqkEwKqjy7ASrbBuZ56u +wOe+iO7pYVP4/VdAvOsfEYCWfjhoETYGKob9ZZlo3StpQ5Ku5CigpWQVSCvJhO2T +KQe75DfXXxaqoPmlNcqAFpqY383Sm+1r3a815sg83XhQAu7GdCyTrLocBLM9SFWX +fVbojv/EiQKBgBlOpCFzC7cYIBA7ElTS3C5s6kfi4XPzgDb7nfANFTD/81YZOEOj 
+fdKuazwmbnCdbOdD0gESTsRg+8Xy2/9KEJtPboElFOyCwQauey385X+ykXfFfVwK +dyYEV4CgfXvJZQRuOwdtF6n0tUq68XdVwBYK0kCxxTPxy/ObVTEWezZBAoGAPPX2 +evB0vCnLeN5ZfHP+ExW31AovWbCwC1TPQmIXf40zUxdrZJgi4uqOO9tpjdHI2TFx +bRXEzwd/T2qeaMoFBOoI+Gvf5KS+lIjuPyTpqM9R0+hSz4nf2TqSvAsPu0zzIW2C +L8J8kG9vJ2YvG/3c/QfDe5uXdlGfuMOwm18IX3ECgYAelsVWNSm9YR2H7S6dlgjm +8U1IZO342Ab5qlrIWn9Vr/X9MRsAASlVVESeNeQTToBoub5pakUrpz9nNQy29+TX +xYju72RsCjKywKXWZrCAdHfY+wJJWVo5XkdDZJVl2AYrnP3C07S9aKIjhpGHwz7n +jbbCEkHZREMbQJCQjuKT1w== +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem new file mode 100644 index 000000000..7a1ee483a --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3e:1f:9b:cd:c8:7b:95:f1:64:e6:41:9c:df:6e:03:da:92:9a:90:b7 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: Aug 2 22:15:27 2023 GMT + Not After : Jul 30 22:15:27 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer3 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:9a:3c:db:76:c9:19:0f:7b:e6:d3:ed:d1:0b:76: + ae:15:d4:11:1c:66:b8:5d:2a:7d:e3:1f:65:d8:1b: + c4:63:62:f6:5c:8b:18:66:a8:1c:c2:a6:5e:72:f2: + dd:57:42:8a:ab:5d:bd:37:b6:f1:4b:51:f0:b3:6a: + 37:e9:55:78:01:23:ea:53:09:83:2f:7d:59:36:ab: + 33:4f:4c:bc:ef:a9:1c:db:94:79:4c:0d:4a:7c:3f: + 9d:3c:ba:6c:76:82:47:25:eb:79:22:f4:09:6c:78: + 3c:a6:ef:4b:30:90:29:b3:5f:ba:69:b1:1a:95:ed: + 53:e0:c6:24:78:6e:52:af:8e:bc:db:4a:f0:19:d2: + 00:5a:a8:b6:73:4c:17:92:d1:8d:81:9b:4c:b8:35: + 4d:91:dd:df:d3:85:a6:9f:c4:91:19:ec:47:d1:ca: + 4e:0b:c3:06:8c:27:42:95:83:e3:28:6a:3b:74:9c: + 68:b0:55:a5:91:91:cb:37:ad:fa:d8:69:8b:de:2e: + 4a:51:59:32:4b:3d:06:21:04:65:d2:f5:8b:e8:4d: + 45:96:de:63:97:47:81:85:ea:48:f0:9d:23:2d:71: + 87:6f:d2:75:3d:45:bf:de:ad:43:82:db:a5:29:9b: + f9:5e:38:0a:39:a9:38:71:ec:40:40:b5:dc:69:c7: + 0b:73 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 7F:47:8C:9E:F1:73:7E:34:B9:5B:1E:ED:AD:3A:87:42:80:D4:E3:FD + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b9:b4:05:48:a6:ba:6c:99:8b:23:c4:9b:b3:8a:32:3f:ca:62: + 89:81:1e:5d:04:ba:2d:22:a3:0f:5a:5d:a0:ab:40:a4:87:43: + 26:36:0a:09:64:ef:f5:b0:a7:6f:7a:1f:cc:06:6c:f7:8d:9c: + 64:5e:c2:ae:e7:45:39:dc:bc:87:06:e6:d5:aa:6b:32:76:51: + 64:e1:ac:d9:9a:dd:17:47:9b:4e:31:1c:93:f5:c5:ca:d6:b7: + 90:ff:64:97:59:df:2b:7f:ee:2d:7d:73:ef:95:ad:b5:1e:a9: + 0c:48:38:29:0b:39:4f:05:fb:07:cf:ec:94:a3:b3:d5:eb:00: + ed:b2:b9:71:a0:59:b5:3f:7c:f5:20:90:54:a8:ea:36:4c:ae: + 62:5b:2b:6d:05:8d:76:78:87:c9:90:f3:b2:d1:72:fc:87:f5: + 28:4c:ec:19:50:0f:02:32:d4:57:75:d9:c1:b2:dc:0e:d4:9a: + 
3a:cd:48:70:1e:c4:2e:fd:4f:b0:89:6a:de:f0:90:91:23:16: + cd:04:fc:61:87:9c:c3:5c:7e:0f:19:ff:26:3e:fb:1b:65:2a: + 49:ae:47:9f:d5:e6:c8:30:bb:13:b9:48:d0:67:57:0f:fb:c6: + df:1c:fc:82:3b:ae:1f:f7:25:c8:df:c0:c5:d1:8d:51:94:74: + 30:be:fb:f7 +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUPh+bzch7lfFk5kGc324D2pKakLcwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA4MDIyMjE1MjdaFw0zMzA3MzAyMjE1MjdaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCaPNt2yRkPe+bT7dELdq4V1BEcZrhdKn3jH2XYG8RjYvZcixhmqBzCpl5y +8t1XQoqrXb03tvFLUfCzajfpVXgBI+pTCYMvfVk2qzNPTLzvqRzblHlMDUp8P508 +umx2gkcl63ki9AlseDym70swkCmzX7ppsRqV7VPgxiR4blKvjrzbSvAZ0gBaqLZz +TBeS0Y2Bm0y4NU2R3d/ThaafxJEZ7EfRyk4LwwaMJ0KVg+Moajt0nGiwVaWRkcs3 +rfrYaYveLkpRWTJLPQYhBGXS9YvoTUWW3mOXR4GF6kjwnSMtcYdv0nU9Rb/erUOC +26Upm/leOAo5qThx7EBAtdxpxwtzAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUf0eM +nvFzfjS5Wx7trTqHQoDU4/0wHwYDVR0jBBgwFoAUdVXijuetpd2APckzCyyiV3ft +FawwDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjI4ODg4L2ludGVybWVkaWF0ZTJfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +Mjg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAubQFSKa6bJmLI8Sbs4oyP8piiYEeXQS6LSKjD1pdoKtApIdDJjYKCWTv +9bCnb3ofzAZs942cZF7CrudFOdy8hwbm1aprMnZRZOGs2ZrdF0ebTjEck/XFyta3 +kP9kl1nfK3/uLX1z75WttR6pDEg4KQs5TwX7B8/slKOz1esA7bK5caBZtT989SCQ +VKjqNkyuYlsrbQWNdniHyZDzstFy/If1KEzsGVAPAjLUV3XZwbLcDtSaOs1IcB7E +Lv1PsIlq3vCQkSMWzQT8YYecw1x+Dxn/Jj77G2UqSa5Hn9XmyDC7E7lI0GdXD/vG +3xz8gjuuH/clyN/AxdGNUZR0ML779w== +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution 
Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_cert.pem new file mode 100644 index 000000000..b061b3d46 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer3_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3e:1f:9b:cd:c8:7b:95:f1:64:e6:41:9c:df:6e:03:da:92:9a:90:b7 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: Aug 2 22:15:27 2023 GMT + Not After : Jul 30 22:15:27 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer3 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:9a:3c:db:76:c9:19:0f:7b:e6:d3:ed:d1:0b:76: + ae:15:d4:11:1c:66:b8:5d:2a:7d:e3:1f:65:d8:1b: + c4:63:62:f6:5c:8b:18:66:a8:1c:c2:a6:5e:72:f2: + dd:57:42:8a:ab:5d:bd:37:b6:f1:4b:51:f0:b3:6a: + 37:e9:55:78:01:23:ea:53:09:83:2f:7d:59:36:ab: + 
33:4f:4c:bc:ef:a9:1c:db:94:79:4c:0d:4a:7c:3f: + 9d:3c:ba:6c:76:82:47:25:eb:79:22:f4:09:6c:78: + 3c:a6:ef:4b:30:90:29:b3:5f:ba:69:b1:1a:95:ed: + 53:e0:c6:24:78:6e:52:af:8e:bc:db:4a:f0:19:d2: + 00:5a:a8:b6:73:4c:17:92:d1:8d:81:9b:4c:b8:35: + 4d:91:dd:df:d3:85:a6:9f:c4:91:19:ec:47:d1:ca: + 4e:0b:c3:06:8c:27:42:95:83:e3:28:6a:3b:74:9c: + 68:b0:55:a5:91:91:cb:37:ad:fa:d8:69:8b:de:2e: + 4a:51:59:32:4b:3d:06:21:04:65:d2:f5:8b:e8:4d: + 45:96:de:63:97:47:81:85:ea:48:f0:9d:23:2d:71: + 87:6f:d2:75:3d:45:bf:de:ad:43:82:db:a5:29:9b: + f9:5e:38:0a:39:a9:38:71:ec:40:40:b5:dc:69:c7: + 0b:73 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 7F:47:8C:9E:F1:73:7E:34:B9:5B:1E:ED:AD:3A:87:42:80:D4:E3:FD + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + b9:b4:05:48:a6:ba:6c:99:8b:23:c4:9b:b3:8a:32:3f:ca:62: + 89:81:1e:5d:04:ba:2d:22:a3:0f:5a:5d:a0:ab:40:a4:87:43: + 26:36:0a:09:64:ef:f5:b0:a7:6f:7a:1f:cc:06:6c:f7:8d:9c: + 64:5e:c2:ae:e7:45:39:dc:bc:87:06:e6:d5:aa:6b:32:76:51: + 64:e1:ac:d9:9a:dd:17:47:9b:4e:31:1c:93:f5:c5:ca:d6:b7: + 90:ff:64:97:59:df:2b:7f:ee:2d:7d:73:ef:95:ad:b5:1e:a9: + 0c:48:38:29:0b:39:4f:05:fb:07:cf:ec:94:a3:b3:d5:eb:00: + ed:b2:b9:71:a0:59:b5:3f:7c:f5:20:90:54:a8:ea:36:4c:ae: + 62:5b:2b:6d:05:8d:76:78:87:c9:90:f3:b2:d1:72:fc:87:f5: + 28:4c:ec:19:50:0f:02:32:d4:57:75:d9:c1:b2:dc:0e:d4:9a: + 3a:cd:48:70:1e:c4:2e:fd:4f:b0:89:6a:de:f0:90:91:23:16: + cd:04:fc:61:87:9c:c3:5c:7e:0f:19:ff:26:3e:fb:1b:65:2a: + 49:ae:47:9f:d5:e6:c8:30:bb:13:b9:48:d0:67:57:0f:fb:c6: + df:1c:fc:82:3b:ae:1f:f7:25:c8:df:c0:c5:d1:8d:51:94:74: + 30:be:fb:f7 +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUPh+bzch7lfFk5kGc324D2pKakLcwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA4MDIyMjE1MjdaFw0zMzA3MzAyMjE1MjdaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCaPNt2yRkPe+bT7dELdq4V1BEcZrhdKn3jH2XYG8RjYvZcixhmqBzCpl5y +8t1XQoqrXb03tvFLUfCzajfpVXgBI+pTCYMvfVk2qzNPTLzvqRzblHlMDUp8P508 +umx2gkcl63ki9AlseDym70swkCmzX7ppsRqV7VPgxiR4blKvjrzbSvAZ0gBaqLZz +TBeS0Y2Bm0y4NU2R3d/ThaafxJEZ7EfRyk4LwwaMJ0KVg+Moajt0nGiwVaWRkcs3 +rfrYaYveLkpRWTJLPQYhBGXS9YvoTUWW3mOXR4GF6kjwnSMtcYdv0nU9Rb/erUOC +26Upm/leOAo5qThx7EBAtdxpxwtzAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUf0eM +nvFzfjS5Wx7trTqHQoDU4/0wHwYDVR0jBBgwFoAUdVXijuetpd2APckzCyyiV3ft +FawwDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjI4ODg4L2ludGVybWVkaWF0ZTJfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +Mjg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAubQFSKa6bJmLI8Sbs4oyP8piiYEeXQS6LSKjD1pdoKtApIdDJjYKCWTv 
+9bCnb3ofzAZs942cZF7CrudFOdy8hwbm1aprMnZRZOGs2ZrdF0ebTjEck/XFyta3 +kP9kl1nfK3/uLX1z75WttR6pDEg4KQs5TwX7B8/slKOz1esA7bK5caBZtT989SCQ +VKjqNkyuYlsrbQWNdniHyZDzstFy/If1KEzsGVAPAjLUV3XZwbLcDtSaOs1IcB7E +Lv1PsIlq3vCQkSMWzQT8YYecw1x+Dxn/Jj77G2UqSa5Hn9XmyDC7E7lI0GdXD/vG +3xz8gjuuH/clyN/AxdGNUZR0ML779w== +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_bundle.pem b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_bundle.pem new file mode 100644 index 000000000..27f4217d7 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_bundle.pem @@ -0,0 +1,186 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 16:5e:ab:1c:8b:dc:fc:97:d9:34:9d:fd:cd:7d:b3:3c:51:83:ce:d2 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: Aug 2 22:15:38 2023 GMT + Not After : Jul 30 22:15:38 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer4 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:d5:fd:fb:3f:42:c7:ca:02:37:72:6e:78:d5:af: + 8d:b4:4d:f4:4c:0c:8f:8f:67:da:62:c0:2a:0f:f3: + 73:3b:83:c1:3a:df:9e:df:1d:26:12:95:41:ca:52: + 88:4d:8b:38:7f:78:ce:ed:aa:48:b0:dc:57:62:80: + 7a:fc:1f:43:c8:d8:2d:4f:38:c3:22:fc:bb:16:53: + 84:9e:44:0c:f9:51:00:a0:57:97:3f:df:57:08:48: + 3b:2b:55:b3:90:98:98:e6:a6:eb:ca:8f:ec:f8:4f: + dc:4d:7e:71:2e:03:ff:cd:fa:ef:65:7e:6d:8c:35: + be:df:fb:c1:0b:e9:f0:3b:89:24:4d:b4:02:7f:82: + 8e:0a:34:ea:a8:68:9e:f8:4b:39:9a:8f:d5:eb:bc: + 59:68:c9:f0:a5:eb:e9:be:7c:03:49:bd:b5:d9:54: + cf:88:29:b0:2c:a3:e9:08:b6:66:37:57:ef:66:5f: + 6b:0f:34:6d:02:bf:92:2b:cc:e9:9d:c0:a8:92:0d: + 76:8f:ae:f6:3f:24:38:e9:5b:fc:12:a2:ab:fa:42: + 3f:5a:05:e3:5e:bb:08:43:5d:55:18:17:13:0a:27: + 84:5f:05:69:18:a9:45:68:37:a7:35:f9:8c:ef:c5: + 9f:b1:8d:aa:3c:b7:cc:47:b6:e5:85:e2:73:f5:8a: + 5a:71 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C4:BB:A1:42:EA:15:3E:0E:D1:48:5F:B5:E2:01:42:D0:72:BE:B0:CE + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 85:c2:1a:b0:94:8b:a0:f8:2c:85:1e:17:88:4e:ca:2c:d1:f6: + 69:26:e3:a6:94:9f:62:eb:68:54:da:2b:f2:67:23:be:4b:95: + 56:28:08:7a:52:8e:b3:b2:70:2f:c9:db:06:74:b4:8b:8e:84: + 23:0a:74:f7:c1:67:81:69:11:36:2b:0e:4c:0f:2c:76:e6:2d: + 50:f3:e8:59:0d:3a:6c:30:eb:31:16:74:c8:34:d1:62:97:6b: + 1e:2f:5c:56:b0:6e:bc:5e:08:8f:d4:ce:4a:d3:8e:91:70:7d: + 18:d4:3f:40:39:39:67:95:68:f7:16:c6:19:69:41:c2:20:2e: + 45:e3:9d:31:c2:da:67:8d:2c:1f:a2:3f:1e:46:23:19:fd:25: + 16:69:5c:80:09:1b:f7:7f:50:47:1d:d9:6b:aa:7b:0f:20:8d: + 5a:f4:37:f0:c3:a7:31:5f:4d:41:70:c8:c4:aa:2a:69:d0:a8: + 7b:3c:cc:b4:a4:12:54:a3:bf:ce:ea:22:20:58:ae:eb:29:f3: + 15:da:22:05:46:cd:26:ef:63:84:4a:5b:86:47:fe:cb:fa:4a: + 0c:fe:82:e0:db:81:dc:3e:87:8f:93:23:32:de:37:3d:d7:0f: + 6c:f1:74:63:8b:11:b7:f3:69:b7:d6:e0:72:b2:1d:e1:15:10: + 
7d:2e:97:de +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUFl6rHIvc/JfZNJ39zX2zPFGDztIwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA4MDIyMjE1MzhaFw0zMzA3MzAyMjE1MzhaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDV/fs/QsfKAjdybnjVr420TfRMDI+PZ9piwCoP83M7g8E6357fHSYSlUHK +UohNizh/eM7tqkiw3FdigHr8H0PI2C1POMMi/LsWU4SeRAz5UQCgV5c/31cISDsr +VbOQmJjmpuvKj+z4T9xNfnEuA//N+u9lfm2MNb7f+8EL6fA7iSRNtAJ/go4KNOqo +aJ74Szmaj9XrvFloyfCl6+m+fANJvbXZVM+IKbAso+kItmY3V+9mX2sPNG0Cv5Ir +zOmdwKiSDXaPrvY/JDjpW/wSoqv6Qj9aBeNeuwhDXVUYFxMKJ4RfBWkYqUVoN6c1 ++YzvxZ+xjao8t8xHtuWF4nP1ilpxAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUxLuh +QuoVPg7RSF+14gFC0HK+sM4wHwYDVR0jBBgwFoAUdVXijuetpd2APckzCyyiV3ft +FawwDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjI4ODg4L2ludGVybWVkaWF0ZTJfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +Mjg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAhcIasJSLoPgshR4XiE7KLNH2aSbjppSfYutoVNor8mcjvkuVVigIelKO +s7JwL8nbBnS0i46EIwp098FngWkRNisOTA8sduYtUPPoWQ06bDDrMRZ0yDTRYpdr +Hi9cVrBuvF4Ij9TOStOOkXB9GNQ/QDk5Z5Vo9xbGGWlBwiAuReOdMcLaZ40sH6I/ +HkYjGf0lFmlcgAkb939QRx3Za6p7DyCNWvQ38MOnMV9NQXDIxKoqadCoezzMtKQS +VKO/zuoiIFiu6ynzFdoiBUbNJu9jhEpbhkf+y/pKDP6C4NuB3D6Hj5MjMt43PdcP +bPF0Y4sRt/Npt9bgcrId4RUQfS6X3g== +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 3c:d7:16:fb:15:99:81:4e:53:f8:80:7c:b6:7c:77:a6:06:a4:3e:ea + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Root CA + Validity + Not Before: May 1 19:01:43 2023 GMT + Not After : Apr 28 19:01:43 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:da:5f:ff:1d:f7:8d:1a:9e:9a:f3:2b:68:8f:c1: + 0c:33:06:41:00:c9:3e:e4:1a:e1:e0:70:6a:f5:2f: + ad:df:f3:e9:99:ed:c5:d7:aa:93:13:37:ff:47:aa: + f3:c5:89:f7:b7:ad:3a:47:e5:9c:4e:9f:8c:e2:41: + ed:a4:7c:9d:88:32:ae:f5:8a:84:9f:0c:18:a0:b3: + fe:8e:dc:2a:88:6a:f5:2f:9c:86:92:fa:7b:6e:b3: + 5a:78:67:53:0b:21:6c:0d:6c:80:1a:0e:1e:ee:06: + c4:d2:e7:24:c6:e5:74:be:1e:2e:17:55:2b:e5:9f: + 0b:a0:58:cc:fe:bf:53:37:f7:dc:95:88:f4:77:a6: + 59:b4:b8:7c:a2:4b:b7:6a:67:aa:84:dc:29:f1:f9: + d7:89:05:4d:0b:f3:8b:2d:52:99:57:ed:6f:11:9e: + af:28:a3:61:44:c2:ec:6e:7f:9f:3d:0b:dc:f7:19: + 6d:14:8a:a5:b8:b6:29:02:34:90:b4:96:c1:cb:a7: + 42:46:97:cf:8d:59:fd:17:b1:a6:27:a7:7b:8a:47: + 6f:fa:03:24:1c:12:25:ee:34:d6:5c:da:45:98:23: + 30:e1:48:c9:9a:df:37:aa:1b:70:6c:b2:0f:95:39: + d6:6d:3e:25:20:a8:07:2c:48:57:0c:99:52:cb:89: + 08:41 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Authority Key Identifier: + C3:12:42:BA:A9:D8:4D:E0:C3:3E:BA:D7:47:41:A6:09:2F:6D:B4:E1 + X509v3 Basic Constraints: critical + CA:TRUE, pathlen:0 + X509v3 Key Usage: critical + Digital Signature, Certificate Sign, CRL Sign + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:8888/root_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:8888/ + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 
1f:c6:fc:1c:a1:a5:6d:76:f0:7d:28:1f:e1:15:ab:86:e0:c3: + dd:a0:17:96:0a:c0:16:32:52:37:a4:b6:ad:24:d7:fd:3c:01: + 34:3b:a9:a2:ea:81:05:e7:06:5f:a3:af:7b:fa:b2:a9:c3:63: + 89:bb:0c:70:48:e9:73:cc:33:64:cd:b3:71:88:d1:d1:a1:5a: + 22:a6:ed:03:46:8e:9a:c0:92:37:46:9b:e5:37:78:a5:43:d5: + 46:99:1b:34:40:27:8f:95:dd:c6:9a:55:d9:60:25:8d:b8:e9: + 6e:c9:b3:ee:e8:f0:d9:11:ef:4e:ae:1e:03:70:03:60:66:fd: + ab:b0:f4:74:b6:27:7c:7a:96:9d:86:58:5f:5c:d3:04:ab:16: + 57:12:53:51:c7:93:ca:0b:4e:67:27:2d:b7:20:79:b6:b7:8c: + e7:c3:d9:25:5e:25:63:cf:93:f0:6e:31:c0:d5:4f:05:1c:8d: + 14:1b:6a:d5:01:b6:7a:09:6f:38:f3:e5:e2:5a:e4:e2:42:d5: + 8a:8d:de:ef:73:25:85:3c:e3:a9:ef:f7:f7:23:4f:d3:27:c2: + 3a:c6:c0:6f:2a:9b:1e:fe:fc:31:73:10:e1:08:62:98:2b:6d: + 2f:cc:ab:dd:3a:65:c2:00:7f:29:18:32:cd:8f:56:a9:1d:86: + f1:5e:60:55 +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIUPNcW+xWZgU5T+IB8tnx3pgakPuowDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRAwDgYDVQQDDAdSb290IENBMB4XDTIzMDUwMTE5 +MDE0M1oXDTMzMDQyODE5MDE0M1owWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldB +MQ8wDQYDVQQHDAZUYWNvbWExETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJ +bnRlcm1lZGlhdGUgQ0EgMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANpf/x33jRqemvMraI/BDDMGQQDJPuQa4eBwavUvrd/z6ZntxdeqkxM3/0eq88WJ +97etOkflnE6fjOJB7aR8nYgyrvWKhJ8MGKCz/o7cKohq9S+chpL6e26zWnhnUwsh +bA1sgBoOHu4GxNLnJMbldL4eLhdVK+WfC6BYzP6/Uzf33JWI9HemWbS4fKJLt2pn +qoTcKfH514kFTQvziy1SmVftbxGeryijYUTC7G5/nz0L3PcZbRSKpbi2KQI0kLSW +wcunQkaXz41Z/Rexpiene4pHb/oDJBwSJe401lzaRZgjMOFIyZrfN6obcGyyD5U5 +1m0+JSCoByxIVwyZUsuJCEECAwEAAaOB0DCBzTAdBgNVHQ4EFgQUdVXijuetpd2A +PckzCyyiV3ftFawwHwYDVR0jBBgwFoAUwxJCuqnYTeDDPrrXR0GmCS9ttOEwEgYD +VR0TAQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwMwYDVR0fBCwwKjAooCag +JIYiaHR0cDovLzEyNy4wLjAuMTo4ODg4L3Jvb3RfY3JsLmRlcjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6ODg4OC8wDQYJKoZI +hvcNAQELBQADggEBAB/G/ByhpW128H0oH+EVq4bgw92gF5YKwBYyUjektq0k1/08 +ATQ7qaLqgQXnBl+jr3v6sqnDY4m7DHBI6XPMM2TNs3GI0dGhWiKm7QNGjprAkjdG +m+U3eKVD1UaZGzRAJ4+V3caaVdlgJY246W7Js+7o8NkR706uHgNwA2Bm/auw9HS2 +J3x6lp2GWF9c0wSrFlcSU1HHk8oLTmcnLbcgeba3jOfD2SVeJWPPk/BuMcDVTwUc +jRQbatUBtnoJbzjz5eJa5OJC1YqN3u9zJYU846nv9/cjT9MnwjrGwG8qmx7+/DFz +EOEIYpgrbS/Mq906ZcIAfykYMs2PVqkdhvFeYFU= +-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_cert.pem b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_cert.pem new file mode 100644 index 000000000..703262550 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server2/TestServer4_cert.pem @@ -0,0 +1,97 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 16:5e:ab:1c:8b:dc:fc:97:d9:34:9d:fd:cd:7d:b3:3c:51:83:ce:d2 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, L=Tacoma, O=Testnats, CN=Intermediate CA 2 + Validity + Not Before: Aug 2 22:15:38 2023 GMT + Not After : Jul 30 22:15:38 2033 GMT + Subject: C=US, ST=WA, L=Tacoma, O=Testnats, CN=TestServer4 + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:d5:fd:fb:3f:42:c7:ca:02:37:72:6e:78:d5:af: + 8d:b4:4d:f4:4c:0c:8f:8f:67:da:62:c0:2a:0f:f3: + 73:3b:83:c1:3a:df:9e:df:1d:26:12:95:41:ca:52: + 88:4d:8b:38:7f:78:ce:ed:aa:48:b0:dc:57:62:80: + 7a:fc:1f:43:c8:d8:2d:4f:38:c3:22:fc:bb:16:53: + 84:9e:44:0c:f9:51:00:a0:57:97:3f:df:57:08:48: + 3b:2b:55:b3:90:98:98:e6:a6:eb:ca:8f:ec:f8:4f: + dc:4d:7e:71:2e:03:ff:cd:fa:ef:65:7e:6d:8c:35: + be:df:fb:c1:0b:e9:f0:3b:89:24:4d:b4:02:7f:82: + 8e:0a:34:ea:a8:68:9e:f8:4b:39:9a:8f:d5:eb:bc: 
+ 59:68:c9:f0:a5:eb:e9:be:7c:03:49:bd:b5:d9:54: + cf:88:29:b0:2c:a3:e9:08:b6:66:37:57:ef:66:5f: + 6b:0f:34:6d:02:bf:92:2b:cc:e9:9d:c0:a8:92:0d: + 76:8f:ae:f6:3f:24:38:e9:5b:fc:12:a2:ab:fa:42: + 3f:5a:05:e3:5e:bb:08:43:5d:55:18:17:13:0a:27: + 84:5f:05:69:18:a9:45:68:37:a7:35:f9:8c:ef:c5: + 9f:b1:8d:aa:3c:b7:cc:47:b6:e5:85:e2:73:f5:8a: + 5a:71 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + C4:BB:A1:42:EA:15:3E:0E:D1:48:5F:B5:E2:01:42:D0:72:BE:B0:CE + X509v3 Authority Key Identifier: + 75:55:E2:8E:E7:AD:A5:DD:80:3D:C9:33:0B:2C:A2:57:77:ED:15:AC + X509v3 Basic Constraints: critical + CA:FALSE + Netscape Cert Type: + SSL Client, SSL Server + X509v3 Key Usage: critical + Digital Signature, Non Repudiation, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication, TLS Web Client Authentication + X509v3 CRL Distribution Points: + Full Name: + URI:http://127.0.0.1:28888/intermediate2_crl.der + Authority Information Access: + OCSP - URI:http://127.0.0.1:28888/ + X509v3 Subject Alternative Name: + DNS:localhost, IP Address:127.0.0.1 + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 85:c2:1a:b0:94:8b:a0:f8:2c:85:1e:17:88:4e:ca:2c:d1:f6: + 69:26:e3:a6:94:9f:62:eb:68:54:da:2b:f2:67:23:be:4b:95: + 56:28:08:7a:52:8e:b3:b2:70:2f:c9:db:06:74:b4:8b:8e:84: + 23:0a:74:f7:c1:67:81:69:11:36:2b:0e:4c:0f:2c:76:e6:2d: + 50:f3:e8:59:0d:3a:6c:30:eb:31:16:74:c8:34:d1:62:97:6b: + 1e:2f:5c:56:b0:6e:bc:5e:08:8f:d4:ce:4a:d3:8e:91:70:7d: + 18:d4:3f:40:39:39:67:95:68:f7:16:c6:19:69:41:c2:20:2e: + 45:e3:9d:31:c2:da:67:8d:2c:1f:a2:3f:1e:46:23:19:fd:25: + 16:69:5c:80:09:1b:f7:7f:50:47:1d:d9:6b:aa:7b:0f:20:8d: + 5a:f4:37:f0:c3:a7:31:5f:4d:41:70:c8:c4:aa:2a:69:d0:a8: + 7b:3c:cc:b4:a4:12:54:a3:bf:ce:ea:22:20:58:ae:eb:29:f3: + 15:da:22:05:46:cd:26:ef:63:84:4a:5b:86:47:fe:cb:fa:4a: + 0c:fe:82:e0:db:81:dc:3e:87:8f:93:23:32:de:37:3d:d7:0f: + 6c:f1:74:63:8b:11:b7:f3:69:b7:d6:e0:72:b2:1d:e1:15:10: + 7d:2e:97:de +-----BEGIN CERTIFICATE----- +MIIEYjCCA0qgAwIBAgIUFl6rHIvc/JfZNJ39zX2zPFGDztIwDQYJKoZIhvcNAQEL +BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAldBMQ8wDQYDVQQHDAZUYWNvbWEx +ETAPBgNVBAoMCFRlc3RuYXRzMRowGAYDVQQDDBFJbnRlcm1lZGlhdGUgQ0EgMjAe +Fw0yMzA4MDIyMjE1MzhaFw0zMzA3MzAyMjE1MzhaMFQxCzAJBgNVBAYTAlVTMQsw +CQYDVQQIDAJXQTEPMA0GA1UEBwwGVGFjb21hMREwDwYDVQQKDAhUZXN0bmF0czEU +MBIGA1UEAwwLVGVzdFNlcnZlcjQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDV/fs/QsfKAjdybnjVr420TfRMDI+PZ9piwCoP83M7g8E6357fHSYSlUHK +UohNizh/eM7tqkiw3FdigHr8H0PI2C1POMMi/LsWU4SeRAz5UQCgV5c/31cISDsr +VbOQmJjmpuvKj+z4T9xNfnEuA//N+u9lfm2MNb7f+8EL6fA7iSRNtAJ/go4KNOqo +aJ74Szmaj9XrvFloyfCl6+m+fANJvbXZVM+IKbAso+kItmY3V+9mX2sPNG0Cv5Ir +zOmdwKiSDXaPrvY/JDjpW/wSoqv6Qj9aBeNeuwhDXVUYFxMKJ4RfBWkYqUVoN6c1 ++YzvxZ+xjao8t8xHtuWF4nP1ilpxAgMBAAGjggEkMIIBIDAdBgNVHQ4EFgQUxLuh +QuoVPg7RSF+14gFC0HK+sM4wHwYDVR0jBBgwFoAUdVXijuetpd2APckzCyyiV3ft +FawwDAYDVR0TAQH/BAIwADARBglghkgBhvhCAQEEBAMCBsAwDgYDVR0PAQH/BAQD +AgXgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA9BgNVHR8ENjA0MDKg +MKAuhixodHRwOi8vMTI3LjAuMC4xOjI4ODg4L2ludGVybWVkaWF0ZTJfY3JsLmRl +cjAzBggrBgEFBQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6 +Mjg4ODgvMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF +AAOCAQEAhcIasJSLoPgshR4XiE7KLNH2aSbjppSfYutoVNor8mcjvkuVVigIelKO +s7JwL8nbBnS0i46EIwp098FngWkRNisOTA8sduYtUPPoWQ06bDDrMRZ0yDTRYpdr +Hi9cVrBuvF4Ij9TOStOOkXB9GNQ/QDk5Z5Vo9xbGGWlBwiAuReOdMcLaZ40sH6I/ +HkYjGf0lFmlcgAkb939QRx3Za6p7DyCNWvQ38MOnMV9NQXDIxKoqadCoezzMtKQS +VKO/zuoiIFiu6ynzFdoiBUbNJu9jhEpbhkf+y/pKDP6C4NuB3D6Hj5MjMt43PdcP +bPF0Y4sRt/Npt9bgcrId4RUQfS6X3g== 
+-----END CERTIFICATE----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem new file mode 100644 index 000000000..bb0d7e45b --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCaPNt2yRkPe+bT +7dELdq4V1BEcZrhdKn3jH2XYG8RjYvZcixhmqBzCpl5y8t1XQoqrXb03tvFLUfCz +ajfpVXgBI+pTCYMvfVk2qzNPTLzvqRzblHlMDUp8P508umx2gkcl63ki9AlseDym +70swkCmzX7ppsRqV7VPgxiR4blKvjrzbSvAZ0gBaqLZzTBeS0Y2Bm0y4NU2R3d/T +haafxJEZ7EfRyk4LwwaMJ0KVg+Moajt0nGiwVaWRkcs3rfrYaYveLkpRWTJLPQYh +BGXS9YvoTUWW3mOXR4GF6kjwnSMtcYdv0nU9Rb/erUOC26Upm/leOAo5qThx7EBA +tdxpxwtzAgMBAAECggEALjBPYLE0SgjGxWyQj6hI1cyeGy0/xNa2wE9kxmT6WPEH +6grVkdiCVGBSJIZKdpk8wbjes1Kby/yL4o7Kk5u+xkilIZzVpmEZWF/Ii9TlN7gj +Jja+ZGIOjkrWoZsKZCr7d4WezzLZp5wSPcOndrGVa1wdjQ02cvORjNyJi28uX9gd +8uBK5AIXS1lbkt/v+8mrBPgZUttz6gxhlHwxKs6JWWlIpGemNddE39UxuGDGHmVA +aw/gH/G4LNXtbAIPq5zDtFbfCKnQVgU1ppWILehoFqIs8JLtz4LPuvIxeztzKff4 +DU31rs14Zati5ykq9CVqY/d+4nKdstwhRPcPfsvgYQKBgQDBNVPn73A7fRoURpzV +sdJPA4RDbrbiZj0x/cAskuzzx/mmJUuNyuJxGizJU0ebT3VxtdCR2LqpgGEQEaKS +wYmMlSJ4NccugWgRl7/of5d5oY2m6f4W4YaNp4RebdVhNPJ4wSbeW7pH+2OKr2xd +my+m1WJUvRBbPq5kV2BdHNw62QKBgQDMXTqaOjsC9jpOOIjsUHmV55MbMmwK8For +H6e3Dn1ZO0Tpcg33GMLO5wHwzH6dlT2JVJAOdr5HqZgdIqjt30ACZsdf2VkutH94 +OvZmEAbwI9A+TAoxE8QlLYyz/qjJSGopJRU0x+KqEORxBmjO6LVV1GL9VVdoYrlH +Z7mrJ+7RKwKBgQC87LyDS2rfgNEDipjJjPwtLy8iERzb/UVRoONNss3pA15mzIk4 +uW77UbEBnGGkyOn6quKr+tVr8ZD3+YaTIpSx1xLBoTSHkRqGOXD6k+k2knbFBIHl +NdowoeGZxKSmTPPciGLNg7x/rp4Des3oKltKM9XXLpjT4FL+40HjStk+4QKBgQC8 +71AXd9BIy7VZzaCgwUG3GhIBadtDPbRO/AQFFAtE7KuoGz7X+/dWa3F62sQQEgKD +LT/Fb3g5LoyoGvwMdoJp9fVLItj1egAC+pgEAbs4VhPXFFuzxa9oI7VaTwxikmU7 +RsJVOprOWbGo4KES8Ud8Y09lIHof0m2ymy2nE9MRYwKBgDn86ZcbBr6sBXgc6PEM +rq4JXBCX8O17id9rJO37PkhPsOKpNf7YbQwHlHjwkUq5+g7Ec/LbeZ/tssEBY0ab +zUXwgWFMUKJVTEZUFwl2aTBqW8+LSu1TgzGMx2H/sxrvS4ElxC04jpPWUQstcuRH +y3yIz1HsmlMEg7qCiQ4maZE3 +-----END PRIVATE KEY----- diff --git a/test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer4_keypair.pem b/test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer4_keypair.pem new file mode 100644 index 000000000..979272806 --- /dev/null +++ b/test/configs/certs/ocsp_peer/mini-ca/server2/private/TestServer4_keypair.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDV/fs/QsfKAjdy +bnjVr420TfRMDI+PZ9piwCoP83M7g8E6357fHSYSlUHKUohNizh/eM7tqkiw3Fdi +gHr8H0PI2C1POMMi/LsWU4SeRAz5UQCgV5c/31cISDsrVbOQmJjmpuvKj+z4T9xN +fnEuA//N+u9lfm2MNb7f+8EL6fA7iSRNtAJ/go4KNOqoaJ74Szmaj9XrvFloyfCl +6+m+fANJvbXZVM+IKbAso+kItmY3V+9mX2sPNG0Cv5IrzOmdwKiSDXaPrvY/JDjp +W/wSoqv6Qj9aBeNeuwhDXVUYFxMKJ4RfBWkYqUVoN6c1+YzvxZ+xjao8t8xHtuWF +4nP1ilpxAgMBAAECggEABmE7dr39Ep3ZDRdz0QwaNY5O6p8Dvy7llQTdZCsaDAPQ +NJsC46w87LgoNVnbUDOGwE8n3TBS2ToCfXBu6joc5V2jkS10LOR7x+0+wpCtEdhL +RFyEKP51u+yaXf8Aut5/zX2bwUbj9d28p89NnMV4AIo7Dau0pKXcDlW1Qk+LztyI +hKFN6hrSFqAurmSt/pu3oo9kI9WJkrCxoj+VjQdVi420uAYOFR22aFaHrzpuHouW +4IzFbLhVF+c33xSbs1OEIpZSFzNucWYEKSwEREcyFgIXfWpDaXjoqWcrvXkeqyo9 +vGytQ3YaEsZPzfzgcViwa30g7WAA7kO9RuwcCPK4wQKBgQDpVmbVnmTlRwFbtdkD +4rjd5vtAB3nfsl0Ex11nU8+Oo0kZWeg8mm+Gba4vjEKfVyojbjFmm0ytQG0OGEK7 +UQ13mE1wueMn5qEVX9nTXIxVwcS7+rQAUrC5a6SSg81WIWzeclkqNc1J1EVC7jtl +zqy3PtC94g4tV68urpD86RRxUQKBgQDqxpWscN1u7GeuYf8rSPhPcoZTupqyrV3L +h+w7jUt5O/vfNPOYIXVfo2u05jiK0mTvLf5tVjYoQDF+x6odA2oBH2yz1ED0DZsf 
+2AhdtCSrMbxazcl/5fPrIIa1GRBp6y5i0ddX8T19twr/PVoYGRqkU4xoN+KoOKz+ +HLFUUgQPIQKBgG5N9v0DDMVKRL0bAQUSN7xGxf1ly1pRUiHBMUl4WEUgsZy3YM7N +Xu1YiiBWGOSEaxomrFnKDnxUWXlxRJKSZWBk8i7Y4SZqozmcfzeop3qeyCbpBBCn +Bn4RAdJ1VitiT7n0qmwG1Q4St89FGXUuN33Exx8MbxFGQz05LrcwZAaRAoGAVFez +PZfudQMI3GToPqygSCpkh3/qQ3Z008Go5FwGWS9rdOyY9nZOrGURNJPgjD65dBOZ +672lByDIpzsjqfioBG89pf0CuKqKqA38M22cHsRnXle/o+sAjd/JhRXUB7ktmOK5 +8iYAaUFw+fEYhL/ACnjZYDdzfeueekvkiN5OBwECgYB90hQJ2lw5s6GFJd+9T5xS +OMngfLAWDvW8+0hvtWCTLAVpMDWRGhGmvj532jWfkgqnvUemyF541RkV0Hy5K1Xl +0icXtpuZ+REh7NCXFJlEiOd+69OEdu78s5Zy8V1zCkEsgxzl2q6PkBDWfxepgdRC +LbwiAF8h2mxCwvvHbaBiKA== +-----END PRIVATE KEY----- diff --git a/test/configs/certs/tlsauth/certstore/client.p12 b/test/configs/certs/tlsauth/certstore/client.p12 new file mode 100644 index 0000000000000000000000000000000000000000..18ee5c32f01bd2c89fdff8779f6b309242426da8 GIT binary patch literal 2509 zcmY+^cQhM}8U}EQ7(o?9%&HM1S`xMAXMDBOs@T*hHCwaMp+?k*nx$6EiW*g;pAl*n zO;IaUYgZGRnn|ttP*=~n_kQ>O@t*gb=Q-#3^9RR(2Y>)ZI0l@?42H#<#P6R0umDOh z;AjvA9B~@ogJVF7|5VH+7|?~&NCXIAIDLHoGyu2=GuyuxH~`FWP7v#ySx{0b_H{52 z$jT6nVWfIE);Z^5SXfDW$%q+}C%UUF@BGL1Ym0TYN#J{Yq+q>Hm9~bndo3N39<|Dl z_D63hmD3CY+B1*S&zN#$E6v${UKxCeg$P{at%{6Wr^lsZ1xfYUyU4!~dwV#ulxQV< zf-rONs#WnN?LY1DnyR$91TlWe>B+`o;khe0nZh1aMk1~)^Keq8O~aIK5IOS^6;RjG z^4#^SqONy0Kwq_23+48PJ7p_?e zFUI=_iAsriAboMTd8X?L{_|S()=^twGwK(mMg?D0_0L#8!&!y$ur3U^) zm;F^&jk2pn%=SwlbWC9AoU+)PUN2LXOL=CaDPL*Yn}l?KV7PJc}c1O zv_#{3Ov>sF_MN^PZ@KZrij9ORmIb+PGGi(+im^Y#)Yq$B%4<8$A%C)scx}DKvXu5Q z7%;U_ARHx^GB|&y4+etvmq}3?Ejt<*e0#DkAg8Lod|vUyI+SWh{J0u&`Mb@4YYRFw zLxsoYPrcr)fK7}`!qyH;*iQ|0Hir(+X(GOpNDIoFj3QZPNzikyGZ&>X-yaJGZgs~d zCdP_}<5oEsvMcED?KBooRP%8~#Ypo}Ces_uuR4A~bTu8Jh?dZ1*OsG7BzCPRqJdm~ zAoe4Ksaoq@(rb0^oi$BEv6cJ6H{71x2!Gl3~D#iNHZe}Kmy+)19k=i;+X zLk>A@p5+NB?nTIQir2=BG+V!pG-M3_Ly0MEf;E$;r*=4O1RH2({IZXKca(yk$ezD5 z(@Pi+RF*tLN)<=TB_1fcz~;LBR?d!w!}XmC3Nsy|){C0Eo?Na-sxj`9zNj3M&)&m_ z>UIm?;)RN(Ak-#39xqMx&$AM{b%q}!**X-iK+Li58#UMyA#Js9>+1q1EGCgXlOB?r za)E$Vz5%RmA^^SBx|Y&qJF~{7lm_Rd)lcwmI6vqwp-WD>y=tQiJ=7Lw5Tv#A=qiv> zjO3d7g1CXbmD%>eIj?eS-pAZ)aqUUQc;W?i8u=H?4S+(nLY{Zm1|=TZ+!xU@D?nDqY#h7t^u5(vYja2mr-PnwziKTNOzfTx~pobAzd=V&amcSh%8Y{e@sThsBr9N2n2+f-u<-JOQljo>Yh)lN9{6O8o)+DvDb}4B#Xs<)I=28jl3;I@M znK5Vd`FFL(*5MnhuZUGq14i38B-P66q~pA+U@Cb*~*U@2JK?mmEBYrR3Jmj?od2(~MJ}z|TM8?`v2`ktaO)B*ED{B|+WNTiB5B zrMKo^Y?7abjx&}8>wWgF$}L@#tIJu&*?*2&z&^bzUl`AJG-FYF^mkb4Rm34IR9ePv zmsB2v2d`tY}hbND|41ufarRY!Y2r9WiTFJEOFX6G*AJoy9rx9DiB;q)`) z&eN5U#XUIskl@oB^pZs<@%wIolcKGEAM#~muc>Q!s^pe3GL@(p`irUi#74dKAnVyq z+hT;q!W}|aeAiif;i|$QoJE~lOU5-u+U6$70c(HaW-Y-jJT20>JOb(vP3XIcOCrBM zxCk){Jp9>VpzYk( z?XO56dqEiuC42F6^~|N3GtvN>7=AX`#qd$;bLP%VhL0eT(E;6xO>RsqANaDBG-^;B zgI2;rV3{~K389Hq2vqyM-=6yTx#yU}G40;M6?S?CUogwi8v97Jr`Dwkl{mj@Jv$+h z%DZ}ovBbFdXwfA?R(IIX6}s!Th7$$5a^xqRQ7dqKPORM9p>L+PcFe0?K=eGh;DbHu zhS-#KfNrkAOn*f@Sm;`k?M*`Y&y&BZbmbg^dNL1=2luV`>KGLFxK)(85=ZjhP4yZ* zCa+JnLVAJSBNmG5Zk5@lu9&hJiA$>rcjK;e@A)e+2x;0N)# zo5+$P_je*+900UxBv$C{dctSm2)H;L%*-Uu$p{o;VgT{dyc|4~xM#EOpJY&8%gTuY U+E{c!tg Note: set the PKCS12 bundle password to `s3cr3t` as required by provisioning scripts + +## Cert Store Provisioning Scripts + +Windows cert store supports p12/pfx bundle for certificate-with-key import. Windows cert store tests will execute +a Powershell script to import relevant PKCS12 bundle into the Windows store before the test. 
Equivalent to: + +`powershell.exe -command "& '..\test\configs\certs\tlsauth\certstore\import--p12.ps1'"` + +The `delete-cert-from-store.ps1` script deletes imported certificates from the Windows store (if present) that can +cause side-effects and impact the validity of different use tests. + +> Note: Tests are configured for "current user" store context. Execute tests with appropriate Windows permissions +> (e.g. as Admin) if adding tests with "local machine" store context specified. \ No newline at end of file diff --git a/test/configs/certs/tlsauth/certstore/server.p12 b/test/configs/certs/tlsauth/certstore/server.p12 new file mode 100644 index 0000000000000000000000000000000000000000..9325afbc15cd755c85b08a3307aa55d4b6547e85 GIT binary patch literal 2533 zcmY+^cQhM{9tUt4O~tOQm#QdktVV2V?@>D~MQyQXt(#b(N^B9*w5lkshFZ01_8Kj! zR$8N_#jV=ZikeT)dGFo#{`j5WIp6O&-#%w7Yr02Rps2f86S@Y zf2$ms_-c003}Dje-#TThI1+bj)|@qQlr95E$dPa7Kt!&GqS#^Hk9RsN#JT15=>Quw zC`3f_NfbBL*stak({qc9@EUi#n>QI|=<##K`uFJ>$q8eZ_`k?}UJpD5KxIBCmIyU3 zZk42$gnYxnx;x3J6LQ038lIk{A5$%@^3@`+qqpYPdYk<0?y30kPxHdK4Om|~q;2m|viM4)h6saVGK{?94@DLN~#gCR#(I|Tn%Xj+> zD_{Jn+)9OIV|oa?0x@vNM7z1r3b%5}6E&g0MBd`QR`exnz58jeoJ!>iSLoYtc&g|u zo1d#-$Zy|1CqtA5L$1k5A3o|u&JW9yW{rrGBvYH^7Il1j)QA1?N|RB5c0)+I$dF{F z&)nBy3I}huZ@iEDqpI!I)Q2X|a$>~<_ae!j{32!?BkuP#8W)AX#{T6x-sHO(?z}63 zjvC|Cg+S$Vt^=;dwh)7|p9t+#YkYbTU@gc58|v-`PGLn|pLqnD#c zWdD^j^VzRU3{cVBZ7nbs8Ks{`Ooi#@)C4-PzRj9xEs}`E8k)`a-5)1SlZFHWa!bu} zs{0EPXv*{}yB4lkuOOXQ*9X{*^R}%X!nG6!KRnC3 zt;H1@quPWt+B!h?h zp^?p$)^f>lUz2hLjI*(>hc7Amn$vXQ~B9x*F-Da z8rImgRU4akzWU@~FU2Ph95^}|)EQyMsL?+faoH`*<+ST$F;JsuheftpZJhU;YsN=O zl>X}Ph3xRo;q_P}oNb*#7=XjgV@(?`%k`+NG-PyxQJ#Mfrj7Zc!_*;haLTm{%Z~^q zt@=;esn?(0d{ky-9D*LeKh|Q8zxOfwYJH)~X2Bgc8#Zo=a}l>Z!;kK#d&6Y%Xo#Yp zctBYXYnDt^Om|HtRi&z(>_6yz5uD#`1eTU-*!^%NxL#G#R;N+TnI`SY>um)&GZplo z=&%4v)>rE|GWq1f$84)|{fHRCfnhQzNa7V1zZOzVM8{2E29zLh7qtEdjHkE@NHFe# z^m#0PexeY@|M0;80G>OuaSqJN|EU7{rOLZ~YDp$=2K-AEFb@2d!Lkc?{G3A(jZ7ha zxzvAHOQ7CXLRkV7`e$3DjPC2|Ug7{YXdDL3a~mq+jTp*ivEE&Y6LK)awneU^HJA;` zu(!k$4v8V^<#eiS)vwG`fyXzcrIOm!lIj_i7KUp*y6X3;WyeZu}Yb1!2 z52t@)88vb3i(+ZbguLiKO(Nr{I<3F0)R^7JJl+Wz&;$pTf%x^H7eUefx(?7YX# zgu3>(Fn~<$jk3!w1z$17PFMH>9`U)Omue`-t*M6$+5w(*UQ_#f7KewK!RF5xMF;20 zpH1n-?7$N(i+|dRKtcjnk$A-?-Mu>azeDY(Z#6qz}BUHfPdH@O+LDB-bN1CCg82HG4M0 zqx^fG$4$~TtuE$ufAK-P6YeL>2EKoTk$z z{@#rS>%_7nJ`s$gQI?fQ%XZjoH)l`COr0_S&*9SN=3CB8qZVI{81@G_I~Vuun5VbW z^uCksRrp-LRqk7NrE^(%v-H3o##;LN{z%h!i*9Cf=(Oyi;!xC|9D7}1D5Y^&{N_<4 zl-aKr`p!6~!00!E^#?`^4JYgevEE95wr#Oea-tsktc@j;;2w+?-q6{CX|K{=Ly-7bXIbK&@4a-!tsegVNtkz;Y#p)oHDp5aXfKhEsJqYS!_$Cb$ zd|4viz@NB>ZOwKb*cSX$?8VQq3+QVeRw1k9nP~+Or>pxrLPDDgSEYOswD$w3ApPtRp2)H)9c)MC5~+O900%j9-pNLja!xrD2M~KQOZV^!lRD~{ zj9^*r=lm~CWke9d|J4!7CBqHo{F7BghChx*Kvrxw+gSg2IA@^3jgUbIA)t^85-cF# q 0 { + // good + } else { + if err != nil { + t.Fatalf("Expected an extant local cache file, got error: %v", err) + } + if fi != nil { + t.Fatalf("Expected non-zero size local cache file, got a FileInfo with size %d", fi.Size()) + } + } + firstFi := fi + // Connect with UserB1 client and get another CA Response + nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts[1]...) 
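+ // A second successful connect should add another entry to the local response
+ // cache; the file-size comparison further below asserts that it grew.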
+ if test.err == nil && err != nil { + t.Errorf("Expected to connect, got %v", err) + } else if test.err != nil && err == nil { + t.Errorf("Expected error on connect") + } else if test.err != nil && err != nil { + // Error on connect was expected + if test.err.Error() != err.Error() { + t.Errorf("Expected error %s, got: %s", test.err, err) + } + return + } + nc.Close() + time.Sleep(2 * time.Second) + fi, err = statCacheFile("") + if err == nil && fi != nil && fi.Size() > firstFi.Size() { + // good + } else { + if err != nil { + t.Fatalf("Expected an extant local cache file, got error: %v", err) + } + if fi != nil { + t.Fatalf("Expected non-zero size local cache file with more bytes, got a FileInfo with size %d", fi.Size()) + } + } + }) + } +} + +func statCacheFile(dir string) (os.FileInfo, error) { + if dir == "" { + dir = "_rc_" + } + return os.Stat(filepath.Join(dir, "cache.json")) +} + +// TestOCSPPeerUndelegatedCAResponseSigner +func TestOCSPPeerUndelegatedCAResponseSigner(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rootCAResponder := newOCSPResponderRootCA(t) + rootCAResponderURL := fmt.Sprintf("http://%s", rootCAResponder.Addr) + defer rootCAResponder.Shutdown(ctx) + setOCSPStatus(t, rootCAResponderURL, "configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem", ocsp.Good) + + intermediateCA1Responder := newOCSPResponderIntermediateCA1Undelegated(t) + intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr) + defer intermediateCA1Responder.Shutdown(ctx) + setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem", ocsp.Good) + + for _, test := range []struct { + name string + config string + opts []nats.Option + err error + rerr error + configure func() + }{ + { + "mTLS OCSP peer check on inbound client connection, responder is CA (undelegated)", + ` + port: -1 + # Cache configuration is default + tls: { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" + timeout: 5 + verify: true + # Turn on CA OCSP check so unvalidated clients can't connect + ocsp_peer: true + } + `, + []nats.Option{ + nats.ClientCert("./configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem", "./configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem"), + nats.RootCAs("./configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() {}, + }, + } { + t.Run(test.name, func(t *testing.T) { + deleteLocalStore(t, "") + test.configure() + content := test.config + conf := createConfFile(t, []byte(content)) + s, opts := RunServerWithConfig(conf) + defer s.Shutdown() + nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) 
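+ // The CA signs OCSP responses directly here (no delegated signer), which is
+ // a valid configuration, so the client is expected to connect.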
+ if test.err == nil && err != nil {
+ t.Errorf("Expected to connect, got %v", err)
+ } else if test.err != nil && err == nil {
+ t.Errorf("Expected error on connect")
+ } else if test.err != nil && err != nil {
+ // Error on connect was expected
+ if test.err.Error() != err.Error() {
+ t.Errorf("Expected error %s, got: %s", test.err, err)
+ }
+ return
+ }
+ defer nc.Close()
+ })
+ }
+}
+
+// TestOCSPPeerDelegatedCAResponseSigner
+func TestOCSPPeerDelegatedCAResponseSigner(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ rootCAResponder := newOCSPResponderRootCA(t)
+ rootCAResponderURL := fmt.Sprintf("http://%s", rootCAResponder.Addr)
+ defer rootCAResponder.Shutdown(ctx)
+ setOCSPStatus(t, rootCAResponderURL, "configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem", ocsp.Good)
+
+ intermediateCA1Responder := newOCSPResponderIntermediateCA1(t)
+ intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr)
+ defer intermediateCA1Responder.Shutdown(ctx)
+ setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem", ocsp.Good)
+
+ for _, test := range []struct {
+ name string
+ config string
+ opts []nats.Option
+ err error
+ rerr error
+ configure func()
+ }{
+ {
+ "mTLS OCSP peer check on inbound client connection, responder is CA (delegated)",
+ `
+ port: -1
+ # Cache configuration is default
+ tls: {
+ cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem"
+ key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem"
+ ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"
+ timeout: 5
+ verify: true
+ # Turn on CA OCSP check so unvalidated clients can't connect
+ ocsp_peer: true
+ }
+ `,
+ []nats.Option{
+ nats.ClientCert("./configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem", "./configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem"),
+ nats.RootCAs("./configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"),
+ nats.ErrorHandler(noOpErrHandler),
+ },
+ nil,
+ nil,
+ func() {},
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ deleteLocalStore(t, "")
+ test.configure()
+ content := test.config
+ conf := createConfFile(t, []byte(content))
+ s, opts := RunServerWithConfig(conf)
+ defer s.Shutdown()
+ nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...)
+ if test.err == nil && err != nil { + t.Errorf("Expected to connect, got %v", err) + } else if test.err != nil && err == nil { + t.Errorf("Expected error on connect") + } else if test.err != nil && err != nil { + // Error on connect was expected + if test.err.Error() != err.Error() { + t.Errorf("Expected error %s, got: %s", test.err, err) + } + return + } + defer nc.Close() + }) + } +} + +// TestOCSPPeerBadDelegatedCAResponseSigner +func TestOCSPPeerBadDelegatedCAResponseSigner(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rootCAResponder := newOCSPResponderRootCA(t) + rootCAResponderURL := fmt.Sprintf("http://%s", rootCAResponder.Addr) + defer rootCAResponder.Shutdown(ctx) + setOCSPStatus(t, rootCAResponderURL, "configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem", ocsp.Good) + + intermediateCA1Responder := newOCSPResponderBadDelegateIntermediateCA1(t) + intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr) + defer intermediateCA1Responder.Shutdown(ctx) + setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem", ocsp.Good) + + for _, test := range []struct { + name string + config string + opts []nats.Option + err error + rerr error + configure func() + }{ + { + "mTLS OCSP peer check on inbound client connection, responder is not a legal delegate", + ` + port: -1 + # Cache configuration is default + tls: { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" + timeout: 5 + verify: true + # Turn on CA OCSP check so unvalidated clients can't connect + ocsp_peer: true + } + `, + []nats.Option{ + nats.ClientCert("./configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem", "./configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem"), + nats.RootCAs("./configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"), + nats.ErrorHandler(noOpErrHandler), + }, + errors.New("remote error: tls: bad certificate"), + errors.New("expect error"), + func() {}, + }, + } { + t.Run(test.name, func(t *testing.T) { + deleteLocalStore(t, "") + test.configure() + content := test.config + conf := createConfFile(t, []byte(content)) + s, opts := RunServerWithConfig(conf) + defer s.Shutdown() + nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) 
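+ // Here the handshake should fail ("remote error: tls: bad certificate"):
+ // the responder signs with a certificate that is not a valid delegate of
+ // the issuing CA, so the peer cannot be validated.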
+ if test.err == nil && err != nil {
+ t.Errorf("Expected to connect, got %v", err)
+ } else if test.err != nil && err == nil {
+ t.Errorf("Expected error on connect")
+ } else if test.err != nil && err != nil {
+ // Error on connect was expected
+ if test.err.Error() != err.Error() {
+ t.Errorf("Expected error %s, got: %s", test.err, err)
+ }
+ return
+ }
+ defer nc.Close()
+ })
+ }
+}
+
+// TestOCSPPeerNextUpdateUnset tests the scenario where the responder does not set NextUpdate and the cache TTL option is used
+func TestOCSPPeerNextUpdateUnset(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ rootCAResponder := newOCSPResponderRootCA(t)
+ rootCAResponderURL := fmt.Sprintf("http://%s", rootCAResponder.Addr)
+ defer rootCAResponder.Shutdown(ctx)
+ setOCSPStatus(t, rootCAResponderURL, "configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem", ocsp.Good)
+
+ respCertPEM := "configs/certs/ocsp_peer/mini-ca/ocsp1/ocsp1_bundle.pem"
+ respKeyPEM := "configs/certs/ocsp_peer/mini-ca/ocsp1/private/ocsp1_keypair.pem"
+ issuerCertPEM := "configs/certs/ocsp_peer/mini-ca/intermediate1/intermediate1_cert.pem"
+ intermediateCA1Responder := newOCSPResponderBase(t, issuerCertPEM, respCertPEM, respKeyPEM, true, "127.0.0.1:18888", 0)
+ intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr)
+ defer intermediateCA1Responder.Shutdown(ctx)
+ setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/client1/UserA1_cert.pem", ocsp.Good)
+
+ for _, test := range []struct {
+ name string
+ config string
+ opts []nats.Option
+ err error
+ rerr error
+ expectedMisses int64
+ configure func()
+ }{
+ {
+ "TTL set to 4 seconds with second client connection leveraging cache from first client connect",
+ `
+ port: -1
+ http_port: 8222
+ tls: {
+ cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem"
+ key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem"
+ ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"
+ timeout: 5
+ verify: true
+ # Long form configuration
+ ocsp_peer: {
+ verify: true
+ ca_timeout: 5
+ allowed_clockskew: 0
+ cache_ttl_when_next_update_unset: 4
+ }
+ }
+ # Short form configuration, local as default
+ ocsp_cache: true
+ `,
+ []nats.Option{
+ nats.ClientCert("./configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem", "./configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem"),
+ nats.RootCAs("./configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"),
+ nats.ErrorHandler(noOpErrHandler),
+ },
+ nil,
+ nil,
+ 2,
+ func() {},
+ },
+ {
+ "TTL set to 1 second with second client connection not leveraging cache items from first client connect",
+ `
+ port: -1
+ http_port: 8222
+ tls: {
+ cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem"
+ key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem"
+ ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"
+ timeout: 5
+ verify: true
+ # Long form configuration
+ ocsp_peer: {
+ verify: true
+ ca_timeout: 5
+ allowed_clockskew: 0
+ cache_ttl_when_next_update_unset: 1
+ }
+ }
+ # Short form configuration, local as default
+ ocsp_cache: true
+ `,
+ []nats.Option{
+ nats.ClientCert("./configs/certs/ocsp_peer/mini-ca/client1/UserA1_bundle.pem", "./configs/certs/ocsp_peer/mini-ca/client1/private/UserA1_keypair.pem"),
+ nats.RootCAs("./configs/certs/ocsp_peer/mini-ca/root/root_cert.pem"),
+ nats.ErrorHandler(noOpErrHandler),
+ },
+ nil,
+ nil, + 3, + func() {}, + }, + } { + t.Run(test.name, func(t *testing.T) { + // Cleanup any previous test that saved a local cache + deleteLocalStore(t, "") + test.configure() + content := test.config + conf := createConfFile(t, []byte(content)) + s, opts := RunServerWithConfig(conf) + defer s.Shutdown() + nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) + if test.err == nil && err != nil { + t.Errorf("Expected to connect, got %v", err) + } else if test.err != nil && err == nil { + t.Errorf("Expected error on connect") + } else if test.err != nil && err != nil { + // Error on connect was expected + if test.err.Error() != err.Error() { + t.Errorf("Expected error %s, got: %s", test.err, err) + } + return + } + nc.Close() + + // Wait interval shorter than first test, and longer than second test + time.Sleep(2 * time.Second) + + nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) + if test.err == nil && err != nil { + t.Errorf("Expected to connect, got %v", err) + } else if test.err != nil && err == nil { + t.Errorf("Expected error on connect") + } else if test.err != nil && err != nil { + // Error on connect was expected + if test.err.Error() != err.Error() { + t.Errorf("Expected error %s, got: %s", test.err, err) + } + return + } + defer nc.Close() + + v := monitorGetVarzHelper(t, 8222) + if v.OCSPResponseCache.Misses != test.expectedMisses || v.OCSPResponseCache.Responses != 2 { + t.Errorf("Expected cache misses to be %d and cache items to be 2, got %d and %d", test.expectedMisses, v.OCSPResponseCache.Misses, v.OCSPResponseCache.Responses) + } + }) + } +} diff --git a/test/ocsp_test.go b/test/ocsp_test.go index e83fea7c1..bf1635051 100644 --- a/test/ocsp_test.go +++ b/test/ocsp_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 The NATS Authors +// Copyright 2021-2023 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -16,7 +16,7 @@ package test import ( "bytes" "context" - "crypto/rsa" + "crypto" "crypto/tls" "crypto/x509" "encoding/base64" @@ -32,9 +32,15 @@ import ( "testing" "time" + "golang.org/x/crypto/ocsp" + "github.com/memphisdev/memphis/server" "github.com/nats-io/nats.go" - "golang.org/x/crypto/ocsp" +) + +const ( + defaultResponseTTL = 4 * time.Second + defaultAddress = "127.0.0.1:8888" ) func TestOCSPAlwaysMustStapleAndShutdown(t *testing.T) { @@ -1255,6 +1261,7 @@ func TestOCSPLeaf(t *testing.T) { setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good) setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good) setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/client-cert.pem", ocsp.Good) // Store Dirs storeDirA := t.TempDir() @@ -1275,6 +1282,7 @@ func TestOCSPLeaf(t *testing.T) { timeout: 5 } store_dir: '%s' + leafnodes { host: "127.0.0.1" port: -1 @@ -1285,6 +1293,8 @@ func TestOCSPLeaf(t *testing.T) { key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 + # Leaf connection must present certs. + verify: true } } ` @@ -1293,7 +1303,8 @@ func TestOCSPLeaf(t *testing.T) { srvA, optsA := RunServerWithConfig(sconfA) defer srvA.Shutdown() - // LeafNode that has the original as a remote. 
+ // LeafNode that has the original as a remote and runs
+ // without OCSP Stapling for the leaf remote.
 srvConfB := `
 host: "127.0.0.1"
 port: -1
@@ -1307,12 +1318,14 @@ func TestOCSPLeaf(t *testing.T) {
 timeout: 5
 }
 store_dir: '%s'
+
 leafnodes {
 remotes: [ {
 url: "tls://127.0.0.1:%d"
 tls {
- cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
- key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
+ # Cert without OCSP Stapling enabled is able to connect.
+ cert_file: "configs/certs/ocsp/client-cert.pem"
+ key_file: "configs/certs/ocsp/client-key.pem"
 ca_file: "configs/certs/ocsp/ca-cert.pem"
 timeout: 5
 }
@@ -1341,19 +1354,19 @@ func TestOCSPLeaf(t *testing.T) {
 t.Fatal(err)
 }
 defer cA.Close()
- // checkLeafNodeConnected(t, srvA)
+ checkLeafNodeConnected(t, srvA)
 
 // Revoke the seed server cluster certificate, following servers will not be able to verify connection.
 setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Revoked)
 
- // Original set of servers still can communicate to each other, even though the cert has been revoked.
- // checkLeafNodeConnected(t, srvA)
+ // The original set of servers can still communicate with each other via the leafnode, even though
+ // the staple for the leaf server has been revoked.
+ checkLeafNodeConnected(t, srvA)
 
- // Wait for seed server to notice that its certificate has been revoked,
- // so that new leafnodes can't connect to it.
+ // Wait for seed server to notice that its certificate has been revoked.
 time.Sleep(6 * time.Second)
 
- // Start another server against the seed server that has an invalid OCSP Staple
+ // Start another server against the seed server that has a revoked OCSP Staple.
 srvConfC := `
 host: "127.0.0.1"
 port: -1
@@ -1417,7 +1430,8 @@ func TestOCSPLeaf(t *testing.T) {
 }
 defer cC.Close()
 
- // There should be no connectivity between the clients due to the revoked staple.
+ // There should be connectivity between the clients even if there is a revoked staple
+ // from a leafnode connection.
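+ // The revoked staple belongs to the hub's leafnode listener cert; client
+ // connections use the servers' client-facing TLS contexts, so traffic over
+ // the established leafnode keeps flowing.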
_, err = cA.Subscribe("foo", func(m *nats.Msg) { m.Respond(nil) }) @@ -1432,13 +1446,13 @@ func TestOCSPLeaf(t *testing.T) { t.Fatal(err) } cB.Flush() - resp, err := cC.Request("foo", nil, 2*time.Second) - if err == nil { - t.Errorf("Unexpected success, response: %+v", resp) + _, err = cC.Request("foo", nil, 2*time.Second) + if err != nil { + t.Errorf("Expected success, got: %+v", err) } - resp, err = cC.Request("bar", nil, 2*time.Second) - if err == nil { - t.Errorf("Unexpected success, response: %+v", resp) + _, err = cC.Request("bar", nil, 2*time.Second) + if err != nil { + t.Errorf("Expected success, got: %+v", err) } // Switch the certs from the leafnode server to new ones that are not revoked, @@ -1466,6 +1480,7 @@ func TestOCSPLeaf(t *testing.T) { key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 + verify: true } } ` @@ -1502,7 +1517,7 @@ func TestOCSPLeaf(t *testing.T) { } } -func TestOCSPGateway(t *testing.T) { +func TestOCSPLeafNoVerify(t *testing.T) { const ( caCert = "configs/certs/ocsp/ca-cert.pem" caKey = "configs/certs/ocsp/ca-key.pem" @@ -1520,13 +1535,14 @@ func TestOCSPGateway(t *testing.T) { setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good) setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good) setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/client-cert.pem", ocsp.Good) // Store Dirs storeDirA := t.TempDir() storeDirB := t.TempDir() storeDirC := t.TempDir() - // Gateway server configuration + // LeafNode server configuration srvConfA := ` host: "127.0.0.1" port: -1 @@ -1540,8 +1556,8 @@ func TestOCSPGateway(t *testing.T) { timeout: 5 } store_dir: '%s' - gateway { - name: A + + leafnodes { host: "127.0.0.1" port: -1 advertise: "127.0.0.1" @@ -1551,6 +1567,8 @@ func TestOCSPGateway(t *testing.T) { key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 + # Leaf server does not require certs for clients. + verify: false } } ` @@ -1559,7 +1577,7 @@ func TestOCSPGateway(t *testing.T) { srvA, optsA := RunServerWithConfig(sconfA) defer srvA.Shutdown() - // LeafNode that has the original as a remote. + // LeafNode remote that will connect to A and will not present certs. 
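+ // With verify: false on the hub's leafnode listener, a remote that presents
+ // no client certificate can still establish the leafnode connection.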
 srvConfB := `
 host: "127.0.0.1"
 port: -1
@@ -1573,30 +1591,18 @@ func TestOCSPGateway(t *testing.T) {
 timeout: 5
 }
 store_dir: '%s'
- gateway {
- name: B
- host: "127.0.0.1"
- advertise: "127.0.0.1"
- port: -1
- gateways: [{
- name: "A"
- url: "nats://127.0.0.1:%d"
+
+ leafnodes {
+ remotes: [ {
+ url: "tls://127.0.0.1:%d"
 tls {
- cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
- key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
 ca_file: "configs/certs/ocsp/ca-cert.pem"
 timeout: 5
 }
- }]
- tls {
- cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
- key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
- ca_file: "configs/certs/ocsp/ca-cert.pem"
- timeout: 5
- }
+ } ]
 }
 `
- srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.Gateway.Port)
+ srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.LeafNode.Port)
 conf := createConfFile(t, []byte(srvConfB))
 srvB, optsB := RunServerWithConfig(conf)
 defer srvB.Shutdown()
@@ -1618,20 +1624,18 @@ func TestOCSPGateway(t *testing.T) {
 t.Fatal(err)
 }
 defer cA.Close()
- waitForOutboundGateways(t, srvB, 1, 5*time.Second)
+ checkLeafNodeConnected(t, srvA)
 
 // Revoke the seed server cluster certificate, following servers will not be able to verify connection.
 setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Revoked)
 
 // Original set of servers still can communicate to each other, even though the cert has been revoked.
- waitForOutboundGateways(t, srvA, 1, 5*time.Second)
- waitForOutboundGateways(t, srvB, 1, 5*time.Second)
+ checkLeafNodeConnected(t, srvA)
 
- // Wait for gateway A to notice that its certificate has been revoked,
- // so that new gateways can't connect to it.
+ // Wait for seed server to notice that its certificate has been revoked.
 time.Sleep(6 * time.Second)
 
- // Start another server against the seed server that has an invalid OCSP Staple
+ // Start another server against the seed server that has a revoked OCSP Staple.
 srvConfC := `
 host: "127.0.0.1"
 port: -1
@@ -1645,21 +1649,19 @@ func TestOCSPGateway(t *testing.T) {
 timeout: 5
 }
 store_dir: '%s'
- gateway {
- name: C
- host: "127.0.0.1"
- advertise: "127.0.0.1"
- port: -1
- gateways: [{name: "A", url: "nats://127.0.0.1:%d" }]
- tls {
- cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem"
- key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem"
- ca_file: "configs/certs/ocsp/ca-cert.pem"
- timeout: 5
- }
+ leafnodes {
+ remotes: [ {
+ url: "tls://127.0.0.1:%d"
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ } ]
 }
 `
- srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.Gateway.Port)
+ srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.LeafNode.Port)
 conf = createConfFile(t, []byte(srvConfC))
 srvC, optsC := RunServerWithConfig(conf)
 defer srvC.Shutdown()
@@ -1697,7 +1700,8 @@ func TestOCSPGateway(t *testing.T) {
 }
 defer cC.Close()
 
- // There should be no connectivity between the clients due to the revoked staple.
+ // There should be connectivity between the clients even if there is a revoked staple
+ // from a leafnode connection.
_, err = cA.Subscribe("foo", func(m *nats.Msg) { m.Respond(nil) }) @@ -1712,21 +1716,17 @@ func TestOCSPGateway(t *testing.T) { t.Fatal(err) } cB.Flush() - - // Gateway C was not able to mesh with Gateway A because of the revoked OCSP staple - // so these requests to A and B should fail. - resp, err := cC.Request("foo", nil, 2*time.Second) - if err == nil { - t.Errorf("Unexpected success, response: %+v", resp) + _, err = cC.Request("foo", nil, 2*time.Second) + if err != nil { + t.Errorf("Expected success, got: %+v", err) } - // Make request to B - resp, err = cC.Request("bar", nil, 2*time.Second) - if err == nil { - t.Errorf("Unexpected success, response: %+v", resp) + _, err = cC.Request("bar", nil, 2*time.Second) + if err != nil { + t.Errorf("Expected success, got: %+v", err) } - // Switch the certs from the seed server to new ones that are not revoked, - // this should restart OCSP Stapling for the cluster routes. + // Switch the certs from the leafnode server to new ones that are not revoked, + // this should restart OCSP Stapling for the leafnode server. srvConfA = ` host: "127.0.0.1" port: -1 @@ -1740,8 +1740,7 @@ func TestOCSPGateway(t *testing.T) { timeout: 5 } store_dir: '%s' - gateway { - name: A + leafnodes { host: "127.0.0.1" port: -1 advertise: "127.0.0.1" @@ -1751,10 +1750,10 @@ func TestOCSPGateway(t *testing.T) { key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 + verify: true } } ` - srvConfA = fmt.Sprintf(srvConfA, storeDirA) if err := os.WriteFile(sconfA, []byte(srvConfA), 0666); err != nil { t.Fatalf("Error writing config: %v", err) @@ -1763,424 +1762,1463 @@ func TestOCSPGateway(t *testing.T) { t.Fatal(err) } time.Sleep(4 * time.Second) - waitForOutboundGateways(t, srvA, 2, 5*time.Second) - waitForOutboundGateways(t, srvB, 2, 5*time.Second) - waitForOutboundGateways(t, srvC, 2, 5*time.Second) - // Now clients connect to C can communicate with B and A. 
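+ // After the reload the leafnode link re-establishes, so requests flow
+ // between any pair of clients again.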
+ // A <-> A + _, err = cA.Request("foo", nil, 2*time.Second) + if err != nil { + t.Errorf("%v", err) + } + + // B <-> A + _, err = cB.Request("foo", nil, 2*time.Second) + if err != nil { + t.Errorf("%v", err) + } + + // C <-> A _, err = cC.Request("foo", nil, 2*time.Second) if err != nil { t.Errorf("%v", err) } + // C <-> B via leafnode A _, err = cC.Request("bar", nil, 2*time.Second) if err != nil { t.Errorf("%v", err) } } -func TestOCSPGatewayIntermediate(t *testing.T) { +func TestOCSPLeafVerifyLeafRemote(t *testing.T) { const ( - caCert = "configs/certs/ocsp/desgsign/ca-cert.pem" - caIntermCert = "configs/certs/ocsp/desgsign/ca-interm-cert.pem" - caIntermKey = "configs/certs/ocsp/desgsign/ca-interm-key.pem" + caCert = "configs/certs/ocsp/ca-cert.pem" + caKey = "configs/certs/ocsp/ca-key.pem" ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - ocspr := newOCSPResponderDesignated(t, caCert, caIntermCert, caIntermKey, true) + ocspr := newOCSPResponder(t, caCert, caKey) defer ocspr.Shutdown(ctx) - addr := fmt.Sprintf("http://%s", ocspr.Addr) - setOCSPStatus(t, addr, "configs/certs/ocsp/desgsign/server-01-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/desgsign/server-02-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/client-cert.pem", ocsp.Good) - // Gateway server configuration + // Store Dirs + storeDirA := t.TempDir() + storeDirB := t.TempDir() + + // LeafNode server configuration srvConfA := ` host: "127.0.0.1" port: -1 server_name: "AAA" - ocsp: { - mode: always - url: %s + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 } + store_dir: '%s' - gateway { - name: A + leafnodes { host: "127.0.0.1" port: -1 advertise: "127.0.0.1" tls { - cert_file: "configs/certs/ocsp/desgsign/server-01-cert.pem" - key_file: "configs/certs/ocsp/desgsign/server-01-key.pem" - ca_file: "configs/certs/ocsp/desgsign/ca-chain-cert.pem" + cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 + verify: true } } ` - srvConfA = fmt.Sprintf(srvConfA, addr) + srvConfA = fmt.Sprintf(srvConfA, storeDirA) sconfA := createConfFile(t, []byte(srvConfA)) srvA, optsA := RunServerWithConfig(sconfA) defer srvA.Shutdown() + // LeafNode remote that will connect to A and will not present certs. 
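+ // Unlike the previous test, the hub's leafnode listener now sets
+ // verify: true, so this cert-less remote should be rejected.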
srvConfB := ` host: "127.0.0.1" port: -1 server_name: "BBB" - ocsp: { - mode: always - url: %s + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 } + store_dir: '%s' - gateway { - name: B - host: "127.0.0.1" - advertise: "127.0.0.1" - port: -1 - gateways: [{ - name: "A" - url: "nats://127.0.0.1:%d" - }] - tls { - cert_file: "configs/certs/ocsp/desgsign/server-02-cert.pem" - key_file: "configs/certs/ocsp/desgsign/server-02-key.pem" - ca_file: "configs/certs/ocsp/desgsign/ca-chain-cert.pem" - timeout: 5 - } + leafnodes { + remotes: [ { + url: "tls://127.0.0.1:%d" + tls { + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + } ] } ` - srvConfB = fmt.Sprintf(srvConfB, addr, optsA.Gateway.Port) + srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.LeafNode.Port) conf := createConfFile(t, []byte(srvConfB)) - srvB, optsB := RunServerWithConfig(conf) + srvB, _ := RunServerWithConfig(conf) defer srvB.Shutdown() // Client connects to server A. - cA, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", optsA.Port), + cA, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsA.Port), + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return fmt.Errorf("missing OCSP Staple from server") + } + return nil + }, + }), + nats.RootCAs(caCert), nats.ErrorHandler(noOpErrHandler), ) if err != nil { t.Fatal(err) } defer cA.Close() - waitForOutboundGateways(t, srvB, 1, 5*time.Second) - cB, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", optsB.Port), - nats.ErrorHandler(noOpErrHandler), - ) - if err != nil { - t.Fatal(err) - } - defer cB.Close() + // Should not have been able to connect. 
+ checkLeafNodeConnections(t, srvA, 0) } -func TestOCSPCustomConfig(t *testing.T) { +func TestOCSPLeafVerifyAndMapLeafRemote(t *testing.T) { const ( - caCert = "configs/certs/ocsp/ca-cert.pem" - caKey = "configs/certs/ocsp/ca-key.pem" - serverCert = "configs/certs/ocsp/server-cert.pem" - serverKey = "configs/certs/ocsp/server-key.pem" + caCert = "configs/certs/ocsp/ca-cert.pem" + caKey = "configs/certs/ocsp/ca-key.pem" ) - ctx, cancel := context.WithCancel(context.Background()) defer cancel() ocspr := newOCSPResponder(t, caCert, caKey) - ocspURL := fmt.Sprintf("http://%s", ocspr.Addr) defer ocspr.Shutdown(ctx) + addr := fmt.Sprintf("http://%s", ocspr.Addr) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/client-cert.pem", ocsp.Good) - var ( - errExpectedNoStaple = fmt.Errorf("expected no staple") - errMissingStaple = fmt.Errorf("missing OCSP Staple from server") - ) - - for _, test := range []struct { - name string - config string - opts []nats.Option - err error - rerr error - configure func() - }{ - { - "OCSP Stapling in auto mode makes server fail to boot if status is revoked", - ` - port: -1 + // Store Dirs + storeDirA := t.TempDir() + storeDirB := t.TempDir() - ocsp { - mode: auto - } + // LeafNode server configuration + srvConfA := ` + host: "127.0.0.1" + port: -1 - tls { - cert_file: "configs/certs/ocsp/server-cert.pem" - key_file: "configs/certs/ocsp/server-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - } - `, - []nats.Option{ - nats.Secure(&tls.Config{ - VerifyConnection: func(s tls.ConnectionState) error { - if s.OCSPResponse != nil { - return errExpectedNoStaple - } - return nil - }, - }), - nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), - nats.RootCAs(caCert), - nats.ErrorHandler(noOpErrHandler), - }, - nil, - nil, - func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Revoked) }, - }, - { - "OCSP Stapling must staple ignored if disabled with ocsp: false", - ` - port: -1 + server_name: "AAA" - ocsp: false + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + verify_and_map: true + } + store_dir: '%s' + + leafnodes { + host: "127.0.0.1" + port: -1 + advertise: "127.0.0.1" + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + verify_and_map: true + } + } + + accounts: { + leaf: { + users: [ {user: "C=US, ST=CA, L=San Francisco, O=Synadia, OU=nats.io, CN=localhost server-status-request-url-04"} ] + } + client: { + users: [ {user: "C=US, ST=CA, L=San 
Francisco, O=Synadia, OU=nats.io, CN=localhost client"} ]
+ }
+ }
+
+ `
+ srvConfA = fmt.Sprintf(srvConfA, storeDirA)
+ sconfA := createConfFile(t, []byte(srvConfA))
+ srvA, optsA := RunServerWithConfig(sconfA)
+ defer srvA.Shutdown()
+
+ // LeafNode remote that will connect to A and will present certs.
+ srvConfB := `
+ host: "127.0.0.1"
+ port: -1
+
+ server_name: "BBB"
+
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ store_dir: '%s'
+ leafnodes {
+ remotes: [ {
+ url: "tls://127.0.0.1:%d"
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ } ]
+ }
+ `
+ srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.LeafNode.Port)
+ conf := createConfFile(t, []byte(srvConfB))
+ srvB, _ := RunServerWithConfig(conf)
+ defer srvB.Shutdown()
+
+ // Client connects to server A.
+ cA, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsA.Port),
+ nats.Secure(&tls.Config{
+ VerifyConnection: func(s tls.ConnectionState) error {
+ if s.OCSPResponse == nil {
+ return fmt.Errorf("missing OCSP Staple from server")
+ }
+ return nil
+ },
+ }),
+ nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"),
+ nats.RootCAs(caCert),
+ nats.ErrorHandler(noOpErrHandler),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cA.Close()
+ checkLeafNodeConnections(t, srvA, 1)
+}
+
+func TestOCSPGateway(t *testing.T) {
+ const (
+ caCert = "configs/certs/ocsp/ca-cert.pem"
+ caKey = "configs/certs/ocsp/ca-key.pem"
+ )
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ ocspr := newOCSPResponder(t, caCert, caKey)
+ defer ocspr.Shutdown(ctx)
+ addr := fmt.Sprintf("http://%s", ocspr.Addr)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good)
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good)
+
+ // Store Dirs
+ storeDirA := t.TempDir()
+ storeDirB := t.TempDir()
+ storeDirC := 
t.TempDir()
+
+ // Gateway server configuration
+ srvConfA := `
+ host: "127.0.0.1"
+ port: -1
+
+ server_name: "AAA"
+
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ store_dir: '%s'
+ gateway {
+ name: A
+ host: "127.0.0.1"
+ port: -1
+ advertise: "127.0.0.1"
+
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ }
+ `
+ srvConfA = fmt.Sprintf(srvConfA, storeDirA)
+ sconfA := createConfFile(t, []byte(srvConfA))
+ srvA, optsA := RunServerWithConfig(sconfA)
+ defer srvA.Shutdown()
+ // Gateway B has gateway A as its remote.
+ srvConfB := `
+ host: "127.0.0.1"
+ port: -1
+
+ server_name: "BBB"
+
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ store_dir: '%s'
+ gateway {
+ name: B
+ host: "127.0.0.1"
+ advertise: "127.0.0.1"
+ port: -1
+ gateways: [{
+ name: "A"
+ url: "nats://127.0.0.1:%d"
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ }]
+ tls {
+ cert_file: "configs/certs/ocsp/server-status-request-url-04-cert.pem"
+ key_file: "configs/certs/ocsp/server-status-request-url-04-key.pem"
+ ca_file: "configs/certs/ocsp/ca-cert.pem"
+ timeout: 5
+ }
+ }
+ `
+ srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.Gateway.Port)
+ conf := createConfFile(t, []byte(srvConfB))
+ srvB, optsB := RunServerWithConfig(conf)
+ defer srvB.Shutdown()
+
+ // Client connects to server A.
+ cA, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsA.Port),
+ nats.Secure(&tls.Config{
+ VerifyConnection: func(s tls.ConnectionState) error {
+ if s.OCSPResponse == nil {
+ return fmt.Errorf("missing OCSP Staple from server")
+ }
+ return nil
+ },
+ }),
+ nats.RootCAs(caCert),
+ nats.ErrorHandler(noOpErrHandler),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer cA.Close()
+ waitForOutboundGateways(t, srvB, 1, 5*time.Second)
+
+ // Revoke the seed server cluster certificate, following servers will not be able to verify connection.
+ setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Revoked)
+
+ // The original set of servers can still communicate with each other, even though the cert has been revoked.
+ waitForOutboundGateways(t, srvA, 1, 5*time.Second)
+ waitForOutboundGateways(t, srvB, 1, 5*time.Second)
+
+ // Wait for gateway A to notice that its certificate has been revoked,
+ // so that new gateways can't connect to it.
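+ // The fixed pause below is assumed to exceed the staple refresh interval in
+ // this test setup, giving the server time to fetch the revoked status.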
+ time.Sleep(6 * time.Second) + + // Start another server against the seed server that has an invalid OCSP Staple + srvConfC := ` + host: "127.0.0.1" + port: -1 + + server_name: "CCC" + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-05-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-05-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + store_dir: '%s' + gateway { + name: C + host: "127.0.0.1" + advertise: "127.0.0.1" + port: -1 + gateways: [{name: "A", url: "nats://127.0.0.1:%d" }] + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + } + ` + srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.Gateway.Port) + conf = createConfFile(t, []byte(srvConfC)) + srvC, optsC := RunServerWithConfig(conf) + defer srvC.Shutdown() + + cB, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsB.Port), + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return fmt.Errorf("missing OCSP Staple from server") + } + return nil + }, + }), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + ) + if err != nil { + t.Fatal(err) + } + defer cB.Close() + cC, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsC.Port), + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return fmt.Errorf("missing OCSP Staple from server") + } + return nil + }, + }), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + ) + if err != nil { + t.Fatal(err) + } + defer cC.Close() + + // There should be no connectivity between the clients due to the revoked staple. + _, err = cA.Subscribe("foo", func(m *nats.Msg) { + m.Respond(nil) + }) + if err != nil { + t.Errorf("%v", err) + } + cA.Flush() + _, err = cB.Subscribe("bar", func(m *nats.Msg) { + m.Respond(nil) + }) + if err != nil { + t.Fatal(err) + } + cB.Flush() + + // Gateway C was not able to mesh with Gateway A because of the revoked OCSP staple + // so these requests to A and B should fail. + resp, err := cC.Request("foo", nil, 2*time.Second) + if err == nil { + t.Errorf("Unexpected success, response: %+v", resp) + } + // Make request to B + resp, err = cC.Request("bar", nil, 2*time.Second) + if err == nil { + t.Errorf("Unexpected success, response: %+v", resp) + } + + // Switch the certs from the seed server to new ones that are not revoked, + // this should restart OCSP Stapling for the cluster routes. 
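+ // The replacement certs (url-07/url-08) were already marked Good on the
+ // responder during test setup, so stapling can resume.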
+ srvConfA = ` + host: "127.0.0.1" + port: -1 + + server_name: "AAA" + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-07-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-07-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + store_dir: '%s' + gateway { + name: A + host: "127.0.0.1" + port: -1 + advertise: "127.0.0.1" + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + } + ` + + srvConfA = fmt.Sprintf(srvConfA, storeDirA) + if err := os.WriteFile(sconfA, []byte(srvConfA), 0666); err != nil { + t.Fatalf("Error writing config: %v", err) + } + if err := srvA.Reload(); err != nil { + t.Fatal(err) + } + time.Sleep(4 * time.Second) + waitForOutboundGateways(t, srvA, 2, 5*time.Second) + waitForOutboundGateways(t, srvB, 2, 5*time.Second) + waitForOutboundGateways(t, srvC, 2, 5*time.Second) + + // Now clients connect to C can communicate with B and A. + _, err = cC.Request("foo", nil, 2*time.Second) + if err != nil { + t.Errorf("%v", err) + } + _, err = cC.Request("bar", nil, 2*time.Second) + if err != nil { + t.Errorf("%v", err) + } +} + +func TestOCSPGatewayIntermediate(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + intermediateCA1Responder := newOCSPResponderIntermediateCA1(t) + intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr) + defer intermediateCA1Responder.Shutdown(ctx) + setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem", ocsp.Good) + setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/server1/TestServer2_cert.pem", ocsp.Good) + + // Gateway server configuration + srvConfA := ` + host: "127.0.0.1" + port: -1 + + server_name: "AAA" + + ocsp: { + mode: always + url: %s + } + + gateway { + name: A + host: "127.0.0.1" + port: -1 + advertise: "127.0.0.1" + + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" + timeout: 5 + } + } + ` + srvConfA = fmt.Sprintf(srvConfA, intermediateCA1ResponderURL) + sconfA := createConfFile(t, []byte(srvConfA)) + srvA, optsA := RunServerWithConfig(sconfA) + defer srvA.Shutdown() + + srvConfB := ` + host: "127.0.0.1" + port: -1 + + server_name: "BBB" + + ocsp: { + mode: always + url: %s + } + + gateway { + name: B + host: "127.0.0.1" + advertise: "127.0.0.1" + port: -1 + gateways: [{ + name: "A" + url: "nats://127.0.0.1:%d" + }] + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer2_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer2_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" + timeout: 5 + } + } + ` + srvConfB = fmt.Sprintf(srvConfB, intermediateCA1ResponderURL, optsA.Gateway.Port) + conf := createConfFile(t, []byte(srvConfB)) + srvB, optsB := RunServerWithConfig(conf) + defer srvB.Shutdown() + + // Client connects to server A. 
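+ // The client port has no TLS configured in this test; OCSP stapling is
+ // exercised on the gateway link, which waitForOutboundGateways verifies.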
+ cA, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", optsA.Port), + nats.ErrorHandler(noOpErrHandler), + ) + if err != nil { + t.Fatal(err) + } + defer cA.Close() + waitForOutboundGateways(t, srvB, 1, 5*time.Second) + + cB, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", optsB.Port), + nats.ErrorHandler(noOpErrHandler), + ) + if err != nil { + t.Fatal(err) + } + defer cB.Close() +} + +func TestOCSPCustomConfig(t *testing.T) { + const ( + caCert = "configs/certs/ocsp/ca-cert.pem" + caKey = "configs/certs/ocsp/ca-key.pem" + serverCert = "configs/certs/ocsp/server-cert.pem" + serverKey = "configs/certs/ocsp/server-key.pem" + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ocspr := newOCSPResponder(t, caCert, caKey) + ocspURL := fmt.Sprintf("http://%s", ocspr.Addr) + defer ocspr.Shutdown(ctx) + + var ( + errExpectedNoStaple = fmt.Errorf("expected no staple") + errMissingStaple = fmt.Errorf("missing OCSP Staple from server") + ) + + for _, test := range []struct { + name string + config string + opts []nats.Option + err error + rerr error + configure func() + }{ + { + "OCSP Stapling in auto mode makes server fail to boot if status is revoked", + ` + port: -1 + + ocsp { + mode: auto + } + + tls { + cert_file: "configs/certs/ocsp/server-cert.pem" + key_file: "configs/certs/ocsp/server-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse != nil { + return errExpectedNoStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Revoked) }, + }, + { + "OCSP Stapling must staple ignored if disabled with ocsp: false", + ` + port: -1 + + ocsp: false + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse != nil { + return errExpectedNoStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() { + setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) + }, + }, + { + "OCSP Stapling must staple ignored if disabled with ocsp mode never", + ` + port: -1 + + ocsp: { mode: never } + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse != nil { + return errExpectedNoStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() { + setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) + }, + }, + { + "OCSP Stapling in always mode fetches a staple even if cert does 
not have one", + ` + port: -1 + + ocsp { + mode: always + url: "http://127.0.0.1:8888" + } + + tls { + cert_file: "configs/certs/ocsp/server-cert.pem" + key_file: "configs/certs/ocsp/server-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) }, + }, + { + "OCSP Stapling in must staple mode does not fetch staple if there is no must staple flag", + ` + port: -1 + + ocsp { + mode: must + url: "http://127.0.0.1:8888" + } + + tls { + cert_file: "configs/certs/ocsp/server-cert.pem" + key_file: "configs/certs/ocsp/server-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse != nil { + return errExpectedNoStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) }, + }, + { + "OCSP Stapling in must staple mode fetches staple if there is a must staple flag", + ` + port: -1 + + ocsp { + mode: must + url: "http://127.0.0.1:8888" + } + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + func() { setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) }, - }, - { - "OCSP Stapling in always mode fetches a staple even if cert does not have one", - ` - port: -1 + }, + } { + t.Run(test.name, func(t *testing.T) { + test.configure() + content := test.config + conf := createConfFile(t, []byte(content)) + s, opts := RunServerWithConfig(conf) + defer s.Shutdown() + + nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) 
+ if test.err == nil && err != nil { + t.Errorf("Expected to connect, got %v", err) + } else if test.err != nil && err == nil { + t.Errorf("Expected error on connect") + } else if test.err != nil && err != nil { + // Error on connect was expected + if test.err.Error() != err.Error() { + t.Errorf("Expected error %s, got: %s", test.err, err) + } + return + } + defer nc.Close() + + nc.Subscribe("ping", func(m *nats.Msg) { + m.Respond([]byte("pong")) + }) + nc.Flush() + + _, err = nc.Request("ping", []byte("ping"), 250*time.Millisecond) + if test.rerr != nil && err == nil { + t.Errorf("Expected error getting response") + } else if test.rerr == nil && err != nil { + t.Errorf("Expected response") + } + }) + } +} + +func TestOCSPCustomConfigReloadDisable(t *testing.T) { + const ( + caCert = "configs/certs/ocsp/ca-cert.pem" + caKey = "configs/certs/ocsp/ca-key.pem" + serverCert = "configs/certs/ocsp/server-cert.pem" + updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem" + ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ocspr := newOCSPResponder(t, caCert, caKey) + defer ocspr.Shutdown(ctx) + addr := fmt.Sprintf("http://%s", ocspr.Addr) + setOCSPStatus(t, addr, serverCert, ocsp.Good) + setOCSPStatus(t, addr, updatedServerCert, ocsp.Good) + + // Start with server without OCSP Stapling MustStaple + content := ` + port: -1 + + ocsp: { mode: always, url: "http://127.0.0.1:8888" } + + tls { + cert_file: "configs/certs/ocsp/server-cert.pem" + key_file: "configs/certs/ocsp/server-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + ` + conf := createConfFile(t, []byte(content)) + s, opts := RunServerWithConfig(conf) + defer s.Shutdown() + + nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return fmt.Errorf("missing OCSP Staple!") + } + return nil + }, + }), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + ) + if err != nil { + t.Fatal(err) + } + defer nc.Close() + sub, err := nc.SubscribeSync("foo") + if err != nil { + t.Fatal(err) + } + nc.Publish("foo", []byte("hello world")) + nc.Flush() + + _, err = sub.NextMsg(1 * time.Second) + if err != nil { + t.Fatal(err) + } + nc.Close() + + // Change and disable OCSP Stapling. + content = ` + port: -1 + + ocsp: { mode: never } + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + ` + if err := os.WriteFile(conf, []byte(content), 0666); err != nil { + t.Fatalf("Error writing config: %v", err) + } + if err := s.Reload(); err != nil { + t.Fatal(err) + } + + // The new certificate has must staple but OCSP Stapling is disabled. 
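[Editor's note: the reload test above hinges on whether the server certificate carries the RFC 7633 TLS Feature ("must staple") extension. A minimal sketch of checking that by hand, assuming the standard id-pe-tlsfeature OID and the parseCertPEM helper added later in this file; the hasMustStaple name is ours, and encoding/asn1 would need to be imported:]

```go
// hasMustStaple reports whether the certificate at path carries the
// id-pe-tlsfeature ("must staple") extension, OID 1.3.6.1.5.5.7.1.24.
// Sketch only, not part of the test suite.
func hasMustStaple(t *testing.T, path string) bool {
	t.Helper()
	cert := parseCertPEM(t, path)
	tlsFeature := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
	for _, ext := range cert.Extensions {
		if ext.Id.Equal(tlsFeature) {
			return true
		}
	}
	return false
}
```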
+	time.Sleep(2 * time.Second)
+
+	nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
+		nats.Secure(&tls.Config{
+			VerifyConnection: func(s tls.ConnectionState) error {
+				if s.OCSPResponse != nil {
+					return fmt.Errorf("unexpected OCSP Staple!")
+				}
+				return nil
+			},
+		}),
+		nats.RootCAs(caCert),
+		nats.ErrorHandler(noOpErrHandler),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nc.Close()
+}
+
+func TestOCSPCustomConfigReloadEnable(t *testing.T) {
+	const (
+		caCert            = "configs/certs/ocsp/ca-cert.pem"
+		caKey             = "configs/certs/ocsp/ca-key.pem"
+		serverCert        = "configs/certs/ocsp/server-cert.pem"
+		updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem"
+	)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	ocspr := newOCSPResponder(t, caCert, caKey)
+	defer ocspr.Shutdown(ctx)
+	addr := fmt.Sprintf("http://%s", ocspr.Addr)
+	setOCSPStatus(t, addr, serverCert, ocsp.Good)
+	setOCSPStatus(t, addr, updatedServerCert, ocsp.Good)
+
+	// Start with server without OCSP Stapling MustStaple
+	content := `
+		port: -1
+
+		ocsp: { mode: never, url: "http://127.0.0.1:8888" }
+
+		tls {
+			cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem"
+			key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem"
+			ca_file: "configs/certs/ocsp/ca-cert.pem"
+			timeout: 5
+		}
+	`
+	conf := createConfFile(t, []byte(content))
+	s, opts := RunServerWithConfig(conf)
+	defer s.Shutdown()
+
+	nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port),
+		nats.Secure(&tls.Config{
+			VerifyConnection: func(s tls.ConnectionState) error {
+				if s.OCSPResponse != nil {
+					return fmt.Errorf("unexpected OCSP Staple!")
+				}
+				return nil
+			},
+		}),
+		nats.RootCAs(caCert),
+		nats.ErrorHandler(noOpErrHandler),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer nc.Close()
+	sub, err := nc.SubscribeSync("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	nc.Publish("foo", []byte("hello world"))
+	nc.Flush()
+
+	_, err = sub.NextMsg(1 * time.Second)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nc.Close()
+
+	// Change and enable OCSP Stapling.
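[Editor's note: before the config swap below, both reload tests pause a fixed two seconds after s.Reload() and hope the stapling state has settled. A polling alternative is sketched here, assuming that connecting and inspecting tls.ConnectionState.OCSPResponse is cheap; the helper name is ours:]

```go
// waitForStaple reconnects until the server presents an OCSP staple or the
// timeout elapses, avoiding a fixed post-reload sleep. Sketch only.
func waitForStaple(t *testing.T, url, caFile string, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		var stapled bool
		nc, err := nats.Connect(url,
			nats.Secure(&tls.Config{
				VerifyConnection: func(s tls.ConnectionState) error {
					// Record whether a staple was presented during the handshake.
					stapled = s.OCSPResponse != nil
					return nil
				},
			}),
			nats.RootCAs(caFile),
		)
		if err == nil {
			nc.Close()
			if stapled {
				return
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatalf("no OCSP staple presented within %v", timeout)
}
```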
+ content = ` + port: -1 + + ocsp: { mode: always, url: "http://127.0.0.1:8888" } + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + ` + if err := os.WriteFile(conf, []byte(content), 0666); err != nil { + t.Fatalf("Error writing config: %v", err) + } + if err := s.Reload(); err != nil { + t.Fatal(err) + } + time.Sleep(2 * time.Second) + + nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return fmt.Errorf("missing OCSP Staple!") + } + return nil + }, + }), + nats.RootCAs(caCert), + nats.ErrorHandler(noOpErrHandler), + ) + if err != nil { + t.Fatal(err) + } + nc.Close() +} + +func newOCSPResponderCustomAddress(t *testing.T, issuerCertPEM, issuerKeyPEM string, addr string) *http.Server { + t.Helper() + return newOCSPResponderBase(t, issuerCertPEM, issuerCertPEM, issuerKeyPEM, false, addr, defaultResponseTTL) +} + +func newOCSPResponder(t *testing.T, issuerCertPEM, issuerKeyPEM string) *http.Server { + t.Helper() + return newOCSPResponderBase(t, issuerCertPEM, issuerCertPEM, issuerKeyPEM, false, defaultAddress, defaultResponseTTL) +} + +func newOCSPResponderDesignatedCustomAddress(t *testing.T, issuerCertPEM, respCertPEM, respKeyPEM string, addr string) *http.Server { + t.Helper() + return newOCSPResponderBase(t, issuerCertPEM, respCertPEM, respKeyPEM, true, addr, defaultResponseTTL) +} + +func newOCSPResponderBase(t *testing.T, issuerCertPEM, respCertPEM, respKeyPEM string, embed bool, addr string, responseTTL time.Duration) *http.Server { + t.Helper() + var mu sync.Mutex + status := make(map[string]int) + + issuerCert := parseCertPEM(t, issuerCertPEM) + respCert := parseCertPEM(t, respCertPEM) + respKey := parseKeyPEM(t, respKeyPEM) + + mux := http.NewServeMux() + // The "/statuses/" endpoint is for directly setting a key-value pair in + // the CA's status database. + mux.HandleFunc("/statuses/", func(rw http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + key := r.URL.Path[len("/statuses/"):] + switch r.Method { + case "GET": + mu.Lock() + n, ok := status[key] + if !ok { + n = ocsp.Unknown + } + mu.Unlock() + + fmt.Fprintf(rw, "%s %d", key, n) + case "POST": + data, err := io.ReadAll(r.Body) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + n, err := strconv.Atoi(string(data)) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + mu.Lock() + status[key] = n + mu.Unlock() + + fmt.Fprintf(rw, "%s %d", key, n) + default: + http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + }) + // The "/" endpoint is for normal OCSP requests. This actually parses an + // OCSP status request and signs a response with a CA. 
Lightly based off: + // https://www.ietf.org/rfc/rfc2560.txt + mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + reqData, err := base64.StdEncoding.DecodeString(r.URL.Path[1:]) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + ocspReq, err := ocsp.ParseRequest(reqData) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + mu.Lock() + n, ok := status[ocspReq.SerialNumber.String()] + if !ok { + n = ocsp.Unknown + } + mu.Unlock() + + tmpl := ocsp.Response{ + Status: n, + SerialNumber: ocspReq.SerialNumber, + ThisUpdate: time.Now(), + } + if responseTTL != 0 { + tmpl.NextUpdate = tmpl.ThisUpdate.Add(responseTTL) + } + if embed { + tmpl.Certificate = respCert + } + respData, err := ocsp.CreateResponse(issuerCert, respCert, tmpl, respKey) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + rw.Header().Set("Content-Type", "application/ocsp-response") + rw.Header().Set("Content-Length", fmt.Sprint(len(respData))) + + fmt.Fprint(rw, string(respData)) + }) + + srv := &http.Server{ + Addr: addr, + Handler: mux, + } + go srv.ListenAndServe() + time.Sleep(1 * time.Second) + return srv +} + +func setOCSPStatus(t *testing.T, ocspURL, certPEM string, status int) { + t.Helper() + + cert := parseCertPEM(t, certPEM) + + hc := &http.Client{Timeout: 10 * time.Second} + resp, err := hc.Post( + fmt.Sprintf("%s/statuses/%s", ocspURL, cert.SerialNumber), + "", + strings.NewReader(fmt.Sprint(status)), + ) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + data, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read OCSP HTTP response body: %s", err) + } + + if got, want := resp.Status, "200 OK"; got != want { + t.Error(strings.TrimSpace(string(data))) + t.Fatalf("unexpected OCSP HTTP set status, got %q, want %q", got, want) + } +} + +func parseCertPEM(t *testing.T, certPEM string) *x509.Certificate { + t.Helper() + block := parsePEM(t, certPEM) - ocsp { - mode: always - url: "http://127.0.0.1:8888" - } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("failed to parse cert '%s': %s", certPEM, err) + } + return cert +} - tls { - cert_file: "configs/certs/ocsp/server-cert.pem" - key_file: "configs/certs/ocsp/server-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - } - `, - []nats.Option{ - nats.Secure(&tls.Config{ - VerifyConnection: func(s tls.ConnectionState) error { - if s.OCSPResponse == nil { - return errMissingStaple - } - return nil - }, - }), - nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), - nats.RootCAs(caCert), - nats.ErrorHandler(noOpErrHandler), - }, - nil, - nil, - func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) }, - }, - { - "OCSP Stapling in must staple mode does not fetch staple if there is no must staple flag", - ` - port: -1 +func parseKeyPEM(t *testing.T, keyPEM string) crypto.Signer { + t.Helper() + block := parsePEM(t, keyPEM) - ocsp { - mode: must - url: "http://127.0.0.1:8888" - } + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + key, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + t.Fatalf("failed to parse ikey %s: %s", keyPEM, err) + } + } + keyc := key.(crypto.Signer) + return keyc +} - tls { - cert_file: "configs/certs/ocsp/server-cert.pem" - key_file: 
"configs/certs/ocsp/server-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - } - `, - []nats.Option{ - nats.Secure(&tls.Config{ - VerifyConnection: func(s tls.ConnectionState) error { - if s.OCSPResponse != nil { - return errExpectedNoStaple - } - return nil - }, - }), - nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), - nats.RootCAs(caCert), - nats.ErrorHandler(noOpErrHandler), - }, - nil, - nil, - func() { setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) }, - }, - { - "OCSP Stapling in must staple mode fetches staple if there is a must staple flag", - ` - port: -1 +func parsePEM(t *testing.T, pemPath string) *pem.Block { + t.Helper() + data, err := os.ReadFile(pemPath) + if err != nil { + t.Fatal(err) + } - ocsp { - mode: must - url: "http://127.0.0.1:8888" - } + block, _ := pem.Decode(data) + if block == nil { + t.Fatalf("failed to decode PEM %s", pemPath) + } + return block +} - tls { - cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - } - `, - []nats.Option{ - nats.Secure(&tls.Config{ - VerifyConnection: func(s tls.ConnectionState) error { - if s.OCSPResponse == nil { - return errMissingStaple - } - return nil - }, - }), - nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), - nats.RootCAs(caCert), - nats.ErrorHandler(noOpErrHandler), - }, - nil, - nil, - func() { - setOCSPStatus(t, ocspURL, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - test.configure() - content := test.config - conf := createConfFile(t, []byte(content)) - s, opts := RunServerWithConfig(conf) - defer s.Shutdown() +func getOCSPStatus(s tls.ConnectionState) (*ocsp.Response, error) { + if len(s.VerifiedChains) == 0 { + return nil, fmt.Errorf("missing TLS verified chains") + } + chain := s.VerifiedChains[0] - nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) 
- if test.err == nil && err != nil { - t.Errorf("Expected to connect, got %v", err) - } else if test.err != nil && err == nil { - t.Errorf("Expected error on connect") - } else if test.err != nil && err != nil { - // Error on connect was expected - if test.err.Error() != err.Error() { - t.Errorf("Expected error %s, got: %s", test.err, err) - } - return - } - defer nc.Close() + if got, want := len(chain), 2; got < want { + return nil, fmt.Errorf("incomplete cert chain, got %d, want at least %d", got, want) + } + leaf, issuer := chain[0], chain[1] - nc.Subscribe("ping", func(m *nats.Msg) { - m.Respond([]byte("pong")) - }) - nc.Flush() + resp, err := ocsp.ParseResponseForCert(s.OCSPResponse, leaf, issuer) + if err != nil { + return nil, fmt.Errorf("failed to parse OCSP response: %w", err) + } + if err := resp.CheckSignatureFrom(issuer); err != nil { + return resp, err + } + return resp, nil +} - _, err = nc.Request("ping", []byte("ping"), 250*time.Millisecond) - if test.rerr != nil && err == nil { - t.Errorf("Expected error getting response") - } else if test.rerr == nil && err != nil { - t.Errorf("Expected response") - } - }) +func TestOCSPTLSConfigNoLeafSet(t *testing.T) { + o := DefaultTestOptions + o.HTTPHost = "127.0.0.1" + o.HTTPSPort = -1 + o.TLSConfig = &tls.Config{ServerName: "localhost"} + cert, err := tls.LoadX509KeyPair("configs/certs/server-cert.pem", "configs/certs/server-key.pem") + if err != nil { + t.Fatalf("Got error reading certificates: %s", err) } + o.TLSConfig.Certificates = []tls.Certificate{cert} + s := RunServer(&o) + s.Shutdown() } -func TestOCSPCustomConfigReloadDisable(t *testing.T) { +func TestOCSPSuperCluster(t *testing.T) { const ( - caCert = "configs/certs/ocsp/ca-cert.pem" - caKey = "configs/certs/ocsp/ca-key.pem" - serverCert = "configs/certs/ocsp/server-cert.pem" - updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem" + caCert = "configs/certs/ocsp/ca-cert.pem" + caKey = "configs/certs/ocsp/ca-key.pem" ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() ocspr := newOCSPResponder(t, caCert, caKey) defer ocspr.Shutdown(ctx) addr := fmt.Sprintf("http://%s", ocspr.Addr) - setOCSPStatus(t, addr, serverCert, ocsp.Good) - setOCSPStatus(t, addr, updatedServerCert, ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good) + setOCSPStatus(t, addr, "configs/certs/ocsp/server-cert.pem", ocsp.Good) - // Start with server without OCSP Stapling MustStaple - content := ` + // Store Dirs + storeDirA := t.TempDir() + storeDirB := t.TempDir() + storeDirC := t.TempDir() + storeDirD := t.TempDir() + + // Gateway server configuration + srvConfA := ` + host: "127.0.0.1" + port: -1 + + server_name: "A" + + ocsp { mode: "always" } + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: 
"configs/certs/ocsp/server-status-request-url-01-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + store_dir: '%s' + + cluster { + name: A + host: "127.0.0.1" + advertise: 127.0.0.1 + port: -1 + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + } + + gateway { + name: A + host: "127.0.0.1" + port: -1 + advertise: "127.0.0.1" + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + verify: true + } + } + ` + srvConfA = fmt.Sprintf(srvConfA, storeDirA) + sconfA := createConfFile(t, []byte(srvConfA)) + srvA, optsA := RunServerWithConfig(sconfA) + defer srvA.Shutdown() + + // Server that has the original as a cluster. + srvConfB := ` + host: "127.0.0.1" port: -1 - ocsp: { mode: always, url: "http://127.0.0.1:8888" } + server_name: "B" + + ocsp { mode: "always" } tls { - cert_file: "configs/certs/ocsp/server-cert.pem" - key_file: "configs/certs/ocsp/server-key.pem" + cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 } + store_dir: '%s' + + cluster { + name: A + host: "127.0.0.1" + advertise: 127.0.0.1 + port: -1 + + routes: [ nats://127.0.0.1:%d ] + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + } + } + + gateway { + name: A + host: "127.0.0.1" + advertise: "127.0.0.1" + port: -1 + + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + verify: true + } + } ` - conf := createConfFile(t, []byte(content)) - s, opts := RunServerWithConfig(conf) - defer s.Shutdown() + srvConfB = fmt.Sprintf(srvConfB, storeDirB, optsA.Cluster.Port) + conf := createConfFile(t, []byte(srvConfB)) + srvB, optsB := RunServerWithConfig(conf) + defer srvB.Shutdown() - nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), + // Client connects to server A. + cA, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsA.Port), nats.Secure(&tls.Config{ VerifyConnection: func(s tls.ConnectionState) error { if s.OCSPResponse == nil { - return fmt.Errorf("missing OCSP Staple!") + return fmt.Errorf("missing OCSP Staple from server") } return nil }, @@ -2190,149 +3228,144 @@ func TestOCSPCustomConfigReloadDisable(t *testing.T) { ) if err != nil { t.Fatal(err) - } - defer nc.Close() - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatal(err) - } - nc.Publish("foo", []byte("hello world")) - nc.Flush() - _, err = sub.NextMsg(1 * time.Second) - if err != nil { - t.Fatal(err) } - nc.Close() + defer cA.Close() - // Change and disable OCSP Stapling. - content = ` + // Start another server that will make connect as a gateway to cluster A. 
+ srvConfC := ` + host: "127.0.0.1" port: -1 - ocsp: { mode: never } + server_name: "C" + + ocsp { mode: "always" } tls { - cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + cert_file: "configs/certs/ocsp/server-status-request-url-05-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-05-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 } - ` - if err := os.WriteFile(conf, []byte(content), 0666); err != nil { - t.Fatalf("Error writing config: %v", err) - } - if err := s.Reload(); err != nil { - t.Fatal(err) - } - - // The new certificate has must staple but OCSP Stapling is disabled. - time.Sleep(2 * time.Second) - - nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), - nats.Secure(&tls.Config{ - VerifyConnection: func(s tls.ConnectionState) error { - if s.OCSPResponse != nil { - return fmt.Errorf("unexpected OCSP Staple!") + store_dir: '%s' + gateway { + name: C + host: "127.0.0.1" + advertise: "127.0.0.1" + port: -1 + gateways: [{ + name: "A", + urls: ["nats://127.0.0.1:%d"] + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 } - return nil - }, - }), - nats.RootCAs(caCert), - nats.ErrorHandler(noOpErrHandler), - ) - if err != nil { - t.Fatal(err) - } - nc.Close() -} + }] + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + verify: true + } + } + ` + srvConfC = fmt.Sprintf(srvConfC, storeDirC, optsA.Gateway.Port) + conf = createConfFile(t, []byte(srvConfC)) + srvC, optsC := RunServerWithConfig(conf) + defer srvC.Shutdown() -func TestOCSPCustomConfigReloadEnable(t *testing.T) { - const ( - caCert = "configs/certs/ocsp/ca-cert.pem" - caKey = "configs/certs/ocsp/ca-key.pem" - serverCert = "configs/certs/ocsp/server-cert.pem" - updatedServerCert = "configs/certs/ocsp/server-status-request-url-01-cert.pem" - ) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - ocspr := newOCSPResponder(t, caCert, caKey) - defer ocspr.Shutdown(ctx) - addr := fmt.Sprintf("http://%s", ocspr.Addr) - setOCSPStatus(t, addr, serverCert, ocsp.Good) - setOCSPStatus(t, addr, updatedServerCert, ocsp.Good) + // Check that server is connected to any server from the other cluster. + checkClusterFormed(t, srvA, srvB) + waitForOutboundGateways(t, srvC, 1, 5*time.Second) - // Start with server without OCSP Stapling MustStaple - content := ` + // Start one more server that will become another gateway. 
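[Editor's note: server D below deliberately pairs one of its gateway remotes with a certificate that carries no staple, so only a single outbound gateway should form. One way to see which remote actually connected is the server's Gatewayz monitoring call; this is a sketch, with the API shape assumed from the monitoring code rather than verified here:]

```go
// List the remotes srvD managed to connect to; with the intentionally
// non-stapled cert used for gateway "C", only "A" is expected to appear.
gwz, err := srvD.Gatewayz(&server.GatewayzOptions{})
if err != nil {
	t.Fatal(err)
}
for name := range gwz.OutboundGateways {
	t.Logf("outbound gateway to: %s", name)
}
```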
+ srvConfD := ` + host: "127.0.0.1" port: -1 - ocsp: { mode: never, url: "http://127.0.0.1:8888" } + server_name: "D" + + ocsp { mode: "auto", url: "%s" } tls { - cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" + cert_file: "configs/certs/ocsp/server-status-request-url-07-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-07-key.pem" ca_file: "configs/certs/ocsp/ca-cert.pem" timeout: 5 } + store_dir: '%s' + gateway { + name: D + host: "127.0.0.1" + advertise: "127.0.0.1" + port: -1 + gateways: [{ + name: "A", + urls: ["nats://127.0.0.1:%d"] + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + }}, + { + name: "C", + urls: ["nats://127.0.0.1:%d"] + + #################################################################### + ## TEST NOTE: This cert does not have an OCSP Staple intentionally## + #################################################################### + tls { + ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp/server-cert.pem" + key_file: "configs/certs/ocsp/server-key.pem" + timeout: 5 + }} + ] + tls { + cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem" + key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" + ca_file: "configs/certs/ocsp/ca-cert.pem" + timeout: 5 + verify: true + } + } ` - conf := createConfFile(t, []byte(content)) - s, opts := RunServerWithConfig(conf) - defer s.Shutdown() + srvConfD = fmt.Sprintf(srvConfD, addr, storeDirD, optsA.Gateway.Port, optsC.Gateway.Port) + conf = createConfFile(t, []byte(srvConfD)) + srvD, _ := RunServerWithConfig(conf) + defer srvD.Shutdown() - nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), + // There should be a single gateway here because one of the gateway connections does not have a OCSP staple. + waitForOutboundGateways(t, srvD, 1, 10*time.Second) + + // Connect to cluster A using server B. + cB, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsB.Port), nats.Secure(&tls.Config{ VerifyConnection: func(s tls.ConnectionState) error { - if s.OCSPResponse != nil { - return fmt.Errorf("unexpected OCSP Staple!") + if s.OCSPResponse == nil { + return fmt.Errorf("missing OCSP Staple from server") } return nil }, }), nats.RootCAs(caCert), nats.ErrorHandler(noOpErrHandler), - ) - if err != nil { - t.Fatal(err) - } - defer nc.Close() - sub, err := nc.SubscribeSync("foo") - if err != nil { - t.Fatal(err) - } - nc.Publish("foo", []byte("hello world")) - nc.Flush() - - _, err = sub.NextMsg(1 * time.Second) - if err != nil { - t.Fatal(err) - } - nc.Close() - - // Change and disable OCSP Stapling. - content = ` - port: -1 - - ocsp: { mode: always, url: "http://127.0.0.1:8888" } - - tls { - cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - } - ` - if err := os.WriteFile(conf, []byte(content), 0666); err != nil { - t.Fatalf("Error writing config: %v", err) - } - if err := s.Reload(); err != nil { + ) + if err != nil { t.Fatal(err) } - time.Sleep(2 * time.Second) + defer cB.Close() - nc, err = nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), + // Connects to cluster C using server C. 
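[Editor's note: the client checks below only assert that some staple bytes are present. A stricter drop-in variant can lean on the getOCSPStatus helper added further down in this file, which parses the staple and verifies its signature against the issuer; the status constant comes from golang.org/x/crypto/ocsp:]

```go
nats.Secure(&tls.Config{
	// Parse and signature-check the staple instead of only testing presence.
	VerifyConnection: func(s tls.ConnectionState) error {
		resp, err := getOCSPStatus(s)
		if err != nil {
			return err
		}
		if resp.Status != ocsp.Good {
			return fmt.Errorf("unexpected staple status: %d", resp.Status)
		}
		return nil
	},
}),
```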
+ cC, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsC.Port), nats.Secure(&tls.Config{ VerifyConnection: func(s tls.ConnectionState) error { if s.OCSPResponse == nil { - return fmt.Errorf("missing OCSP Staple!") + return fmt.Errorf("missing OCSP Staple from server") } return nil }, @@ -2343,244 +3376,338 @@ func TestOCSPCustomConfigReloadEnable(t *testing.T) { if err != nil { t.Fatal(err) } - nc.Close() -} - -func newOCSPResponder(t *testing.T, issuerCertPEM, issuerKeyPEM string) *http.Server { - t.Helper() - return newOCSPResponderDesignated(t, issuerCertPEM, issuerCertPEM, issuerKeyPEM, false) -} - -func newOCSPResponderDesignated(t *testing.T, issuerCertPEM, respCertPEM, respKeyPEM string, embed bool) *http.Server { - t.Helper() - var mu sync.Mutex - status := make(map[string]int) - - issuerCert := parseCertPEM(t, issuerCertPEM) - respCert := parseCertPEM(t, respCertPEM) - respKey := parseKeyPEM(t, respKeyPEM) - - mux := http.NewServeMux() - // The "/statuses/" endpoint is for directly setting a key-value pair in - // the CA's status database. - mux.HandleFunc("/statuses/", func(rw http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - - key := r.URL.Path[len("/statuses/"):] - switch r.Method { - case "GET": - mu.Lock() - n, ok := status[key] - if !ok { - n = ocsp.Unknown - } - mu.Unlock() - - fmt.Fprintf(rw, "%s %d", key, n) - case "POST": - data, err := io.ReadAll(r.Body) - if err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } - - n, err := strconv.Atoi(string(data)) - if err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } - - mu.Lock() - status[key] = n - mu.Unlock() + defer cC.Close() - fmt.Fprintf(rw, "%s %d", key, n) - default: - http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } + _, err = cA.Subscribe("foo", func(m *nats.Msg) { + m.Respond([]byte("From Server A")) }) - // The "/" endpoint is for normal OCSP requests. This actually parses an - // OCSP status request and signs a response with a CA. Lightly based off: - // https://www.ietf.org/rfc/rfc2560.txt - mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - http.Error(rw, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } + if err != nil { + t.Errorf("%v", err) + } + cA.Flush() - reqData, err := base64.StdEncoding.DecodeString(r.URL.Path[1:]) - if err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } + _, err = cB.Subscribe("bar", func(m *nats.Msg) { + m.Respond([]byte("From Server B")) + }) + if err != nil { + t.Fatal(err) + } + cB.Flush() - ocspReq, err := ocsp.ParseRequest(reqData) - if err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return + // Confirm that a message from server C can flow back to server A via gateway.. 
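[Editor's note: the loop that follows retries because subject interest for "foo" and "bar" propagates to the other clusters asynchronously once the gateways come up, so early requests can time out without anything being wrong. The same pattern, factored into a helper for clarity; the name and shape are ours, not the test suite's:]

```go
// requestWithRetry retries a request until it succeeds or the attempts are
// exhausted, returning the last error seen.
func requestWithRetry(nc *nats.Conn, subj string, tries int) (*nats.Msg, error) {
	var (
		msg *nats.Msg
		err error
	)
	for i := 0; i < tries; i++ {
		if msg, err = nc.Request(subj, nil, 500*time.Millisecond); err == nil {
			return msg, nil
		}
	}
	return nil, err
}
```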
+ var ( + resp *nats.Msg + lerr error + ) + for i := 0; i < 10; i++ { + resp, lerr = cC.Request("foo", nil, 500*time.Millisecond) + if lerr != nil { + continue } - - mu.Lock() - n, ok := status[ocspReq.SerialNumber.String()] - if !ok { - n = ocsp.Unknown + got := string(resp.Data) + expected := "From Server A" + if got != expected { + t.Fatalf("Expected %v, got: %v", expected, got) } - mu.Unlock() - tmpl := ocsp.Response{ - Status: n, - SerialNumber: ocspReq.SerialNumber, - ThisUpdate: time.Now(), - NextUpdate: time.Now().Add(4 * time.Second), - } - if embed { - tmpl.Certificate = respCert + // Make request to B + resp, lerr = cC.Request("bar", nil, 500*time.Millisecond) + if lerr != nil { + continue } - respData, err := ocsp.CreateResponse(issuerCert, respCert, tmpl, respKey) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return + got = string(resp.Data) + expected = "From Server B" + if got != expected { + t.Errorf("Expected %v, got: %v", expected, got) } - - rw.Header().Set("Content-Type", "application/ocsp-response") - rw.Header().Set("Content-Length", fmt.Sprint(len(respData))) - - fmt.Fprint(rw, string(respData)) - }) - - srv := &http.Server{ - Addr: "127.0.0.1:8888", - Handler: mux, + lerr = nil + break + } + if lerr != nil { + t.Errorf("Unexpected error: %v", lerr) + } + if n := srvD.NumOutboundGateways(); n > 1 { + t.Errorf("Expected single gateway, got: %v", n) } - go srv.ListenAndServe() - time.Sleep(1 * time.Second) - return srv } -func setOCSPStatus(t *testing.T, ocspURL, certPEM string, status int) { - t.Helper() +func TestOCSPLocalIssuerDetermination(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cert := parseCertPEM(t, certPEM) + intermediateCA1Responder := newOCSPResponderIntermediateCA1(t) + intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr) + defer intermediateCA1Responder.Shutdown(ctx) - hc := &http.Client{Timeout: 10 * time.Second} - resp, err := hc.Post( - fmt.Sprintf("%s/statuses/%s", ocspURL, cert.SerialNumber), - "", - strings.NewReader(fmt.Sprint(status)), + // Test constants + ocspURL := intermediateCA1ResponderURL + clientTrustBundle := "configs/certs/ocsp_peer/mini-ca/misc/trust_config1_bundle.pem" + serverCert := "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem" + + var ( + errMissingStaple = fmt.Errorf("missing OCSP Staple from server") ) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - data, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("failed to read OCSP HTTP response body: %s", err) - } + for _, test := range []struct { + name string + config string + opts []nats.Option + err error + rerr error + serverStart bool + configure func() + }{ + { + "Correct issuer configured in cert bundle", + ` + port: -1 - if got, want := resp.Status, "200 OK"; got != want { - t.Error(strings.TrimSpace(string(data))) - t.Fatalf("unexpected OCSP HTTP set status, got %q, want %q", got, want) - } -} + ocsp { + mode: always + } -func parseCertPEM(t *testing.T, certPEM string) *x509.Certificate { - t.Helper() - block := parsePEM(t, certPEM) + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + 
return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(clientTrustBundle), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + true, + func() { + setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) + }, + }, + { + "Wrong issuer configured in cert bundle, server no start", + ` + port: -1 - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatalf("failed to parse cert '%s': %s", certPEM, err) - } - return cert -} + ocsp { + mode: always + } -func parseKeyPEM(t *testing.T, keyPEM string) *rsa.PrivateKey { - t.Helper() - block := parsePEM(t, keyPEM) + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/misc/misconfig_TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(clientTrustBundle), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + false, + func() { + setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) + }, + }, + { + "Issuer configured in CA bundle only, configuration 1", + ` + port: -1 + + ocsp { + mode: always + } + + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/misc/trust_config1_bundle.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(clientTrustBundle), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + true, + func() { + setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) + }, + }, + { + "Issuer configured in CA bundle only, configuration 2", + ` + port: -1 - key, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - t.Fatalf("failed to parse ikey %s: %s", keyPEM, err) - } - return key -} + ocsp { + mode: always + } -func parsePEM(t *testing.T, pemPath string) *pem.Block { - t.Helper() - data, err := os.ReadFile(pemPath) - if err != nil { - t.Fatal(err) - } + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/misc/trust_config2_bundle.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(clientTrustBundle), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + true, + func() { + setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) + }, + }, + { + "Issuer configured in CA bundle only, configuration 3", + ` + port: -1 - block, _ := pem.Decode(data) - if block == nil { - t.Fatalf("failed to decode PEM %s", pemPath) - } - return block -} + ocsp { + 
mode: always + } -func getOCSPStatus(s tls.ConnectionState) (*ocsp.Response, error) { - if len(s.VerifiedChains) == 0 { - return nil, fmt.Errorf("missing TLS verified chains") - } - chain := s.VerifiedChains[0] + tls { + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/misc/trust_config3_bundle.pem" + timeout: 5 + } + `, + []nats.Option{ + nats.Secure(&tls.Config{ + VerifyConnection: func(s tls.ConnectionState) error { + if s.OCSPResponse == nil { + return errMissingStaple + } + return nil + }, + }), + nats.ClientCert("./configs/certs/ocsp/client-cert.pem", "./configs/certs/ocsp/client-key.pem"), + nats.RootCAs(clientTrustBundle), + nats.ErrorHandler(noOpErrHandler), + }, + nil, + nil, + true, + func() { + setOCSPStatus(t, ocspURL, serverCert, ocsp.Good) + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + defer func() { + r := recover() + if r != nil && test.serverStart { + t.Fatalf("Expected server start, unexpected panic: %v", r) + } + if r == nil && !test.serverStart { + t.Fatalf("Expected server to not start and panic thrown") + } + }() + test.configure() + content := test.config + conf := createConfFile(t, []byte(content)) + s, opts := RunServerWithConfig(conf) + // server may not start for some tests + if s != nil { + defer s.Shutdown() + } - if got, want := len(chain), 2; got < want { - return nil, fmt.Errorf("incomplete cert chain, got %d, want at least %d", got, want) - } - leaf, issuer := chain[0], chain[1] + nc, err := nats.Connect(fmt.Sprintf("tls://localhost:%d", opts.Port), test.opts...) + if test.err == nil && err != nil { + t.Errorf("Expected to connect, got %v", err) + } else if test.err != nil && err == nil { + t.Errorf("Expected error on connect") + } else if test.err != nil && err != nil { + // Error on connect was expected + if test.err.Error() != err.Error() { + t.Errorf("Expected error %s, got: %s", test.err, err) + } + return + } + defer nc.Close() - resp, err := ocsp.ParseResponseForCert(s.OCSPResponse, leaf, issuer) - if err != nil { - return nil, fmt.Errorf("failed to parse OCSP response: %w", err) - } - if err := resp.CheckSignatureFrom(issuer); err != nil { - return resp, err - } - return resp, nil -} + nc.Subscribe("ping", func(m *nats.Msg) { + m.Respond([]byte("pong")) + }) + nc.Flush() -func TestOCSPTLSConfigNoLeafSet(t *testing.T) { - o := DefaultTestOptions - o.HTTPHost = "127.0.0.1" - o.HTTPSPort = -1 - o.TLSConfig = &tls.Config{ServerName: "localhost"} - cert, err := tls.LoadX509KeyPair("configs/certs/server-cert.pem", "configs/certs/server-key.pem") - if err != nil { - t.Fatalf("Got error reading certificates: %s", err) + _, err = nc.Request("ping", []byte("ping"), 250*time.Millisecond) + if test.rerr != nil && err == nil { + t.Errorf("Expected error getting response") + } else if test.rerr == nil && err != nil { + t.Errorf("Expected response") + } + }) } - o.TLSConfig.Certificates = []tls.Certificate{cert} - s := RunServer(&o) - s.Shutdown() } -func TestOCSPSuperCluster(t *testing.T) { +func TestMixedCAOCSPSuperCluster(t *testing.T) { const ( - caCert = "configs/certs/ocsp/ca-cert.pem" + caCert = "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" caKey = "configs/certs/ocsp/ca-key.pem" ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ocspr := newOCSPResponder(t, caCert, caKey) - defer ocspr.Shutdown(ctx) - addr := fmt.Sprintf("http://%s", ocspr.Addr) - 
setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-01-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-02-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-03-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-04-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-05-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-06-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-07-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-status-request-url-08-cert.pem", ocsp.Good) - setOCSPStatus(t, addr, "configs/certs/ocsp/server-cert.pem", ocsp.Good) + + intermediateCA1Responder := newOCSPResponderIntermediateCA1(t) + intermediateCA1ResponderURL := fmt.Sprintf("http://%s", intermediateCA1Responder.Addr) + defer intermediateCA1Responder.Shutdown(ctx) + setOCSPStatus(t, intermediateCA1ResponderURL, "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_cert.pem", ocsp.Good) + + intermediateCA2Responder := newOCSPResponderIntermediateCA2(t) + intermediateCA2ResponderURL := fmt.Sprintf("http://%s", intermediateCA2Responder.Addr) + defer intermediateCA2Responder.Shutdown(ctx) + setOCSPStatus(t, intermediateCA2ResponderURL, "configs/certs/ocsp_peer/mini-ca/server2/TestServer3_cert.pem", ocsp.Good) // Store Dirs storeDirA := t.TempDir() storeDirB := t.TempDir() storeDirC := t.TempDir() - storeDirD := t.TempDir() // Gateway server configuration srvConfA := ` @@ -2592,9 +3719,9 @@ func TestOCSPSuperCluster(t *testing.T) { ocsp { mode: "always" } tls { - cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 } store_dir: '%s' @@ -2606,9 +3733,9 @@ func TestOCSPSuperCluster(t *testing.T) { port: -1 tls { - cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 } } @@ -2620,9 +3747,9 @@ func TestOCSPSuperCluster(t *testing.T) { advertise: "127.0.0.1" tls { - cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 verify: true } @@ -2643,9 +3770,9 @@ func TestOCSPSuperCluster(t *testing.T) { ocsp { mode: "always" } tls { - cert_file: "configs/certs/ocsp/server-status-request-url-01-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-01-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: 
"configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 } store_dir: '%s' @@ -2659,9 +3786,9 @@ func TestOCSPSuperCluster(t *testing.T) { routes: [ nats://127.0.0.1:%d ] tls { - cert_file: "configs/certs/ocsp/server-status-request-url-02-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-02-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 } } @@ -2673,9 +3800,9 @@ func TestOCSPSuperCluster(t *testing.T) { port: -1 tls { - cert_file: "configs/certs/ocsp/server-status-request-url-03-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-03-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server1/TestServer1_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server1/private/TestServer1_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 verify: true } @@ -2705,7 +3832,7 @@ func TestOCSPSuperCluster(t *testing.T) { } defer cA.Close() - // Start another server that will make connect as a gateway to cluster A. + // Start another server that will make connect as a gateway to cluster A but with different CA issuer. srvConfC := ` host: "127.0.0.1" port: -1 @@ -2715,9 +3842,9 @@ func TestOCSPSuperCluster(t *testing.T) { ocsp { mode: "always" } tls { - cert_file: "configs/certs/ocsp/server-status-request-url-05-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-05-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 } store_dir: '%s' @@ -2730,16 +3857,16 @@ func TestOCSPSuperCluster(t *testing.T) { name: "A", urls: ["nats://127.0.0.1:%d"] tls { - cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 } }] tls { - cert_file: "configs/certs/ocsp/server-status-request-url-06-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-06-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" + cert_file: "configs/certs/ocsp_peer/mini-ca/server2/TestServer3_bundle.pem" + key_file: "configs/certs/ocsp_peer/mini-ca/server2/private/TestServer3_keypair.pem" + ca_file: "configs/certs/ocsp_peer/mini-ca/root/root_cert.pem" timeout: 5 verify: true } @@ -2754,67 +3881,6 @@ func TestOCSPSuperCluster(t *testing.T) { checkClusterFormed(t, srvA, srvB) waitForOutboundGateways(t, srvC, 1, 5*time.Second) - // Start one more server that will become another gateway. 
- srvConfD := ` - host: "127.0.0.1" - port: -1 - - server_name: "D" - - ocsp { mode: "auto", url: "%s" } - - tls { - cert_file: "configs/certs/ocsp/server-status-request-url-07-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-07-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - } - store_dir: '%s' - gateway { - name: D - host: "127.0.0.1" - advertise: "127.0.0.1" - port: -1 - gateways: [{ - name: "A", - urls: ["nats://127.0.0.1:%d"] - tls { - cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - }}, - { - name: "C", - urls: ["nats://127.0.0.1:%d"] - - #################################################################### - ## TEST NOTE: This cert does not have an OCSP Staple intentionally## - #################################################################### - tls { - ca_file: "configs/certs/ocsp/ca-cert.pem" - cert_file: "configs/certs/ocsp/server-cert.pem" - key_file: "configs/certs/ocsp/server-key.pem" - timeout: 5 - }} - ] - tls { - cert_file: "configs/certs/ocsp/server-status-request-url-08-cert.pem" - key_file: "configs/certs/ocsp/server-status-request-url-08-key.pem" - ca_file: "configs/certs/ocsp/ca-cert.pem" - timeout: 5 - verify: true - } - } - ` - srvConfD = fmt.Sprintf(srvConfD, addr, storeDirD, optsA.Gateway.Port, optsC.Gateway.Port) - conf = createConfFile(t, []byte(srvConfD)) - srvD, _ := RunServerWithConfig(conf) - defer srvD.Shutdown() - - // There should be a single gateway here because one of the gateway connections does not have a OCSP staple. - waitForOutboundGateways(t, srvD, 1, 10*time.Second) - // Connect to cluster A using server B. cB, err := nats.Connect(fmt.Sprintf("tls://127.0.0.1:%d", optsB.Port), nats.Secure(&tls.Config{ @@ -2899,7 +3965,4 @@ func TestOCSPSuperCluster(t *testing.T) { if lerr != nil { t.Errorf("Unexpected error: %v", lerr) } - if n := srvD.NumOutboundGateways(); n > 1 { - t.Errorf("Expected single gateway, got: %v", n) - } } diff --git a/test/system_services_test.go b/test/system_services_test.go index 0bdd06ed7..cc0c19d77 100644 --- a/test/system_services_test.go +++ b/test/system_services_test.go @@ -262,7 +262,7 @@ func TestSystemServiceSubscribersLeafNodesWithoutSystem(t *testing.T) { // For now we do not see all the details behind a leafnode if the leafnode is not enabled. checkDbgNumSubs(t, nc, "foo.bar.3", 2) - checkDbgNumSubs(t, nc, "foo.bar.baz QG.22", 11) + checkDbgNumSubs(t, nc, "foo.bar.baz QG.22", 12) } func runSolicitLeafServerWithSystemToURL(surl string) (*server.Server, *server.Options) { diff --git a/test/test_test.go b/test/test_test.go index 60617e932..c6b83de65 100644 --- a/test/test_test.go +++ b/test/test_test.go @@ -146,9 +146,12 @@ func (d *dummyLogger) Debugf(format string, args ...interface{}) { func (d *dummyLogger) Tracef(format string, args ...interface{}) { } +// ** added by Memphis func (d *dummyLogger) Systemf(format string, args ...interface{}) { } +// ** added by Memphis + func (d *dummyLogger) Noticef(format string, args ...interface{}) { } diff --git a/test/tls_test.go b/test/tls_test.go index 288d4b737..dc430d860 100644 --- a/test/tls_test.go +++ b/test/tls_test.go @@ -72,6 +72,26 @@ func TestTLSConnection(t *testing.T) { } } +// TestTLSInProcessConnection checks that even if TLS is enabled on the server, +// that an in-process connection that does *not* use TLS still connects successfully. 
+func TestTLSInProcessConnection(t *testing.T) { + srv, opts := RunServerWithConfig("./configs/tls.conf") + defer srv.Shutdown() + + nc, err := nats.Connect("", nats.InProcessServer(srv), nats.UserInfo(opts.Username, opts.Password)) + if err != nil { + t.Fatal(err) + } + + if nc.TLSRequired() { + t.Fatalf("Shouldn't have required TLS for in-process connection") + } + + if _, err = nc.TLSConnectionState(); err == nil { + t.Fatal("Should have got an error retrieving TLS connection state") + } +} + func TestTLSClientCertificate(t *testing.T) { srv, opts := RunServerWithConfig("./configs/tlsverify.conf") defer srv.Shutdown() diff --git a/test/verbose_test.go b/test/verbose_test.go index af88477d8..d370a2df9 100644 --- a/test/verbose_test.go +++ b/test/verbose_test.go @@ -10,6 +10,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. + package test import ( From b8297a8231ab845cd18a2620f26e04dd44466cc5 Mon Sep 17 00:00:00 2001 From: shay23b Date: Tue, 5 Dec 2023 16:22:38 +0200 Subject: [PATCH 05/16] from cloud --- server/memphis_cloud.go | 8 ++++++++ server/memphis_handlers_stations.go | 5 +++++ server/memphis_handlers_user_mgmt.go | 6 +++++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/server/memphis_cloud.go b/server/memphis_cloud.go index f4fe1ec83..0bfc6c973 100644 --- a/server/memphis_cloud.go +++ b/server/memphis_cloud.go @@ -2382,3 +2382,11 @@ func getUsageLimitProduersLimitPerStation(tenantName, stationName string) (float func (s *Server) GetConnectorsByStationAndPartition(stationID, partitionNumber, numOfPartitions int) ([]map[string]string, error) { return []map[string]string{}, nil } + +func deleteConnectorsStationResources(tenantName string, stationID int) error { + return nil +} + +func deleteConnectorsTenantResources(tenantName string) error { + return nil +} diff --git a/server/memphis_handlers_stations.go b/server/memphis_handlers_stations.go index 1a7ba0640..b955ccdf2 100644 --- a/server/memphis_handlers_stations.go +++ b/server/memphis_handlers_stations.go @@ -182,6 +182,11 @@ func removeStationResources(s *Server, station models.Station, shouldDeleteStrea return err } + err = deleteConnectorsStationResources(station.TenantName, station.ID) + if err != nil { + return err + } + return nil } diff --git a/server/memphis_handlers_user_mgmt.go b/server/memphis_handlers_user_mgmt.go index 9dd76cd5e..ef57bf312 100644 --- a/server/memphis_handlers_user_mgmt.go +++ b/server/memphis_handlers_user_mgmt.go @@ -160,7 +160,11 @@ func removeTenantResources(tenantName string, user models.User) error { if err != nil { return err } - // TODO: send response of DeleteAndGetAttachedFunctionsByStation to microservice to delete + + err = deleteConnectorsTenantResources(tenantName) + if err != nil { + return err + } err = db.RemoveStationsByTenant(tenantName) if err != nil { From a6fef73fd937f679ab4eabe8a99f838aba61a9fc Mon Sep 17 00:00:00 2001 From: Avitaltrifsik <107035359+Avitaltrifsik@users.noreply.github.com> Date: Wed, 6 Dec 2023 10:59:14 +0200 Subject: [PATCH 06/16] Update README.md (#1483) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a0d6abe38..c238e4294 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Github (6)](https://github.com/memphisdev/memphis/assets/107035359/bc2feafc-946c-4569-ab8d-836bc0181890)](https://www.functions.memphis.dev/) +[![Github functions new banner 
(1)](https://github.com/memphisdev/memphis/assets/107035359/7f89dcba-b897-4164-93b3-fec252341960)](https://www.functions.memphis.dev/)

Discord From 034745e2814b5541105e98185ca7e33db9ec0ae7 Mon Sep 17 00:00:00 2001 From: svetaStrech Date: Wed, 6 Dec 2023 21:20:30 +0200 Subject: [PATCH 07/16] align with cloud --- ui_src/src/App.js | 9 +- ui_src/src/App.scss | 1 + ui_src/src/assets/images/connectIcon.svg | 15 + ui_src/src/assets/images/connectorIcon.svg | 6 + ui_src/src/assets/images/producerIcon.svg | 9 + ui_src/src/assets/images/purplePlus.svg | 4 + ui_src/src/components/InputNumber/index.js | 3 +- ui_src/src/components/InputNumber/style.scss | 12 +- ui_src/src/components/connectorModal/index.js | 345 +++++++++++++++++ .../src/components/connectorModal/style.scss | 57 +++ ui_src/src/components/customSelect/style.scss | 9 + .../src/components/produceMessages/index.js | 2 +- ui_src/src/components/select/index.js | 7 +- ui_src/src/components/select/style.scss | 68 +++- ui_src/src/components/sideBar/index.js | 77 ++-- ui_src/src/components/sideBar/style.scss | 62 ++-- ui_src/src/components/spinner/index.js | 21 +- ui_src/src/connectors/assets/awsKinesis.svg | 10 + ui_src/src/connectors/assets/kafkaIcon.svg | 10 + ui_src/src/connectors/assets/s3LogoIcon.svg | 10 + ui_src/src/connectors/index.js | 12 + ui_src/src/connectors/kafka.js | 187 ++++++++++ ui_src/src/connectors/kinesis.js | 97 +++++ ui_src/src/const/apiEndpoints.js | 4 + ui_src/src/domain/administration/index.js | 5 +- .../ProduceConsumList/index.js | 347 +++++++++++++++--- .../ProduceConsumList/style.scss | 73 +++- .../components/functionsOverview/style.scss | 4 +- .../stationOverviewHeader/index.js | 17 +- .../stationOverviewHeader/style.scss | 2 +- ui_src/src/domain/stationOverview/style.scss | 3 +- .../stationsList/stationBoxOverview/index.js | 2 +- 32 files changed, 1316 insertions(+), 174 deletions(-) create mode 100644 ui_src/src/assets/images/connectIcon.svg create mode 100644 ui_src/src/assets/images/connectorIcon.svg create mode 100644 ui_src/src/assets/images/producerIcon.svg create mode 100644 ui_src/src/assets/images/purplePlus.svg create mode 100644 ui_src/src/components/connectorModal/index.js create mode 100644 ui_src/src/components/connectorModal/style.scss create mode 100644 ui_src/src/connectors/assets/awsKinesis.svg create mode 100644 ui_src/src/connectors/assets/kafkaIcon.svg create mode 100644 ui_src/src/connectors/assets/s3LogoIcon.svg create mode 100644 ui_src/src/connectors/index.js create mode 100644 ui_src/src/connectors/kafka.js create mode 100644 ui_src/src/connectors/kinesis.js diff --git a/ui_src/src/App.js b/ui_src/src/App.js index edfd21df0..5fcf156e9 100644 --- a/ui_src/src/App.js +++ b/ui_src/src/App.js @@ -29,9 +29,7 @@ import { LOCAL_STORAGE_USER_PASS_BASED_AUTH, LOCAL_STORAGE_WS_PORT, USER_IMAGE, - LOCAL_STORAGE_PLAN, - LOCAL_STORAGE_FULL_NAME, - LOCAL_STORAGE_USER_NAME + LOCAL_STORAGE_PLAN } from './const/localStorageConsts'; import { CLOUD_URL, ENVIRONMENT, HANDLE_REFRESH_INTERVAL, WS_PREFIX, WS_SERVER_URL_PRODUCTION } from './config'; import { isCheckoutCompletedTrue, isCloud } from './services/valueConvertor'; @@ -686,6 +684,11 @@ const App = withRouter(() => { /> }>} /> }>} /> + }>} + /> + + + + + + + + + + + + + + diff --git a/ui_src/src/assets/images/connectorIcon.svg b/ui_src/src/assets/images/connectorIcon.svg new file mode 100644 index 000000000..0c54c2e01 --- /dev/null +++ b/ui_src/src/assets/images/connectorIcon.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui_src/src/assets/images/producerIcon.svg b/ui_src/src/assets/images/producerIcon.svg new file mode 100644 index 000000000..b4fa1f67f --- /dev/null +++ 
b/ui_src/src/assets/images/producerIcon.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/ui_src/src/assets/images/purplePlus.svg b/ui_src/src/assets/images/purplePlus.svg new file mode 100644 index 000000000..3bf8b5295 --- /dev/null +++ b/ui_src/src/assets/images/purplePlus.svg @@ -0,0 +1,4 @@ + + + + diff --git a/ui_src/src/components/InputNumber/index.js b/ui_src/src/components/InputNumber/index.js index e70f706b3..dec6ad414 100644 --- a/ui_src/src/components/InputNumber/index.js +++ b/ui_src/src/components/InputNumber/index.js @@ -18,7 +18,7 @@ import React from 'react'; import ArrowDropDownRounded from '@material-ui/icons/ArrowDropDownRounded'; import ArrowDropUpRounded from '@material-ui/icons/ArrowDropUpRounded'; -const InputNumberComponent = ({ min, max, onChange, value, placeholder, disabled, bordered = false }) => { +const InputNumberComponent = ({ min, max, onChange, value, placeholder, disabled, bordered = false, style }) => { const handleChange = (e) => { onChange(e); }; @@ -34,6 +34,7 @@ const InputNumberComponent = ({ min, max, onChange, value, placeholder, disabled placeholder={placeholder} disabled={disabled} className="input-number-wrapper" + style={style} controls={{ downIcon: , upIcon: }} /> ); diff --git a/ui_src/src/components/InputNumber/style.scss b/ui_src/src/components/InputNumber/style.scss index e549b0525..ebfd6aed7 100644 --- a/ui_src/src/components/InputNumber/style.scss +++ b/ui_src/src/components/InputNumber/style.scss @@ -1,14 +1,18 @@ -.input-number-wrapper{ - svg{ +.input-number-wrapper { + svg { color: black; } - } .ant-input-number { border-radius: 4px; border: 1px solid var(--gray); } +.ant-input-number:hover { + border-color: var(--gray); + border: 1px solid var(--gray); +} + .ant-input-number-focused { box-shadow: unset; } @@ -17,4 +21,4 @@ } .ant-input-number-handler-wrap { opacity: 1; -} \ No newline at end of file +} diff --git a/ui_src/src/components/connectorModal/index.js b/ui_src/src/components/connectorModal/index.js new file mode 100644 index 000000000..defb1ebfe --- /dev/null +++ b/ui_src/src/components/connectorModal/index.js @@ -0,0 +1,345 @@ +// Copyright 2022-2023 The Memphis.dev Authors +// Licensed under the Memphis Business Source License 1.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// Changed License: [Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0), as published by the Apache Foundation. +// +// https://github.com/memphisdev/memphis/blob/master/LICENSE +// +// Additional Use Grant: You may make use of the Licensed Work (i) only as part of your own product or service, provided it is not a message broker or a message queue product or service; and (ii) provided that you do not use, provide, distribute, or make available the Licensed Work as a Service. +// A "Service" is a commercial offering, product, hosted, or managed service, that allows third parties (other than your own employees and contractors acting on your behalf) to access and/or use the Licensed Work or a substantial set of the features or functionality of the Licensed Work to third parties as a software-as-a-service, platform-as-a-service, infrastructure-as-a-service or other similar services that compete with Licensor products or services. 
+ +import './style.scss'; + +import React, { useContext, useEffect, useState } from 'react'; +import { Divider, Form, Result } from 'antd'; +import { StationStoreContext } from '../../domain/stationOverview'; +import { ReactComponent as ConnectorIcon } from '../../assets/images/connectorIcon.svg'; +import InputNumberComponent from '../InputNumber'; +import TitleComponent from '../titleComponent'; +import SelectComponent from '../select'; +import { Select } from 'antd'; +import Input from '../Input'; +import Modal from '../modal'; +import Spinner from '../spinner'; +import { ApiEndpoints } from '../../const/apiEndpoints'; +import { httpRequest } from '../../services/http'; +import CloudModal from '../cloudModal'; +import { isCloud } from '../../services/valueConvertor'; +import { connectorTypes } from '../../connectors'; + +const ConnectorModal = ({ open, clickOutside, newConnecor, source }) => { + const [stationState, stationDispatch] = useContext(StationStoreContext); + const [connectorForm] = Form.useForm(); + const [step, setStep] = useState(1); + const [isEditing, setIsEditing] = useState(false); + const [loading, setLoading] = useState(false); + const [formFields, setFormFields] = useState({ + connector_type: source ? 'source' : 'sink' + }); + const [connectorInputFields, setConnectorInputFields] = useState([]); + const [resError, setError] = useState(null); + const [cloudModalopen, setCloudModalOpen] = useState(false); + + useEffect(() => { + if (open) { + setStep(1); + setFormFields({ + connector_type: source ? 'source' : 'sink' + }); + setIsEditing(false); + setLoading(false); + setError(null); + } + }, [open]); + + useEffect(() => { + let connectorType = connectorTypes.find((connector) => connector.name === formFields?.type); + formFields?.type && setConnectorInputFields(connectorType?.inputs[formFields?.connector_type]); + }, [formFields?.type, formFields?.connector_type]); + + const updateFormState = (key, value) => { + setFormFields({ ...formFields, [key]: value }); + }; + const updateFormSettingsState = (key, value) => { + let settings = formFields?.settings || {}; + settings[key] = value; + setFormFields({ ...formFields, settings }); + }; + + const updateMultiFormState = (key, value, index) => { + let settings = formFields?.settings || {}; + settings[key] = value; + setFormFields({ ...formFields, settings }); + isEditing && setIsEditing(false); + }; + + const handleSearch = (value, index) => { + let conntectorsNewFields = [...connectorInputFields]; + if (isEditing) conntectorsNewFields[index]?.options?.splice(-1, 1); + else setIsEditing(true); + value !== '' && + conntectorsNewFields[index]?.options?.push({ + name: value, + value: value + }); + setConnectorInputFields(conntectorsNewFields); + }; + + const connectorModalTitle = + step === 1 ? ( + <> +

Add a new connector

+ + + ) : loading ? ( + <> +

Validating connectivity

+ + + ) : resError ? ( + <> +

Error while creating connector

+ + + ) : ( + <> +

Created successfully

+ + + ); + const onFinish = async () => { + try { + await connectorForm.validateFields(); + if (step === 1) { + isCloud() ? createConnector() : setCloudModalOpen(true); + } else { + resError ? setStep(1) : clickOutside(); + setError(null); + } + } catch (err) { + console.log(err); + } + }; + + const createConnector = async () => { + setLoading(true); + setError(null); + setStep(2); + try { + const modifiedSettings = { ...formFields?.settings }; + + for (const key in modifiedSettings) { + if (Array.isArray(modifiedSettings[key])) { + modifiedSettings[key] = modifiedSettings[key].join(','); + } + } + let data = await httpRequest('POST', ApiEndpoints.CREATE_CONNECTOR, { + name: formFields?.name, + station_id: stationState?.stationMetaData?.id, + type: formFields?.type, + connector_type: formFields?.connector_type, + settings: modifiedSettings, + partitions: [stationState?.stationPartition] + }); + newConnecor(data?.connector, source); + } catch (error) { + setError(JSON.stringify(error)); + } finally { + setLoading(false); + } + }; + + const generateFormItem = (input, index) => { + return ( + <> + + + {input?.type === 'string' && ( + { + input?.name === 'name' ? updateFormState(input?.name, e.target.value) : updateFormSettingsState(input?.name, e.target.value); + connectorForm.setFieldValue(input?.name, e.target.value); + }} + /> + )} + {input?.type === 'number' && ( + { + updateFormSettingsState(input?.name, e); + connectorForm.setFieldValue(input?.name, e); + }} + value={formFields[input?.name]} + placeholder={input?.placeholder} + /> + )} + {input?.type === 'select' && ( + { + updateFormSettingsState(input?.name, e); + connectorForm.setFieldValue(input?.name, e); + }} + disabled={false} + /> + )} + {input?.type === 'multi' && ( + diff --git a/ui_src/src/components/select/style.scss b/ui_src/src/components/select/style.scss index 095384779..7818cc3fe 100644 --- a/ui_src/src/components/select/style.scss +++ b/ui_src/src/components/select/style.scss @@ -4,24 +4,25 @@ display: flex; align-items: center; } + .ant-select:not(.ant-select-customize-input) .ant-select-selector { - border: unset; + border: none; background-color: unset; text-align: initial; align-items: center; } - // .ant-select-selector { - // border: unset; - // color: #5e5e68; - // font-family: 'InterMedium'; - // } .ant-select-arrow { background-color: transparent; } .ant-select-selection-item span { display: none; } + label { + display: flex; + align-items: center; + gap: 5px; + } } .select-container .ant-select-focused .ant-select-selector, @@ -40,6 +41,7 @@ box-shadow: 0px 10px 10px rgb(16 10 85 / 10%); border-radius: 8px; padding: 3px; + .ant-select-item { color: rgba(74, 73, 92, 0.8) !important; } @@ -56,6 +58,7 @@ .ant-select-item-option { margin: 2px 8px; border-radius: 8px; + height: 100%; } .ant-select-item-option-selected:not(.ant-select-item-option-disabled) { background: rgba(101, 87, 255, 0.1); @@ -64,6 +67,10 @@ font-weight: unset !important; color: var(--purple) !important; font-family: 'InterBold' !important; + .comment { + color: #5e5e68 !important; + font-family: 'Inter' !important; + } } } @@ -83,7 +90,17 @@ background-color: #edebeb; } } - +.ant-select-selection-item { + display: flex; + align-items: center; + gap: 5px; +} +.ant-select-selection-item { + border: unset !important; +} +.ant-select-selection-search-input { + border: none !important; +} .rc-virtual-list-scrollbar { width: 3px !important; background: white !important; @@ -95,3 +112,40 @@ 
.ant-select-item-option-selected:not(.ant-select-item-option-disabled) { background-color: transparent; } +.anticon-check { + color: var(--purple) !important; +} + +.ant-select:not(.ant-select-disabled):hover .ant-select-selector { + border-color: #cbcbcb !important; + + box-shadow: none !important; +} +.ant-select.ant-select-in-form-item::selection { + background: transparent; + border-color: #cbcbcb !important; +} +.ant-select-selector { + border: unset; + border-radius: 5px !important; + height: 40px; + color: #5e5e68; + font-family: 'InterMedium'; +} +.ant-select-focused .ant-select-selector, +.ant-select-selector:focus, +.ant-select-selector:active, +.connector-modal-wrapper { + .ant-select-open .ant-select-selector { + border-color: #d9d9d9 !important; + box-shadow: none !important; + } + .ant-select-status-error.ant-select:not(.ant-select-disabled):not(.ant-select-customize-input):not(.ant-pagination-size-changer) .ant-select-selector { + border-color: #d9d9d9 !important; + border-color: none !important; + box-shadow: none !important; + } +} +.ant-select-multiple .ant-select-selector { + overflow-y: auto !important; +} diff --git a/ui_src/src/components/sideBar/index.js b/ui_src/src/components/sideBar/index.js index fe3f96810..e428e21ea 100644 --- a/ui_src/src/components/sideBar/index.js +++ b/ui_src/src/components/sideBar/index.js @@ -119,7 +119,7 @@ function SideBar() { paddingTop: '5px', paddingBottom: '5px', marginBottom: '10px', - marginLeft: expandSidebar ? '100px' : '', + marginLeft: expandSidebar ? '100px' : '' }; const quickActionsStyles = { @@ -128,7 +128,7 @@ function SideBar() { paddingTop: '5px', paddingBottom: '5px', marginBottom: '10px', - marginLeft: expandSidebar ? '100px' : '', + marginLeft: expandSidebar ? '100px' : '' }; const supportContextMenuStyles = { @@ -136,7 +136,7 @@ function SideBar() { paddingTop: '5px', paddingBottom: '5px', marginBottom: '10px', - marginLeft: expandSidebar ? '100px' : '', + marginLeft: expandSidebar ? '100px' : '' }; const getCompanyLogo = useCallback(async () => { @@ -213,7 +213,7 @@ function SideBar() { const MenuItem = ({ icon, activeIcon, name, route, onClick, onMouseEnter, onMouseLeave, badge }) => { return ( -
+
{state.route === route ? activeIcon : hoveredItem === route ? activeIcon : icon}

{name}

{badge && } @@ -396,9 +396,6 @@ function SideBar() { trigger="click" onOpenChange={() => setPopoverOpenSupport(!popoverOpenSupport)} open={popoverOpenSupport} - onClick={() => { - setPopoverOpenSupportContextMenu(false); - }} >
@@ -413,14 +410,19 @@ function SideBar() { ); return ( -
+
-
{setExpandSidebar(!expandSidebar)}}> - +
{ + setExpandSidebar(!expandSidebar); + }} + > +
edit history.replace(`${pathDomains.administration}/profile`)} /> - {( isCloud() && -
-
-
Coming Soon
- {( expandSidebar ? - <> -
-
Production
-
Memphis.dev
-
-
- - -
- - : - <> -
P
- - )} + {isCloud() && ( +
+
+
Coming Soon
+ {expandSidebar ? ( + <> +
+
Production
+
Memphis.dev
+
+
+ + +
+ + ) : ( + <> +
P
+ + )} +
-
)} + )}
Coming soon
- + Light
- + Dark
@@ -582,7 +585,9 @@ function SideBar() {
setPopoverOpenSetting(true)}>
- +

Upgrade

} diff --git a/ui_src/src/components/sideBar/style.scss b/ui_src/src/components/sideBar/style.scss index 69b0d98d9..cfc020aa2 100644 --- a/ui_src/src/components/sideBar/style.scss +++ b/ui_src/src/components/sideBar/style.scss @@ -12,34 +12,36 @@ .sidebar-user-info { cursor: pointer; - &-img {} + &-img { + } &-bottom { display: none; } } opacity: 1; - transition: all .3s ease-in-out; + transition: all 0.3s ease-in-out; &.expand { - animation: expandAnimation .5s ease-out forwards; + animation: expandAnimation 0.5s ease-out forwards; width: 205px; } &.collapse { - animation: collapseAnimation .5s ease-in-out forwards; + animation: collapseAnimation 0.5s ease-in-out forwards; width: 90px; } @keyframes expandAnimation { - 0%, 35% { + 0%, + 35% { opacity: 0; } 50% { - opacity: .3; + opacity: 0.3; } 75% { - opacity: .65; + opacity: 0.65; } 100% { opacity: 1; @@ -57,9 +59,9 @@ @mixin comingSoonBadge { position: absolute; border-radius: 32px; - background: var(--Purply-blue, #6557FF); + background: var(--Purply-blue, #6557ff); padding: 4px 8px; - color: #FFF; + color: #fff; text-align: center; font-size: 8px; font-style: normal; @@ -77,8 +79,9 @@ align-items: center; margin: 0 10px 15px; padding-top: 8px; - border-top: 1px solid #E4E4E4; - &-img {} + border-top: 1px solid #e4e4e4; + &-img { + } &-bottom { display: flex; flex-direction: column; @@ -99,12 +102,12 @@ font-style: normal; font-weight: 400; line-height: normal; - opacity: .4; + opacity: 0.4; text-align: left; } } .upgrade-plan-icon { - display: initial!important; + display: initial !important; } .item-wrapper { flex-direction: row; @@ -122,7 +125,8 @@ top: -8px; right: 5px; } - &-left {} + &-left { + } &-right { display: flex; align-items: center; @@ -138,7 +142,7 @@ text-align: left; } &-subtitle { - color: #6D6C7C; + color: #6d6c7c; font-size: 10px; font-style: normal; font-weight: 500; @@ -175,13 +179,13 @@ } &.ms-active { border-radius: 4px; - background: rgba(101, 87, 255, 0.20); + background: rgba(101, 87, 255, 0.2); position: relative; &:after { content: ''; position: absolute; border-radius: 0px 4px 4px 0px; - background: #6557FF; + background: #6557ff; height: 100%; width: 3px; top: 0; @@ -204,7 +208,7 @@ align-items: initial; .item-wrapper:first-child { padding-top: 16px; - border-top: 1px solid #E4E4E4; + border-top: 1px solid #e4e4e4; } } } @@ -221,16 +225,15 @@ } .item-wrapper { - .menu-item-env { padding: 9px 16px; border-radius: 6px; - border: 1px solid #6557FF; - background: #E8E5FF; + border: 1px solid #6557ff; + background: #e8e5ff; margin-bottom: 15px; position: relative; &-collapsed { - color: var(--Purply-blue, #6557FF); + color: var(--Purply-blue, #6557ff); font-size: 18px; font-style: normal; font-weight: 600; @@ -246,7 +249,7 @@ .ms-appearance { border-radius: 32px; - background: #F0F0F0; + background: #f0f0f0; padding: 4px; display: flex; position: relative; @@ -267,18 +270,18 @@ align-items: center; justify-content: center; padding: 6px; - opacity: .4; + opacity: 0.4; } .ms-active { padding: 6px; border-radius: 32px; border: 0.5px solid rgba(0, 0, 0, 0.04); - background: var(--system-background-light-primary, #FFF); + background: var(--system-background-light-primary, #fff); box-shadow: 0px 1px 1px 0px rgba(0, 0, 0, 0.08); opacity: 1; } &-dark { - opacity: .4; + opacity: 0.4; padding: 6px; flex: 0 0 50%; display: flex; @@ -329,7 +332,7 @@ height: 16px; background: white; border-radius: 50%; - fill: #FFF; + fill: #fff; filter: drop-shadow(0px 1px 3px rgba(0, 0, 0, 0.12)); display: flex; justify-items: center; 
@@ -339,7 +342,7 @@ cursor: pointer; padding: 5px; z-index: 2; - transition: .2s linear; + transition: 0.2s linear; &.open { transform: scale(-1, -1); } @@ -348,7 +351,6 @@ width: 100%; } } - .logo-wrapper { position: relative; .edit-logo { @@ -432,6 +434,7 @@ justify-content: center; margin-bottom: 10px; cursor: pointer; + p { line-height: 12px; margin: 0; @@ -582,6 +585,7 @@ .ant-popover-placement-rightTop { left: 60px !important; } + .support-container { a { color: #6557ff; diff --git a/ui_src/src/components/spinner/index.js b/ui_src/src/components/spinner/index.js index ccba6ebe9..ec409eb8b 100644 --- a/ui_src/src/components/spinner/index.js +++ b/ui_src/src/components/spinner/index.js @@ -15,17 +15,16 @@ import React from 'react'; import { Spin } from 'antd'; import { LoadingOutlined } from '@ant-design/icons'; -const antIcon = ( - -); - -const Spinner = () => { +const Spinner = ({ fontSize }) => { + const antIcon = ( + + ); return (
diff --git a/ui_src/src/connectors/assets/awsKinesis.svg b/ui_src/src/connectors/assets/awsKinesis.svg new file mode 100644 index 000000000..f71d4b6c0 --- /dev/null +++ b/ui_src/src/connectors/assets/awsKinesis.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/ui_src/src/connectors/assets/kafkaIcon.svg b/ui_src/src/connectors/assets/kafkaIcon.svg new file mode 100644 index 000000000..be1b4a0f0 --- /dev/null +++ b/ui_src/src/connectors/assets/kafkaIcon.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/ui_src/src/connectors/assets/s3LogoIcon.svg b/ui_src/src/connectors/assets/s3LogoIcon.svg new file mode 100644 index 000000000..1312b04a8 --- /dev/null +++ b/ui_src/src/connectors/assets/s3LogoIcon.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/ui_src/src/connectors/index.js b/ui_src/src/connectors/index.js new file mode 100644 index 000000000..e30de359b --- /dev/null +++ b/ui_src/src/connectors/index.js @@ -0,0 +1,12 @@ +import S3LogoIcon from './assets/s3LogoIcon.svg'; +import KafkaIcon from './assets/kafkaIcon.svg'; +import KinesisIcon from './assets/awsKinesis.svg'; + +import { kafka } from './kafka'; +import { kinesis } from './kinesis'; + +export const connectorTypes = [ + { name: 'kafka', icon: KafkaIcon, comment: 'Supported version: v1.0.3', inputs: kafka }, + // { name: 'kinesis', icon: KinesisIcon, inputs: kinesis }, + { name: 's3', icon: S3LogoIcon, disabled: true } +]; diff --git a/ui_src/src/connectors/kafka.js b/ui_src/src/connectors/kafka.js new file mode 100644 index 000000000..5271ecef6 --- /dev/null +++ b/ui_src/src/connectors/kafka.js @@ -0,0 +1,187 @@ +export const kafka = { + source: [ + { + name: 'name', + display: 'name', + type: 'string', + required: true + }, + { + name: 'bootstrap.servers', + display: 'bootstrap.servers', + type: 'multi', + options: [], + required: true, + placeholder: 'localhost:9092', + description: 'list of brokers' + }, + { + name: 'security.protocol', + display: 'security.protocol', + type: 'select', + options: ['SSL', 'SASL_SSL', 'No authentication'], + required: true, + description: 'SSL / SASL_SSL / No authentication', + children: true, + SSL: [ + { + name: 'ssl.mechanism', + display: 'ssl.mechanism', + type: 'string', + required: true + }, + { + name: 'ssl.certificate.pem', + display: 'ssl.certificate.pem', + type: 'string', + required: true + }, + { + name: 'ssl.key.password', + display: 'ssl.key.password', + type: 'string', + required: true + } + ], + SASL_SSL: [ + { + name: 'sasl.mechanism', + display: 'sasl.mechanism', + type: 'select', + options: ['PLAIN', 'SCRAM-SHA-256'], + required: true + }, + { + name: 'sasl.username', + display: 'sasl.username', + type: 'string', + required: true + }, + { + name: 'sasl.password', + display: 'sasl.password', + type: 'string', + required: true + } + ], + 'No authentication': [] + }, + { + name: 'group.id', + display: 'group.id', + type: 'string', + required: true, + description: 'consumer group id' + }, + { + name: 'offset', + display: 'offset', + type: 'string', + required: false, + description: 'earliest / end / specific offset (int)' + }, + { + name: 'topic', + display: 'topic', + type: 'string', + required: true, + description: 'topic name' + }, + { + name: 'partition', + display: 'partition', + type: 'string', + required: false, + description: 'partition number' + } + ], + sink: [ + { + name: 'name', + display: 'name', + type: 'string', + required: true + }, + { + name: 'bootstrap.servers', + display: 'bootstrap.servers', + type: 'multi', + options: [], + required: true, + 
placeholder: 'localhost:9092', + description: 'list of brokers' + }, + { + name: 'security.protocol', + display: 'security.protocol', + type: 'select', + options: ['SSL', 'SASL_SSL', 'No authentication'], + required: true, + description: 'SSL / SASL_SSL / No authentication', + children: true, + SSL: [ + { + name: 'ssl.mechanism', + display: 'ssl.mechanism', + type: 'string', + required: true + }, + { + name: 'ssl.certificate.pem', + display: 'ssl.certificate.pem', + type: 'string', + required: true + }, + { + name: 'ssl.key.password', + display: 'ssl.key.password', + type: 'string', + required: true + } + ], + SASL_SSL: [ + { + name: 'sasl.mechanism', + display: 'sasl.mechanism', + type: 'select', + options: ['PLAIN', 'SCRAM-SHA-256'], + required: true + }, + { + name: 'sasl.username', + display: 'sasl.username', + type: 'string', + required: true + }, + { + name: 'sasl.password', + display: 'sasl.password', + type: 'string', + required: true + } + ], + 'No authentication': [] + }, + { + name: 'offset', + display: 'offset', + type: 'string', + required: false, + description: 'earliest / end / specific offset (int)' + }, + { + name: 'topic', + display: 'topic', + type: 'string', + required: true, + description: 'topic name' + }, + { + name: 'partition', + display: 'partition', + type: 'string', + required: false, + description: 'partition number' + } + ] +}; diff --git a/ui_src/src/connectors/kinesis.js b/ui_src/src/connectors/kinesis.js new file mode 100644 index 000000000..d003e46c8 --- /dev/null +++ b/ui_src/src/connectors/kinesis.js @@ -0,0 +1,97 @@ +export const kinesis = { + source: [ + { + name: 'name', + display: 'Name', + type: 'string', + required: true + }, + { + name: 'access_key', + display: 'access_key', + type: 'string', + required: false + }, + { + name: 'secret_key', + display: 'secret_key', + type: 'string', + required: false + }, + { + name: 'role', + display: 'role', + type: 'string', + required: true + }, + { + name: 'kinesis_stream_name', + display: 'kinesis_stream_name', + type: 'string', + required: true + }, + { + name: 'shard_iterator_type', + display: 'shard_iterator_type', + type: 'select', + options: ['LATEST', 'TRIM_HORIZON', 'AT_SEQUENCE_NUMBER', 'AFTER_SEQUENCE_NUMBER', 'AT_TIMESTAMP'], + required: true + }, + { + name: 'shard_id', + display: 'shard_id', + type: 'string', + required: true + }, + { + name: 'region', + display: 'region', + type: 'string', + required: true + } + ], + sink: [ + { + name: 'name', + display: 'Name', + type: 'string', + required: true + }, + { + name: 'access_key', + display: 'access_key', + type: 'string', + required: false + }, + { + name: 'secret_key', + display: 'secret_key', + type: 'string', + required: false + }, + { + name: 'role', + display: 'role', + type: 'string', + required: true + }, + { + name: 'region', + display: 'region', + type: 'string', + required: true + }, + { + name: 'kinesis_stream_name', + display: 'kinesis_stream_name', + type: 'string', + required: true + }, + { + name: 'partition_key', + display: 'partition_key', + type: 'string', + required: true + } + ] +}; diff --git a/ui_src/src/const/apiEndpoints.js b/ui_src/src/const/apiEndpoints.js index b9f38d51a..0e8a69089 100644 --- a/ui_src/src/const/apiEndpoints.js +++ b/ui_src/src/const/apiEndpoints.js @@ -52,6 +52,10 @@ export const ApiEndpoints = { PRODUCE: '/stations/produce', ATTACH_DLS: '/stations/attachDlsStation', DETACH_DLS: '/stations/detachDlsStation', + CREATE_CONNECTOR: '/stations/createConnector', + REMOVE_CONNECTOR: '/stations/removeConnector', 
+ START_CONNECTOR: '/stations/startConnector', + STOP_CONNECTOR: '/stations/stopConnector', //Async Tasks GET_ASYNC_TASKS: '/asyncTasks/getAsyncTasks', diff --git a/ui_src/src/domain/administration/index.js b/ui_src/src/domain/administration/index.js index 2aa72bc73..b1294f1f3 100644 --- a/ui_src/src/domain/administration/index.js +++ b/ui_src/src/domain/administration/index.js @@ -60,10 +60,7 @@ function Administration({ step }) { case 'cluster_configuration': return ; case 'system_information': - if (!isCloud()) { - return ; - } - break; + return ; case 'usage': return ; case 'payments': diff --git a/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/index.js b/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/index.js index 71f658fc9..bd90619c0 100644 --- a/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/index.js +++ b/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/index.js @@ -13,13 +13,18 @@ import './style.scss'; import React, { useContext, useEffect, useRef, useState } from 'react'; -import { Space } from 'antd'; +import { Space, Popover } from 'antd'; import { Virtuoso } from 'react-virtuoso'; import { FiPlayCircle } from 'react-icons/fi'; - import { ReactComponent as WaitingProducerIcon } from '../../../../assets/images/waitingProducer.svg'; import { ReactComponent as WaitingConsumerIcon } from '../../../../assets/images/waitingConsumer.svg'; import { ReactComponent as PlayVideoIcon } from '../../../../assets/images/playVideoIcon.svg'; +import { ReactComponent as PurplePlus } from '../../../../assets/images/purplePlus.svg'; +import { ReactComponent as ProducerIcon } from '../../../../assets/images/producerIcon.svg'; +import { ReactComponent as ConnectIcon } from '../../../../assets/images/connectIcon.svg'; +import { IoPlayCircleOutline, IoRemoveCircleOutline, IoPause } from 'react-icons/io5'; + +import { HiDotsVertical } from 'react-icons/hi'; import OverflowTip from '../../../../components/tooltip/overflowtip'; import { ReactComponent as UnsupportedIcon } from '../../../../assets/images/unsupported.svg'; import StatusIndication from '../../../../components/indication'; @@ -29,12 +34,41 @@ import Button from '../../../../components/button'; import Modal from '../../../../components/modal'; import { StationStoreContext } from '../..'; import ProduceMessages from '../../../../components/produceMessages'; +import ConnectorModal from '../../../../components/connectorModal'; import { ReactComponent as ErrorModalIcon } from '../../../../assets/images/errorModal.svg'; +import { ApiEndpoints } from '../../../../const/apiEndpoints'; +import { httpRequest } from '../../../../services/http'; +import { loader } from '@monaco-editor/react'; +import Spinner from '../../../../components/spinner'; + +const overlayStylesConnectors = { + borderRadius: '8px', + width: '230px', + paddingTop: '5px', + paddingBottom: '5px', + marginBottom: '10px' +}; + +const overlayStyleConnectors = { borderRadius: '8px', width: '150px', paddingTop: '5px', paddingBottom: '5px' }; + +const MenuItem = ({ name, onClick, icon, disabled, loader }) => { + return ( +
+ + {icon} + + + {loader && } +
+ ); +}; const ProduceConsumList = ({ producer }) => { const [stationState, stationDispatch] = useContext(StationStoreContext); const [selectedRowIndex, setSelectedRowIndex] = useState(0); const [producersList, setProducersList] = useState([]); + const [connectorsSourceList, setConnectorsSourceList] = useState([]); + const [connectorsSinkList, setConnectorsSinkList] = useState([]); const [cgsList, setCgsList] = useState([]); const [openProduceMessages, setOpenProduceMessages] = useState(false); const [cgDetails, setCgDetails] = useState([]); @@ -44,15 +78,126 @@ const ProduceConsumList = ({ producer }) => { const [produceloading, setProduceLoading] = useState(false); const [openNoConsumer, setOpenNoConsumer] = useState(false); const [activeConsumerList, setActiveConsumerList] = useState([]); + const [openProducerPopover, setOpenProducerPopover] = useState(false); + const [openConnectorPopover, setOpenConnectorPopover] = useState(false); + const [openConnectorPopoverItem, setOpenConnectorPopoverItem] = useState(null); + const [selectedConnector, setSelectedConnector] = useState(null); + const [openConnectorModal, setOpenConnectorModal] = useState(false); + const [loading, setLoader] = useState(false); + const producerItemsList = [ + { + action: 'Produce Synthetic Data', + onClick: () => { + setOpenProduceMessages(true); + setOpenProducerPopover(false); + } + }, + { + action: 'Develop a Producer', + onClick: () => { + setOpenCreateProducer(true); + setOpenProducerPopover(false); + } + }, + { + action: 'Produce using REST', + onClick: () => { + // setOpenProduceMessages(true); + setOpenProducerPopover(false); + }, + disabled: true + }, + { + action: 'Add a Source', + onClick: () => { + setOpenConnectorModal(true); + setOpenProducerPopover(false); + } + } + ]; + + const removeConnector = async (type) => { + setLoader(true); + try { + await httpRequest('POST', ApiEndpoints.REMOVE_CONNECTOR, { + connector_id: selectedConnector?.id + }); + if (type === 'source') { + let newConnecorList = [...connectorsSourceList]; + newConnecorList.splice(openConnectorPopoverItem, 1); + setConnectorsSourceList(newConnecorList); + } + if (type === 'sink') { + let newConnecorList = [...connectorsSinkList]; + newConnecorList.splice(openConnectorPopoverItem, 1); + setConnectorsSinkList(newConnecorList); + } + setLoader(false); + setOpenConnectorPopover(false); + } catch (error) { + setLoader(false); + } + }; + + const startConnector = async (type) => { + setLoader(true); + try { + await httpRequest('POST', ApiEndpoints.START_CONNECTOR, { + connector_id: selectedConnector?.id + }); + if (type === 'source') { + let newConnecorList = [...connectorsSourceList]; + newConnecorList[openConnectorPopoverItem].is_active = true; + setConnectorsSourceList(newConnecorList); + } + if (type === 'sink') { + let newConnecorList = [...connectorsSinkList]; + newConnecorList[openConnectorPopoverItem].is_active = true; + setConnectorsSinkList(newConnecorList); + } + setLoader(false); + setOpenConnectorPopover(false); + } catch (error) { + setLoader(false); + } + }; + const stopConnector = async (type) => { + setLoader(true); + try { + await httpRequest('POST', ApiEndpoints.STOP_CONNECTOR, { + connector_id: selectedConnector?.id + }); + if (type === 'source') { + let newConnecorList = [...connectorsSourceList]; + newConnecorList[openConnectorPopoverItem].is_active = false; + setConnectorsSourceList(newConnecorList); + } + if (type === 'sink') { + let newConnecorList = [...connectorsSinkList]; + 
newConnecorList[openConnectorPopoverItem].is_active = false; + setConnectorsSinkList(newConnecorList); + } + setLoader(false); + setOpenConnectorPopover(false); + } catch (error) { + setLoader(false); + } + }; + + const handleNewConnector = (connector, source) => { + source ? setConnectorsSourceList([...connectorsSourceList, connector]) : setConnectorsSinkList([...connectorsSinkList, ...connector]); + }; useEffect(() => { if (producer) { let [result, activeConsumers] = concatFunction('producer', stationState?.stationSocketData); setProducersList(result); + setConnectorsSourceList(stationState?.stationSocketData?.connectors?.filter((connector) => connector?.connector_type === 'source')); setActiveConsumerList(activeConsumers); } else { let result = concatFunction('cgs', stationState?.stationSocketData); setCgsList(result); + setConnectorsSinkList(stationState?.stationSocketData?.connectors?.filter((connector) => connector?.connector_type === 'sink')); } }, [stationState?.stationSocketData]); @@ -153,46 +298,62 @@ const ProduceConsumList = ({ producer }) => {
{producer && ( - <> -

Producers {producersList?.length > 0 && `(${producersList?.length})`}

-
- } - colorType={'purple'} - radiusType="circle" - border={'gray-light'} - backgroundColorType={'white'} - fontSize="12px" - fontFamily="InterSemiBold" - onClick={() => setOpenProduceMessages(true)} - /> - + +

+ Sources {(producersList?.length > 0 || connectorsSourceList?.length > 0) && `(${producersList?.length + connectorsSourceList?.length})`} +

+ ( + + ))} + trigger="click" + onOpenChange={() => setOpenProducerPopover(!openProducerPopover)} + open={openProducerPopover} + > + + +
+ )} + {!producer && ( + +

+ Consumer groups {(cgsList?.length > 0 || connectorsSinkList?.length > 0) && `(${cgsList?.length + connectorsSinkList?.length})`} +

+ ( + + ))} + trigger="click" + onOpenChange={() => setOpenProducerPopover(!openProducerPopover)} + open={openProducerPopover} + > + + +
)} - {!producer &&

Consumer groups {cgsList?.length > 0 && `(${cgsList?.length})`}

}
- {producer && producersList?.length > 0 && ( + {producer && (producersList?.length > 0 || connectorsSourceList?.length > 0) && (
Name Count Status +
)} - {!producer && cgsList.length > 0 && ( + {!producer && (cgsList.length > 0 || connectorsSinkList?.length > 0) && (
Name Unacknowledged Unprocessed Status +
)} - {(producersList?.length > 0 || cgsList?.length > 0) && ( + {(producersList?.length > 0 || connectorsSourceList?.length > 0 || connectorsSinkList?.length > 0 || cgsList?.length > 0) && (
{ })` }} > - {producer && producersList?.length > 0 && ( + {producer && (producersList?.length > 0 || connectorsSourceList?.length > 0) && ( ( -
onSelectedRow(index, 'producer')}> - - {row.name} - - - {row.connected_producers_count} +
onSelectedRow(index, 'producer')}> + + {row?.connector_connection_id ? : } + + {row.name} + + + + + {row.count || 1} - + + { + setOpenConnectorPopoverItem(index); + setSelectedConnector(row); + setOpenConnectorPopover(!openConnectorPopover); + }} + open={openConnectorPopover && openConnectorPopoverItem === index} + content={ + <> + (row?.is_active ? stopConnector('source') : startConnector('source'))} + icon={row?.is_active ? : } + /> + removeConnector('source')} + icon={} + loader={loading && openConnectorPopoverItem === index} + /> + + } + trigger="click" + > +
row?.connector_connection_id && e.stopPropagation()} + > + {row?.connector_connection_id && } +
+
)} /> )} - {!producer && cgsList?.length > 0 && ( + {!producer && (cgsList?.length > 0 || connectorsSinkList?.length > 0) && ( ( -
onSelectedRow(index, 'consumer')}> - - {row.name} - +
onSelectedRow(index, 'consumer')}> + + {row?.connector_connection_id ? : } + + {row.name} + + 0 ? '#F7685B' : null} + textColor={row?.poison_messages > 0 ? '#F7685B' : null} > - {row.poison_messages.toLocaleString()} + {row?.poison_messages?.toLocaleString()} - - {row.unprocessed_messages.toLocaleString()} + + {row?.unprocessed_messages?.toLocaleString()} - + + { + setOpenConnectorPopoverItem(index); + setOpenConnectorPopover(!openConnectorPopover); + }} + open={openConnectorPopover && openConnectorPopoverItem === index} + content={ + <> + (row?.is_active ? stopConnector('sink') : startConnector('sink'))} + icon={row?.is_active ? : } + /> + removeConnector('sink')} icon={} /> + + } + trigger="click" + > +
{ + if (row?.connector_connection_id) e.stopPropagation(); + }} + > + {row?.connector_connection_id && } +
+
)} /> )}
- {producer && producersList?.length > 0} + {producer && (producersList?.length > 0 || connectorsSourceList?.length > 0)} {!producer && cgsList?.length > 0 && ( @@ -262,7 +492,8 @@ const ProduceConsumList = ({ producer }) => {
)} - {((producer && producersList?.length === 0) || (!producer && cgsList?.length === 0)) && ( + {((producer && producersList?.length === 0 && connectorsSourceList?.length === 0) || + (!producer && cgsList?.length === 0 && connectorsSinkList?.length === 0)) && (
{producer ? : }

{`No ${producer ? 'producers' : 'consumers'} yet`}

@@ -307,15 +538,7 @@ const ProduceConsumList = ({ producer }) => {
)}
- { - setOpenCreateConsumer(false); - }} - open={openCreateConsumer} - displayButtons={false} - > + setOpenCreateConsumer(false)} open={openCreateConsumer} displayButtons={false}> {

The message will not be stored

+ setOpenConnectorModal(false)} + newConnecor={(connector, source) => handleNewConnector(connector, source)} + source={producer} + />
); }; diff --git a/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/style.scss b/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/style.scss index 057dc117a..343a3faee 100644 --- a/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/style.scss +++ b/ui_src/src/domain/stationOverview/stationObservabilty/ProduceConsumList/style.scss @@ -8,16 +8,30 @@ box-shadow: 0px 0px 4px 3px rgba(204, 204, 204, 0.19); border-radius: 8px; padding: 15px 0px 15px 0px; + .connector-name { + display: flex; + align-items: center; + gap: 4px; + } .header { display: flex; justify-content: space-between; border-bottom: 1px solid #e9e9e9; line-height: 35px; padding: 0 15px 5px 15px; - .title { - font-family: 'InterSemiBold'; - font-size: 14px; - margin: 0; + .poduce-consume-header { + display: flex; + align-items: center; + justify-content: space-between; + width: 100%; + .title { + font-family: 'InterSemiBold'; + font-size: 14px; + margin: 0; + } + .add { + cursor: pointer; + } } .add-connector-button { cursor: pointer; @@ -50,7 +64,7 @@ .rows-wrapper { position: relative; width: calc(100%); - height: calc(100% - 90px); + height: calc(100% - 70px); overflow: auto; padding-left: 10px; display: flex; @@ -70,10 +84,35 @@ padding: 5px; margin-right: 10px; margin-top: 5px; + .connector-options-selected { + background-color: #e3e0ff; + height: 20px; + width: 20px; + display: flex; + justify-content: center; + align-items: center; + border-radius: 3px; + cursor: pointer; + color: var(--purple); + } + .connector-options { + background-color: transparent; + height: 20px; + width: 20px; + display: flex; + justify-content: center; + align-items: center; + border-radius: 3px; + cursor: pointer; + color: rgba(154, 154, 160, 0.8); + } .status-icon { position: relative; display: flex; justify-content: center; + align-items: center; + gap: 4px; + width: 38px; .circle-status { border-radius: 32px; width: 18px; @@ -214,3 +253,27 @@ opacity: 0; } } +.item-wrapper-connectors { + padding-left: 5px; + padding-right: 5px; + margin: 0 5px; + height: 34px; + display: flex; + align-items: center; + justify-content: space-between; + gap: 5px; + cursor: pointer; + .item-name { + display: flex; + align-items: center; + gap: 5px; + } + label { + cursor: pointer; + } +} +.item-wrapper-connectors:hover { + background: #e3e0ff; + border-radius: 4px; + color: var(--purple); +} diff --git a/ui_src/src/domain/stationOverview/stationObservabilty/components/functionsOverview/style.scss b/ui_src/src/domain/stationOverview/stationObservabilty/components/functionsOverview/style.scss index c51369edc..7ee05681d 100644 --- a/ui_src/src/domain/stationOverview/stationObservabilty/components/functionsOverview/style.scss +++ b/ui_src/src/domain/stationOverview/stationObservabilty/components/functionsOverview/style.scss @@ -85,11 +85,11 @@ width: 100%; position: absolute; left: 0; - top: 48.5px; + top: 50px; display: flex; flex-wrap: nowrap; overflow: hidden; - height: 61px; + height: 62px; } &-left { diff --git a/ui_src/src/domain/stationOverview/stationOverviewHeader/index.js b/ui_src/src/domain/stationOverview/stationOverviewHeader/index.js index d3540e5d9..7933815da 100644 --- a/ui_src/src/domain/stationOverview/stationOverviewHeader/index.js +++ b/ui_src/src/domain/stationOverview/stationOverviewHeader/index.js @@ -249,10 +249,10 @@ const StationOverviewHeader = () => { Dead-letter for: { stationState?.stationSocketData?.act_as_dls_station_in_stations && 
stationState?.stationSocketData?.act_as_dls_station_in_stations.length ? - - {stationState?.stationSocketData?.act_as_dls_station_in_stations.join(', ')} - - : + + {stationState?.stationSocketData?.act_as_dls_station_in_stations.join(', ')} + + : }

@@ -285,7 +285,9 @@ const StationOverviewHeader = () => {

Schema validation

{stationState?.stationSocketData?.schema !== undefined && Object.keys(stationState?.stationSocketData?.schema).length !== 0 && (
- {stationState?.stationSocketData?.schema?.updates_available && } + {stationState?.stationSocketData?.schema?.updates_available && + stationState?.stationSocketData?.schema?.updates_available?.length > 0 && + }
)}
@@ -307,7 +309,7 @@ const StationOverviewHeader = () => {

v{stationState?.stationSocketData?.schema?.version_number}

)} - {stationState?.stationSocketData?.schema === undefined || + {stationState?.stationSocketData?.schema && (Object.keys(stationState?.stationSocketData?.schema).length === 0 ? ( <>
@@ -346,7 +348,8 @@ const StationOverviewHeader = () => { boxShadowStyle="float" onClick={() => setDeleteModal(true)} /> - {stationState?.stationSocketData?.schema?.updates_available && ( + {stationState?.stationSocketData?.schema?.updates_available && + stationState?.stationSocketData?.schema?.updates_available.length && (
From ea9e624a545693b7b686d7067e90aa240824531d Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Thu, 7 Dec 2023 11:16:32 +0200 Subject: [PATCH 11/16] bugfix --- server/reload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/reload.go b/server/reload.go index 953623429..b2bb329bc 100644 --- a/server/reload.go +++ b/server/reload.go @@ -1067,7 +1067,7 @@ func imposeOrder(value interface{}) error { sort.Strings(value.AllowedOrigins) case string, bool, uint8, int, int32, int64, time.Duration, float64, nil, LeafNodeOpts, ClusterOpts, *tls.Config, PinnedCertSet, *URLAccResolver, *MemAccResolver, *DirAccResolver, *CacheDirAccResolver, Authentication, MQTTOpts, jwt.TagList, - *OCSPConfig, map[string]string, JSLimitOpts, StoreCipher, *OCSPResponseCacheConfig: + *OCSPConfig, map[string]string, JSLimitOpts, StoreCipher, *OCSPResponseCacheConfig, map[string]int: // ** map[string]int added by Memphis // explicitly skipped types default: // this will fail during unit tests From 1b175586a6d3bf85f76b1aaf97db52092a616d28 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Thu, 7 Dec 2023 13:56:59 +0200 Subject: [PATCH 12/16] bugfix --- server/parser.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/server/parser.go b/server/parser.go index 8501879ab..85fea811a 100644 --- a/server/parser.go +++ b/server/parser.go @@ -16,6 +16,7 @@ package server import ( "bufio" "bytes" + "encoding/json" "fmt" "net/http" "net/textproto" @@ -919,6 +920,13 @@ func (c *client) parse(buf []byte) error { c.argBuf = nil } else { arg = buf[c.as : i-c.drop] + + d := json.NewDecoder(strings.NewReader(string(arg))) + err := d.Decode(&c.opts) + + if err != nil { + return err + } } if err := c.overMaxControlLineLimit(arg, mcl); err != nil { return err From ce2becc61d756001298ed937d41f15430de5254a Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Thu, 7 Dec 2023 15:30:04 +0200 Subject: [PATCH 13/16] bugfix --- server/memphis_cloud.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/memphis_cloud.go b/server/memphis_cloud.go index 0bfc6c973..cbfe57876 100644 --- a/server/memphis_cloud.go +++ b/server/memphis_cloud.go @@ -1835,6 +1835,9 @@ func (mh MonitoringHandler) GetBrokersThroughputs(tenantName string) ([]models.B if streamInfo.State.FirstSeq > 0 { startSeq = streamInfo.State.FirstSeq } + if streamInfo.State.LastSeq > ws_updates_interval_sec { + startSeq = streamInfo.State.LastSeq - ws_updates_interval_sec + 1 + } cc := ConsumerConfig{ OptStartSeq: startSeq, From 8d2db48bf7491a9083ca3938cbfe6cdb5ee0d050 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Fri, 8 Dec 2023 15:13:31 +0200 Subject: [PATCH 14/16] remove unnecessary replicas mention --- server/memphis_helper.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/server/memphis_helper.go b/server/memphis_helper.go index b89c008a5..25f135080 100644 --- a/server/memphis_helper.go +++ b/server/memphis_helper.go @@ -541,13 +541,11 @@ func tryCreateInternalJetStreamResources(s *Server, retentionDur time.Duration, // create function tasks consumer if shouldCreateSystemTasksStream() && !FUNCTIONS_TASKS_CONSUMER_CREATED { - replicas := GetStationReplicas(1) cc := ConsumerConfig{ Durable: FUNCTION_TASKS_CONSUMER, DeliverPolicy: DeliverAll, AckPolicy: AckExplicit, MaxAckPending: 1, - Replicas: replicas, FilterSubject: systemTasksStreamName + ".functions", AckWait: time.Duration(90) * time.Second, MaxDeliver: 5, From 985887f18d9a6b9b7ebdf403aec5fb8f3d3b4253 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Sun, 10 Dec 2023 
11:25:00 +0200 Subject: [PATCH 15/16] add time based retention layer for the throughput stream --- server/memphis_helper.go | 1 + 1 file changed, 1 insertion(+) diff --git a/server/memphis_helper.go b/server/memphis_helper.go index 25f135080..743b61ebd 100644 --- a/server/memphis_helper.go +++ b/server/memphis_helper.go @@ -486,6 +486,7 @@ func tryCreateInternalJetStreamResources(s *Server, retentionDur time.Duration, MaxConsumers: -1, MaxMsgs: int64(-1), MaxBytes: int64(-1), + MaxAge: time.Second * ws_updates_interval_sec, // since it stores only 1 msg per second Discard: DiscardOld, MaxMsgsPer: ws_updates_interval_sec, Storage: FileStorage, From 3db4ecf617ee44f51b557467334606f1ed702a60 Mon Sep 17 00:00:00 2001 From: idanasulinStrech Date: Mon, 11 Dec 2023 10:09:36 +0200 Subject: [PATCH 16/16] update the memphis package --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d27980bf9..8a0f6b6dc 100644 --- a/go.mod +++ b/go.mod @@ -113,7 +113,7 @@ require ( github.com/leodido/go-urn v1.2.4 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.19 // indirect - github.com/memphisdev/memphis.go v1.2.0 + github.com/memphisdev/memphis.go v1.2.1-beta.1 github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/go.sum b/go.sum index 7dcbca35f..9d4e27a1f 100644 --- a/go.sum +++ b/go.sum @@ -275,8 +275,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/memphisdev/memphis.go v1.2.0 h1:kRqpUwLTLeh5YWpV2/uMUllTP6fmYxcr0FGgqy4n/Jo= -github.com/memphisdev/memphis.go v1.2.0/go.mod h1:IkB6GmtCq2lvtu1t1UmaIstKHioiN3+cwGR3q8mALok= +github.com/memphisdev/memphis.go v1.2.1-beta.1 h1:Si3/QRkZZj63jwlNTnriBrCiyVXYx5cd+YxiU4C8Uz4= +github.com/memphisdev/memphis.go v1.2.1-beta.1/go.mod h1:IkB6GmtCq2lvtu1t1UmaIstKHioiN3+cwGR3q8mALok= github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
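
A note on patch 11 (server/reload.go): imposeOrder deep-sorts every option field so that configuration reload can compare old and new Opts deterministically, and its default branch is written to fail during unit tests whenever a field type has not been classified as either sortable or skippable. That is why the Memphis map[string]int field has to be appended to the explicitly skipped types. A minimal sketch of the pattern, with an illustrative function name and a reduced type list (not the server's actual code):

package main

import (
	"fmt"
	"sort"
)

// imposeOrderSketch mirrors the classify-or-fail pattern of imposeOrder:
// sortable types are sorted in place, known unsortable types are skipped
// explicitly, and anything unlisted returns an error so unit tests catch
// newly added option types immediately.
func imposeOrderSketch(value interface{}) error {
	switch v := value.(type) {
	case []string:
		sort.Strings(v) // orderable: sort in place
	case string, bool, int, map[string]int:
		// explicitly skipped: nothing to sort
	default:
		return fmt.Errorf("could not sort or skip type %T", v)
	}
	return nil
}

func main() {
	fmt.Println(imposeOrderSketch(map[string]int{"a": 1})) // <nil>
	fmt.Println(imposeOrderSketch(3.14))                   // error: float64 is unclassified
}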
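
On patch 12 (server/parser.go): the new hunk decodes the parsed argument into c.opts via json.NewDecoder(strings.NewReader(string(arg))). The hunk only adds the encoding/json import, so it presumably relies on strings already being imported elsewhere in the file. For a single JSON object such as a CONNECT payload, json.Unmarshal on the []byte slice is equivalent and skips the string conversion and Reader allocation. A self-contained sketch, where clientOpts is a stand-in for the server's real options struct:

package main

import (
	"encoding/json"
	"fmt"
)

// clientOpts stands in for the c.opts struct in the server;
// only two representative fields are shown here.
type clientOpts struct {
	Verbose bool   `json:"verbose"`
	Name    string `json:"name"`
}

func main() {
	arg := []byte(`{"verbose":true,"name":"demo"}`)

	// Equivalent to json.NewDecoder(strings.NewReader(string(arg))).Decode(&opts)
	// for a single JSON value: Unmarshal accepts the byte slice directly.
	var opts clientOpts
	if err := json.Unmarshal(arg, &opts); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%+v\n", opts)
}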
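
Patches 13 and 15 together bound the broker-throughput stream to a sliding window: the consumer built in GetBrokersThroughputs now starts at most ws_updates_interval_sec sequences behind the head of the stream, and the stream itself ages messages out after the same number of seconds (MaxAge), matching the one-message-per-second write rate noted in the patch comment. A sketch of the start-sequence arithmetic from patch 13; windowStart and its parameter names are illustrative, with firstSeq and lastSeq corresponding to streamInfo.State.FirstSeq and LastSeq:

package main

import "fmt"

// windowStart mirrors the start-sequence selection in patch 13:
// begin at the first stored sequence, but never replay more than
// windowSize messages behind the head of the stream.
func windowStart(firstSeq, lastSeq, windowSize uint64) uint64 {
	startSeq := uint64(1)
	if firstSeq > 0 {
		startSeq = firstSeq
	}
	if lastSeq > windowSize {
		// clamp to the newest windowSize sequences, inclusive
		startSeq = lastSeq - windowSize + 1
	}
	return startSeq
}

func main() {
	// 100 messages stored with a 5-second window: replay only 96..100.
	fmt.Println(windowStart(1, 100, 5))
}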