From 88c5165bfdc38fad315628d1f2403e70e7634394 Mon Sep 17 00:00:00 2001
From: Lantao Liu
Date: Wed, 27 Jun 2018 22:47:55 +0000
Subject: [PATCH] Update containerd and k8s.

Signed-off-by: Lantao Liu
---
 vendor.conf | 18 +-
 .../containerd/console/console_linux.go | 16 +-
 .../containerd/console/console_windows.go | 108 +-
 vendor/github.com/json-iterator/go/README.md | 5 +
 .../go/{feature_adapter.go => adapter.go} | 47 +-
 .../go/{feature_any.go => any.go} | 82 +-
 .../go/{feature_any_array.go => any_array.go} | 0
 .../go/{feature_any_bool.go => any_bool.go} | 0
 .../go/{feature_any_float.go => any_float.go} | 0
 .../go/{feature_any_int32.go => any_int32.go} | 0
 .../go/{feature_any_int64.go => any_int64.go} | 0
 ...{feature_any_invalid.go => any_invalid.go} | 0
 .../go/{feature_any_nil.go => any_nil.go} | 0
 .../{feature_any_number.go => any_number.go} | 37 +-
 .../{feature_any_object.go => any_object.go} | 0
 .../go/{feature_any_string.go => any_str.go} | 2 +-
 .../{feature_any_uint32.go => any_uint32.go} | 0
 .../{feature_any_uint64.go => any_uint64.go} | 0
 .../go/{feature_config.go => config.go} | 253 +--
 .../json-iterator/go/feature_json_number.go | 31 -
 .../json-iterator/go/feature_reflect.go | 721 ---------
 .../json-iterator/go/feature_reflect_array.go | 99 --
 .../json-iterator/go/feature_reflect_map.go | 244 ---
 .../go/feature_reflect_native.go | 764 ---------
 .../json-iterator/go/feature_reflect_slice.go | 147 --
 .../json-iterator/go/feature_stream_int.go | 320 ----
 .../go/{feature_iter.go => iter.go} | 0
 .../{feature_iter_array.go => iter_array.go} | 0
 .../{feature_iter_float.go => iter_float.go} | 6 +
 .../go/{feature_iter_int.go => iter_int.go} | 97 +-
 ...{feature_iter_object.go => iter_object.go} | 141 +-
 .../go/{feature_iter_skip.go => iter_skip.go} | 0
 ...ter_skip_sloppy.go => iter_skip_sloppy.go} | 0
 ...ter_skip_strict.go => iter_skip_strict.go} | 0
 .../{feature_iter_string.go => iter_str.go} | 0
 .../go/{feature_pool.go => pool.go} | 35 +-
 vendor/github.com/json-iterator/go/reflect.go | 330 ++++
 .../json-iterator/go/reflect_array.go | 104 ++
 .../json-iterator/go/reflect_dynamic.go | 70 +
 ...lect_extension.go => reflect_extension.go} | 281 ++--
 .../json-iterator/go/reflect_json_number.go | 112 ++
 .../go/reflect_json_raw_message.go | 60 +
 .../json-iterator/go/reflect_map.go | 318 ++++
 .../json-iterator/go/reflect_marshaler.go | 218 +++
 .../json-iterator/go/reflect_native.go | 451 ++++++
 .../json-iterator/go/reflect_optional.go | 133 ++
 .../json-iterator/go/reflect_slice.go | 99 ++
 ...t_decoder.go => reflect_struct_decoder.go} | 532 +++---
 ...ct_object.go => reflect_struct_encoder.go} | 158 +-
 .../go/{feature_stream.go => stream.go} | 155 +-
 ...eature_stream_float.go => stream_float.go} | 14 +-
 .../github.com/json-iterator/go/stream_int.go | 190 +++
 ...feature_stream_string.go => stream_str.go} | 40 +-
 .../github.com/modern-go/concurrent/LICENSE | 201 +++
 .../github.com/modern-go/concurrent/README.md | 49 +
 .../modern-go/concurrent/executor.go | 14 +
 .../modern-go/concurrent/go_above_19.go | 15 +
 .../modern-go/concurrent/go_below_19.go | 33 +
 vendor/github.com/modern-go/concurrent/log.go | 13 +
 .../concurrent/unbounded_executor.go | 119 ++
 vendor/github.com/modern-go/reflect2/LICENSE | 201 +++
 .../github.com/modern-go/reflect2/README.md | 71 +
 .../modern-go/reflect2/go_above_17.go | 8 +
 .../modern-go/reflect2/go_above_19.go | 14 +
 .../modern-go/reflect2/go_below_17.go | 9 +
 .../modern-go/reflect2/go_below_19.go | 14 +
 .../github.com/modern-go/reflect2/reflect2.go | 295 ++++
 .../modern-go/reflect2/reflect2_amd64.s | 0
 .../modern-go/reflect2/reflect2_kind.go | 30 +
 .../modern-go/reflect2/relfect2_386.s | 0
 .../modern-go/reflect2/relfect2_amd64p32.s | 0
 .../modern-go/reflect2/relfect2_arm.s | 0
 .../modern-go/reflect2/relfect2_arm64.s | 0
 .../modern-go/reflect2/relfect2_mips64x.s | 0
 .../modern-go/reflect2/relfect2_mipsx.s | 0
 .../modern-go/reflect2/relfect2_ppc64x.s | 0
 .../modern-go/reflect2/relfect2_s390x.s | 0
 .../modern-go/reflect2/safe_field.go | 58 +
 .../github.com/modern-go/reflect2/safe_map.go | 101 ++
 .../modern-go/reflect2/safe_slice.go | 92 ++
 .../modern-go/reflect2/safe_struct.go | 29 +
 .../modern-go/reflect2/safe_type.go | 78 +
 .../github.com/modern-go/reflect2/type_map.go | 103 ++
 .../modern-go/reflect2/unsafe_array.go | 65 +
 .../modern-go/reflect2/unsafe_eface.go | 59 +
 .../modern-go/reflect2/unsafe_field.go | 74 +
 .../modern-go/reflect2/unsafe_iface.go | 64 +
 .../modern-go/reflect2/unsafe_link.go | 70 +
 .../modern-go/reflect2/unsafe_map.go | 138 ++
 .../modern-go/reflect2/unsafe_ptr.go | 46 +
 .../modern-go/reflect2/unsafe_slice.go | 177 ++
 .../modern-go/reflect2/unsafe_struct.go | 59 +
 .../modern-go/reflect2/unsafe_type.go | 85 +
 vendor/k8s.io/api/core/v1/generated.proto | 8 +-
 vendor/k8s.io/api/core/v1/types.go | 8 +-
 .../core/v1/types_swagger_doc_generated.go | 8 +-
 .../api/core/v1/zz_generated.deepcopy.go | 1424 +++++------------
 .../pkg/api/resource/generated.proto | 5 -
 .../apimachinery/pkg/api/resource/quantity.go | 7 +-
 .../internalversion/zz_generated.deepcopy.go | 8 +-
 .../apimachinery/pkg/apis/meta/v1/meta.go | 54 +-
 .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 102 +-
 .../meta/v1beta1/zz_generated.deepcopy.go | 5 +-
 .../pkg/runtime/serializer/json/json.go | 82 +-
 .../pkg/util/validation/field/errors.go | 2 +-
 .../k8s.io/apimachinery/pkg/util/wait/wait.go | 4 +-
 .../v1alpha1/zz_generated.deepcopy.go | 29 +-
 .../v1beta1/zz_generated.deepcopy.go | 14 +-
 .../zz_generated.deepcopy.go | 29 +-
 .../clientcmd/api/zz_generated.deepcopy.go | 44 +-
 vendor/k8s.io/client-go/util/cert/io.go | 12 +-
 111 files changed, 6122 insertions(+), 4545 deletions(-)
 rename vendor/github.com/json-iterator/go/{feature_adapter.go => adapter.go} (76%)
 rename vendor/github.com/json-iterator/go/{feature_any.go => any.go} (75%)
 rename vendor/github.com/json-iterator/go/{feature_any_array.go => any_array.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_bool.go => any_bool.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_float.go => any_float.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_int32.go => any_int32.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_int64.go => any_int64.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_invalid.go => any_invalid.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_nil.go => any_nil.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_number.go => any_number.go} (76%)
 rename vendor/github.com/json-iterator/go/{feature_any_object.go => any_object.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_string.go => any_str.go} (97%)
 rename vendor/github.com/json-iterator/go/{feature_any_uint32.go => any_uint32.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_any_uint64.go => any_uint64.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_config.go => config.go} (60%)
 delete mode 100644 vendor/github.com/json-iterator/go/feature_json_number.go
 delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect.go
 delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_array.go
 delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_map.go
 delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_native.go
 delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_slice.go
 delete mode 100644 vendor/github.com/json-iterator/go/feature_stream_int.go
 rename vendor/github.com/json-iterator/go/{feature_iter.go => iter.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_iter_array.go => iter_array.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_iter_float.go => iter_float.go} (98%)
 rename vendor/github.com/json-iterator/go/{feature_iter_int.go => iter_int.go} (72%)
 rename vendor/github.com/json-iterator/go/{feature_iter_object.go => iter_object.go} (68%)
 rename vendor/github.com/json-iterator/go/{feature_iter_skip.go => iter_skip.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_iter_skip_sloppy.go => iter_skip_sloppy.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_iter_skip_strict.go => iter_skip_strict.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_iter_string.go => iter_str.go} (100%)
 rename vendor/github.com/json-iterator/go/{feature_pool.go => pool.go} (64%)
 create mode 100644 vendor/github.com/json-iterator/go/reflect.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go
 rename vendor/github.com/json-iterator/go/{feature_reflect_extension.go => reflect_extension.go} (55%)
 create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go
 create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go
 rename vendor/github.com/json-iterator/go/{feature_reflect_struct_decoder.go => reflect_struct_decoder.go} (66%)
 rename vendor/github.com/json-iterator/go/{feature_reflect_object.go => reflect_struct_encoder.go} (50%)
 rename vendor/github.com/json-iterator/go/{feature_stream.go => stream.go} (62%)
 rename vendor/github.com/json-iterator/go/{feature_stream_float.go => stream_float.go} (85%)
 create mode 100644 vendor/github.com/json-iterator/go/stream_int.go
 rename vendor/github.com/json-iterator/go/{feature_stream_string.go => stream_str.go} (91%)
 create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE
 create mode 100644 vendor/github.com/modern-go/concurrent/README.md
 create mode 100644 vendor/github.com/modern-go/concurrent/executor.go
 create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go
 create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go
 create mode 100644 vendor/github.com/modern-go/concurrent/log.go
 create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go
 create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE
 create mode 100644 vendor/github.com/modern-go/reflect2/README.md
 create mode 100644 vendor/github.com/modern-go/reflect2/go_above_17.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go
 create mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go
 create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go
 create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s
 create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
 create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go
 create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go
 create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go
 create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go

diff --git a/vendor.conf b/vendor.conf
index bc63bbace..521dc47f3 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -3,7 +3,7 @@ github.com/blang/semver v3.1.0
 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
 github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
-github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
+github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
 github.com/containerd/containerd 84bebdd91d347c99069d1705b7d4e6d6f746160c
 github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
@@ -31,10 +31,12 @@ github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
-github.com/json-iterator/go 1.0.4
+github.com/json-iterator/go f2b4162afba35581b6d4a50d3b8f34e33c144682
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/Microsoft/go-winio v0.4.7
 github.com/Microsoft/hcsshim v0.6.11
+github.com/modern-go/reflect2 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
+github.com/modern-go/concurrent 1.0.3
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 github.com/opencontainers/image-spec v1.0.1
 github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
@@ -67,9 +69,9 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 google.golang.org/grpc v1.12.0
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
-k8s.io/api aac5d23ea3ca7132d75daeed7516f8205553c832
-k8s.io/apimachinery 521145febf93d5639dce48a49ee8dc080863b034
-k8s.io/apiserver 0553b9748924ffb5737340cb0afa6f0de2b12c42
-k8s.io/client-go 6fd8d2ef33ca9a0d821c9d66c23793cc8603d897
-k8s.io/kubernetes v1.11.0-beta.2
-k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e
+k8s.io/api 9e5ffd1f1320950b238cfce291b926411f0af722
+k8s.io/apimachinery ed135c5b96450fd24e5e981c708114fbbd950697
+k8s.io/apiserver a90e3a95c2e91b944bfca8225c4e0d12e42a9eb5
+k8s.io/client-go 03bfb9bdcfe5482795b999f39ca3ed9ad42ce5bb
+k8s.io/kubernetes v1.11.0
+k8s.io/utils 733eca437aa39379e4bcc25e726439dfca40fcff
diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go
index 312bce17d..efefcb1e6 100644
--- a/vendor/github.com/containerd/console/console_linux.go
+++ b/vendor/github.com/containerd/console/console_linux.go
@@ -72,7 +72,7 @@ func NewEpoller() (*Epoller, error) {
 	}, nil
 }
 
-// Add creates a epoll console based on the provided console. The console will
+// Add creates an epoll console based on the provided console. The console will
 // be registered with EPOLLET (i.e. using edge-triggered notification) and its
 // file descriptor will be set to non-blocking mode. After this, user should use
 // the return console to perform I/O.
@@ -134,7 +134,7 @@ func (e *Epoller) Wait() error {
 	}
 }
 
-// Close unregister the console's file descriptor from epoll interface
+// CloseConsole unregisters the console's file descriptor from epoll interface
 func (e *Epoller) CloseConsole(fd int) error {
 	e.mu.Lock()
 	defer e.mu.Unlock()
@@ -149,12 +149,12 @@ func (e *Epoller) getConsole(sysfd int) *EpollConsole {
 	return f
 }
 
-// Close the epoll fd
+// Close closes the epoll fd
 func (e *Epoller) Close() error {
 	return unix.Close(e.efd)
 }
 
-// EpollConsole acts like a console but register its file descriptor with a
+// EpollConsole acts like a console but registers its file descriptor with an
 // epoll fd and uses epoll API to perform I/O.
 type EpollConsole struct {
 	Console
@@ -167,7 +167,7 @@ type EpollConsole struct {
 // Read reads up to len(p) bytes into p. It returns the number of bytes read
 // (0 <= n <= len(p)) and any error encountered.
 //
-// If the console's read returns EAGAIN or EIO, we assumes that its a
+// If the console's read returns EAGAIN or EIO, we assume that it's a
 // temporary error because the other side went away and wait for the signal
 // generated by epoll event to continue.
 func (ec *EpollConsole) Read(p []byte) (n int, err error) {
@@ -207,7 +207,7 @@ func (ec *EpollConsole) Read(p []byte) (n int, err error) {
 // written from p (0 <= n <= len(p)) and any error encountered that caused
 // the write to stop early.
 //
-// If writes to the console returns EAGAIN or EIO, we assumes that its a
+// If writes to the console returns EAGAIN or EIO, we assume that it's a
 // temporary error because the other side went away and wait for the signal
 // generated by epoll event to continue.
func (ec *EpollConsole) Write(p []byte) (n int, err error) { @@ -224,7 +224,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) { } else { hangup = (err == unix.EAGAIN || err == unix.EIO) } - // if the other end disappear, assume this is temporary and wait for the + // if the other end disappears, assume this is temporary and wait for the // signal to continue again. if hangup { ec.writec.Wait() @@ -242,7 +242,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) { return n, err } -// Close closed the file descriptor and signal call waiters for this fd. +// Shutdown closes the file descriptor and signals call waiters for this fd. // It accepts a callback which will be called with the console's fd. The // callback typically will be used to do further cleanup such as unregister the // console's fd from the epoll interface. diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go index 7aa726f99..ff0174df4 100644 --- a/vendor/github.com/containerd/console/console_windows.go +++ b/vendor/github.com/containerd/console/console_windows.go @@ -17,7 +17,6 @@ package console import ( - "fmt" "os" "github.com/pkg/errors" @@ -29,90 +28,55 @@ var ( ErrNotImplemented = errors.New("not implemented") ) -func (m *master) initStdios() { - m.in = windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { - // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - windows.SetConsoleMode(m.in, m.inMode) - } else { - fmt.Printf("failed to get console mode for stdin: %v\n", err) - } - - m.out = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { - if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING +func (m *master) init() { + m.h = windows.Handle(m.f.Fd()) + if err := windows.GetConsoleMode(m.h, &m.mode); err == nil { + if m.f == os.Stdin { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.h, m.mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. 
+ windows.SetConsoleMode(m.h, m.mode) + } else if err := windows.SetConsoleMode(m.h, m.mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING } else { - windows.SetConsoleMode(m.out, m.outMode) + windows.SetConsoleMode(m.h, m.mode) } - } else { - fmt.Printf("failed to get console mode for stdout: %v\n", err) - } - - m.err = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { - if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.err, m.errMode) - } - } else { - fmt.Printf("failed to get console mode for stderr: %v\n", err) } } type master struct { - in windows.Handle - inMode uint32 - - out windows.Handle - outMode uint32 - - err windows.Handle - errMode uint32 + h windows.Handle + mode uint32 + f *os.File } func (m *master) SetRaw() error { - if err := makeInputRaw(m.in, m.inMode); err != nil { - return err + if m.f == os.Stdin { + if err := makeInputRaw(m.h, m.mode); err != nil { + return err + } + } else { + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. + windows.SetConsoleMode(m.h, m.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) } - - // Set StdOut and StdErr to raw mode, we ignore failures since - // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of - // Windows. - - windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return nil } func (m *master) Reset() error { - for _, s := range []struct { - fd windows.Handle - mode uint32 - }{ - {m.in, m.inMode}, - {m.out, m.outMode}, - {m.err, m.errMode}, - } { - if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { - return errors.Wrap(err, "unable to restore console mode") - } + if err := windows.SetConsoleMode(m.h, m.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") } - return nil } func (m *master) Size() (WinSize, error) { var info windows.ConsoleScreenBufferInfo - err := windows.GetConsoleScreenBufferInfo(m.out, &info) + err := windows.GetConsoleScreenBufferInfo(m.h, &info) if err != nil { return WinSize{}, errors.Wrap(err, "unable to get console info") } @@ -134,11 +98,11 @@ func (m *master) ResizeFrom(c Console) error { } func (m *master) DisableEcho() error { - mode := m.inMode &^ windows.ENABLE_ECHO_INPUT + mode := m.mode &^ windows.ENABLE_ECHO_INPUT mode |= windows.ENABLE_PROCESSED_INPUT mode |= windows.ENABLE_LINE_INPUT - if err := windows.SetConsoleMode(m.in, mode); err != nil { + if err := windows.SetConsoleMode(m.h, mode); err != nil { return errors.Wrap(err, "unable to set console to disable echo") } @@ -150,15 +114,15 @@ func (m *master) Close() error { } func (m *master) Read(b []byte) (int, error) { - panic("not implemented on windows") + return m.f.Read(b) } func (m *master) Write(b []byte) (int, error) { - panic("not implemented on windows") + return m.f.Write(b) } func (m *master) Fd() uintptr { - return uintptr(m.in) + return uintptr(m.h) } // on windows, console can only be made from os.Std{in,out,err}, hence there @@ -210,7 +174,7 @@ func newMaster(f *os.File) (Console, error) { if f != os.Stdin && f != os.Stdout && f != os.Stderr { return nil, errors.New("creating a console from a file 
is not supported on windows") } - m := &master{} - m.initStdios() + m := &master{f: f} + m.init() return m, nil } diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 3a0d68098..54d5afe95 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -8,6 +8,8 @@ A high-performance 100% compatible drop-in replacement of "encoding/json" +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + ``` Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com ``` @@ -29,6 +31,9 @@ Raw Result (easyjson requires static code generation) | easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | | jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +Always benchmark with your own workload. +The result depends heavily on the data input. + # Usage 100% compatibility with standard lib diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/adapter.go similarity index 76% rename from vendor/github.com/json-iterator/go/feature_adapter.go rename to vendor/github.com/json-iterator/go/adapter.go index 0214b711a..f371bfed7 100644 --- a/vendor/github.com/json-iterator/go/feature_adapter.go +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -16,15 +16,6 @@ func Unmarshal(data []byte, v interface{}) error { return ConfigDefault.Unmarshal(data, v) } -func lastNotSpacePos(data []byte) int { - for i := len(data) - 1; i >= 0; i-- { - if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { - return i + 1 - } - } - return 0 -} - // UnmarshalFromString convenient method to read from string instead of []byte func UnmarshalFromString(str string, v interface{}) error { return ConfigDefault.UnmarshalFromString(str, v) @@ -71,6 +62,11 @@ type Decoder struct { // Decode decode JSON into interface{} func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } adapter.iter.ReadVal(obj) err := adapter.iter.Error if err == io.EOF { @@ -81,7 +77,14 @@ func (adapter *Decoder) Decode(obj interface{}) error { // More is there more? func (adapter *Decoder) More() bool { - return adapter.iter.head != adapter.iter.tail + iter := adapter.iter + if iter.Error != nil { + return false + } + if iter.head != iter.tail { + return true + } + return iter.loadMore() } // Buffered remaining buffer @@ -90,11 +93,21 @@ func (adapter *Decoder) Buffered() io.Reader { return bytes.NewReader(remaining) } -// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. func (adapter *Decoder) UseNumber() { - origCfg := adapter.iter.cfg.configBeforeFrozen - origCfg.UseNumber = true - adapter.iter.cfg = origCfg.Froze().(*frozenConfig) + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse() +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. 
+func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse() } // NewEncoder same as json.NewEncoder @@ -117,14 +130,16 @@ func (adapter *Encoder) Encode(val interface{}) error { // SetIndent set the indention. Prefix is not supported func (adapter *Encoder) SetIndent(prefix, indent string) { - adapter.stream.cfg.indentionStep = len(indent) + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse() } // SetEscapeHTML escape html by default, set to false to disable func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { config := adapter.stream.cfg.configBeforeFrozen config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.Froze().(*frozenConfig) + adapter.stream.cfg = config.frozeWithCacheReuse() } // Valid reports whether data is a valid JSON encoding. diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/any.go similarity index 75% rename from vendor/github.com/json-iterator/go/feature_any.go rename to vendor/github.com/json-iterator/go/any.go index 87716d1fc..daecfed61 100644 --- a/vendor/github.com/json-iterator/go/feature_any.go +++ b/vendor/github.com/json-iterator/go/any.go @@ -3,8 +3,11 @@ package jsoniter import ( "errors" "fmt" + "github.com/modern-go/reflect2" "io" "reflect" + "strconv" + "unsafe" ) // Any generic object representation. @@ -25,7 +28,6 @@ type Any interface { ToString() string ToVal(val interface{}) Get(path ...interface{}) Any - // TODO: add Set Size() int Keys() []string GetInterface() interface{} @@ -35,7 +37,7 @@ type Any interface { type baseAny struct{} func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} } func (any *baseAny) Size() int { @@ -89,7 +91,7 @@ func Wrap(val interface{}) Any { if isAny { return asAny } - typ := reflect.TypeOf(val) + typ := reflect2.TypeOf(val) switch typ.Kind() { case reflect.Slice: return wrapArray(val) @@ -100,6 +102,9 @@ func Wrap(val interface{}) Any { case reflect.String: return WrapString(val.(string)) case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } return WrapInt64(int64(val.(int))) case reflect.Int8: return WrapInt32(int32(val.(int8))) @@ -110,7 +115,15 @@ func Wrap(val interface{}) Any { case reflect.Int64: return WrapInt64(val.(int64)) case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) case reflect.Uint8: return WrapUint32(uint32(val.(uint8))) case reflect.Uint16: @@ -243,3 +256,66 @@ func locatePath(iter *Iterator, path []interface{}) Any { } return iter.readAny() } + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + 
valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/any_array.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_array.go rename to vendor/github.com/json-iterator/go/any_array.go diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_bool.go rename to vendor/github.com/json-iterator/go/any_bool.go diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/any_float.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_float.go rename to vendor/github.com/json-iterator/go/any_float.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int32.go rename to vendor/github.com/json-iterator/go/any_int32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_int64.go rename to vendor/github.com/json-iterator/go/any_int64.go diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_invalid.go rename to vendor/github.com/json-iterator/go/any_invalid.go diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_nil.go rename to vendor/github.com/json-iterator/go/any_nil.go diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/any_number.go similarity index 76% rename from vendor/github.com/json-iterator/go/feature_any_number.go rename to vendor/github.com/json-iterator/go/any_number.go index 4e1c27641..9d1e901a6 100644 --- a/vendor/github.com/json-iterator/go/feature_any_number.go +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -1,6 +1,9 @@ package jsoniter -import "unsafe" +import ( + "io" + "unsafe" +) type numberLazyAny struct { baseAny @@ -29,7 +32,9 @@ func (any *numberLazyAny) ToInt() int { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadInt() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -37,7 +42,9 @@ func (any *numberLazyAny) ToInt32() int32 
{ iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadInt32() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -45,7 +52,9 @@ func (any *numberLazyAny) ToInt64() int64 { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadInt64() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -53,7 +62,9 @@ func (any *numberLazyAny) ToUint() uint { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadUint() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -61,7 +72,9 @@ func (any *numberLazyAny) ToUint32() uint32 { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadUint32() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -69,7 +82,9 @@ func (any *numberLazyAny) ToUint64() uint64 { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadUint64() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -77,7 +92,9 @@ func (any *numberLazyAny) ToFloat32() float32 { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadFloat32() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } @@ -85,7 +102,9 @@ func (any *numberLazyAny) ToFloat64() float64 { iter := any.cfg.BorrowIterator(any.buf) defer any.cfg.ReturnIterator(iter) val := iter.ReadFloat64() - any.err = iter.Error + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } return val } diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/any_object.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_object.go rename to vendor/github.com/json-iterator/go/any_object.go diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/any_str.go similarity index 97% rename from vendor/github.com/json-iterator/go/feature_any_string.go rename to vendor/github.com/json-iterator/go/any_str.go index abf060bd5..a4b93c78c 100644 --- a/vendor/github.com/json-iterator/go/feature_any_string.go +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -14,7 +14,7 @@ func (any *stringAny) Get(path ...interface{}) Any { if len(path) == 0 { return any } - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} } func (any *stringAny) Parse() *Iterator { diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint32.go rename to vendor/github.com/json-iterator/go/any_uint32.go diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_any_uint64.go rename to vendor/github.com/json-iterator/go/any_uint64.go diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/config.go similarity index 60% rename from 
vendor/github.com/json-iterator/go/feature_config.go rename to vendor/github.com/json-iterator/go/config.go index 140679536..835819129 100644 --- a/vendor/github.com/json-iterator/go/feature_config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -2,11 +2,13 @@ package jsoniter import ( "encoding/json" - "errors" "io" "reflect" - "sync/atomic" + "sync" "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" ) // Config customize how the API should behave. @@ -17,21 +19,12 @@ type Config struct { EscapeHTML bool SortMapKeys bool UseNumber bool + DisallowUnknownFields bool TagKey string + OnlyTaggedField bool ValidateJsonRawMessage bool ObjectFieldMustBeSimpleString bool -} - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - decoderCache unsafe.Pointer - encoderCache unsafe.Pointer - extensions []Extension - streamPool chan *Stream - iteratorPool chan *Iterator + CaseSensitive bool } // API the public interface of this package. @@ -48,6 +41,9 @@ type API interface { NewEncoder(writer io.Writer) *Encoder NewDecoder(reader io.Reader) *Decoder Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder } // ConfigDefault the default API @@ -69,35 +65,120 @@ var ConfigFastest = Config{ ObjectFieldMustBeSimpleString: true, // do not unescape object field }.Froze() +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + extensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + // Froze forge API from config func (cfg Config) Froze() API { - // TODO: cache frozen config - frozenConfig := &frozenConfig{ + api := &frozenConfig{ sortMapKeys: cfg.SortMapKeys, indentionStep: cfg.IndentionStep, objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, - streamPool: make(chan *Stream, 16), - iteratorPool: make(chan *Iterator, 16), + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, } - atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) - atomic.StorePointer(&frozenConfig.encoderCache, 
unsafe.Pointer(&map[string]ValEncoder{})) + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} if cfg.MarshalFloatWith6Digits { - frozenConfig.marshalFloatWith6Digits() + api.marshalFloatWith6Digits(encoderExtension) } if cfg.EscapeHTML { - frozenConfig.escapeHTML() + api.escapeHTML(encoderExtension) } if cfg.UseNumber { - frozenConfig.useNumber() + api.useNumber(decoderExtension) } if cfg.ValidateJsonRawMessage { - frozenConfig.validateJsonRawMessage() + api.validateJsonRawMessage(encoderExtension) + } + if len(encoderExtension) > 0 { + api.extensions = append(api.extensions, encoderExtension) } - frozenConfig.configBeforeFrozen = cfg - return frozenConfig + if len(decoderExtension) > 0 { + api.extensions = append(api.extensions, decoderExtension) + } + api.configBeforeFrozen = cfg + return api } -func (cfg *frozenConfig) validateJsonRawMessage() { +func (cfg Config) frozeWithCacheReuse() *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { rawMessage := *(*json.RawMessage)(ptr) iter := cfg.BorrowIterator([]byte(rawMessage)) @@ -111,18 +192,23 @@ func (cfg *frozenConfig) validateJsonRawMessage() { }, func(ptr unsafe.Pointer) bool { return false }} - cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) - cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder } -func (cfg *frozenConfig) useNumber() { - cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } if iter.WhatIsNext() == NumberValue { *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) } else { *((*interface{})(ptr)) = iter.Read() } - }}) + }} } func (cfg *frozenConfig) getTagKey() string { tagKey := cfg.configBeforeFrozen.TagKey @@ -132,7 +218,7 @@ func (cfg *frozenConfig) getTagKey() string { return tagKey } -func (cfg *frozenConfig) registerExtension(extension Extension) { +func (cfg *frozenConfig) RegisterExtension(extension Extension) { cfg.extensions = append(cfg.extensions, extension) } @@ -143,10 +229,6 @@ func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteFloat32Lossy(*((*float32)(ptr))) } -func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*float32)(ptr)) == 0 } @@ -158,20 +240,16 @@ func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteFloat64Lossy(*((*float64)(ptr))) } 
-func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*float64)(ptr)) == 0 } // EnableLossyFloatMarshalling keeps 10**(-6) precision // for float variables for better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits() { +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { // for better performance - cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) - cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} } type htmlEscapedStringEncoder struct { @@ -182,56 +260,12 @@ func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stre stream.WriteStringWithHTMLEscaped(str) } -func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { return *((*string)(ptr)) == "" } -func (cfg *frozenConfig) escapeHTML() { - cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { - done := false - for !done { - ptr := atomic.LoadPointer(&cfg.decoderCache) - cache := *(*map[reflect.Type]ValDecoder)(ptr) - copied := map[reflect.Type]ValDecoder{} - for k, v := range cache { - copied[k] = v - } - copied[cacheKey] = decoder - done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) - } -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { - done := false - for !done { - ptr := atomic.LoadPointer(&cfg.encoderCache) - cache := *(*map[reflect.Type]ValEncoder)(ptr) - copied := map[reflect.Type]ValEncoder{} - for k, v := range cache { - copied[k] = v - } - copied[cacheKey] = encoder - done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) - } -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { - ptr := atomic.LoadPointer(&cfg.decoderCache) - cache := *(*map[reflect.Type]ValDecoder)(ptr) - return cache[cacheKey] -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { - ptr := atomic.LoadPointer(&cfg.encoderCache) - cache := *(*map[reflect.Type]ValEncoder)(ptr) - return cache[cacheKey] +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} } func (cfg *frozenConfig) cleanDecoders() { @@ -280,24 +314,22 @@ func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([] } newCfg := cfg.configBeforeFrozen newCfg.IndentionStep = len(indent) - return newCfg.Froze().Marshal(v) + return newCfg.frozeWithCacheReuse().Marshal(v) } func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { data := []byte(str) - data = data[:lastNotSpacePos(data)] iter := cfg.BorrowIterator(data) defer cfg.ReturnIterator(iter) iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("UnmarshalFromString", "there are bytes left after 
unmarshal") + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") return iter.Error } @@ -308,24 +340,17 @@ func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { } func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { - data = data[:lastNotSpacePos(data)] iter := cfg.BorrowIterator(data) defer cfg.ReturnIterator(iter) - typ := reflect.TypeOf(v) - if typ.Kind() != reflect.Ptr { - // return non-pointer error - return errors.New("the second param must be ptr type") - } iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") return iter.Error } diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go deleted file mode 100644 index e187b200a..000000000 --- a/vendor/github.com/json-iterator/go/feature_json_number.go +++ /dev/null @@ -1,31 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "strconv" -) - -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go deleted file mode 100644 index 1bd8987f2..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect.go +++ /dev/null @@ -1,721 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "fmt" - "reflect" - "time" - "unsafe" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
-type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) - EncodeInterface(val interface{}, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -// WriteToStream the default implementation for TypeEncoder method EncodeInterface -func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteNil() - return - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -var jsonNumberType reflect.Type -var jsoniterNumberType reflect.Type -var jsonRawMessageType reflect.Type -var jsoniterRawMessageType reflect.Type -var anyType reflect.Type -var marshalerType reflect.Type -var unmarshalerType reflect.Type -var textMarshalerType reflect.Type -var textUnmarshalerType reflect.Type - -func init() { - jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() - jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() - jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() - jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() - anyType = reflect.TypeOf((*Any)(nil)).Elem() - marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -} - -type OptionalDecoder struct { - ValueType reflect.Type - ValueDecoder ValDecoder -} - -func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.ValueType) - newPtr := extractInterface(value.Interface()).word - decoder.ValueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) - } else { - //reuse existing instance - decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type deferenceDecoder struct { - // only to deference a pointer - valueType reflect.Type - valueDecoder ValDecoder -} - -func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) - newPtr := extractInterface(value.Interface()).word - decoder.valueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type OptionalEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*unsafe.Pointer)(ptr)) == nil -} - -type optionalMapEncoder struct { - valueEncoder ValEncoder -} - -func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - 
encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - p := *((*unsafe.Pointer)(ptr)) - return p == nil || encoder.valueEncoder.IsEmpty(p) -} - -type placeholderEncoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.getRealEncoder().Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { - encoder.getRealEncoder().EncodeInterface(val, stream) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.getRealEncoder().IsEmpty(ptr) -} - -func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { - for i := 0; i < 500; i++ { - realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderEncoder) - if isPlaceholder { - time.Sleep(10 * time.Millisecond) - } else { - return realDecoder - } - } - panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) -} - -type placeholderDecoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - for i := 0; i < 500; i++ { - realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderDecoder) - if isPlaceholder { - time.Sleep(10 * time.Millisecond) - } else { - realDecoder.Decode(ptr, iter) - return - } - } - panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) -} - -// emptyInterface is the header for an interface{} value. 
-type emptyInterface struct { - typ unsafe.Pointer - word unsafe.Pointer -} - -// emptyInterface is the header for an interface with method (not interface{}) -type nonEmptyInterface struct { - // see ../runtime/iface.go:/Itab - itab *struct { - ityp unsafe.Pointer // static interface type - typ unsafe.Pointer // dynamic concrete type - link unsafe.Pointer - bad int32 - unused int32 - fun [100000]unsafe.Pointer // method table - } - word unsafe.Pointer -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - typ := reflect.TypeOf(obj) - cacheKey := typ.Elem() - decoder, err := decoderOfType(iter.cfg, cacheKey) - if err != nil { - iter.Error = err - return - } - e := (*emptyInterface)(unsafe.Pointer(&obj)) - decoder.Decode(e.word, iter) -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - typ := reflect.TypeOf(val) - cacheKey := typ - encoder, err := encoderOfType(stream.cfg, cacheKey) - if err != nil { - stream.Error = err - return - } - encoder.EncodeInterface(val, stream) -} - -type prefix string - -func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) { - if err != nil { - return nil, fmt.Errorf("%s: %s", p, err.Error()) - } - return decoder, err -} - -func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) { - if err != nil { - return nil, fmt.Errorf("%s: %s", p, err.Error()) - } - return encoder, err -} - -func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - cacheKey := typ - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder, nil - } - decoder = getTypeDecoderFromExtension(typ) - if decoder != nil { - cfg.addDecoderToCache(cacheKey, decoder) - return decoder, nil - } - decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} - cfg.addDecoderToCache(cacheKey, decoder) - decoder, err := createDecoderOfType(cfg, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - cfg.addDecoderToCache(cacheKey, decoder) - return decoder, err -} - -func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - typeName := typ.String() - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{}, nil - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{}, nil - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{}, nil - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{}, nil - } - if typ.Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &OptionalDecoder{typ.Elem(), decoder} - } - return decoder, nil - } - if reflect.PtrTo(typ).Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - return decoder, nil - } - if typ.Implements(textUnmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &OptionalDecoder{typ.Elem(), decoder} - } - return decoder, nil - } - if reflect.PtrTo(typ).Implements(textUnmarshalerType) { - 
templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - return decoder, nil - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) - if err != nil { - return nil, err - } - return &base64Codec{sliceDecoder: sliceDecoder}, nil - } - if typ.Implements(anyType) { - return &anyCodec{}, nil - } - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - cacheKey := typ - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != 
nil { - return encoder, nil - } - encoder = getTypeEncoderFromExtension(typ) - if encoder != nil { - cfg.addEncoderToCache(cacheKey, encoder) - return encoder, nil - } - encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} - cfg.addEncoderToCache(cacheKey, encoder) - encoder, err := createEncoderOfType(cfg, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder, err -} - -func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{}, nil - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{}, nil - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{}, nil - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{}, nil - } - if typ.Implements(marshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &OptionalEncoder{encoder} - } - return encoder, nil - } - if reflect.PtrTo(typ).Implements(marshalerType) { - checkIsEmpty, err := createCheckIsEmpty(reflect.PtrTo(typ)) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - return encoder, nil - } - if typ.Implements(textMarshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &textMarshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &OptionalEncoder{encoder} - } - return encoder, nil - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - return &base64Codec{}, nil - } - if typ.Implements(anyType) { - return &anyCodec{}, nil - } - return createEncoderOfSimpleType(cfg, typ) -} - -func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { - kind := typ.Kind() - switch kind { - case reflect.String: - return &stringCodec{}, nil - case reflect.Int: - return &intCodec{}, nil - case reflect.Int8: - return &int8Codec{}, nil - case reflect.Int16: - return &int16Codec{}, nil - case reflect.Int32: - return &int32Codec{}, nil - case reflect.Int64: - return &int64Codec{}, nil - case reflect.Uint: - return &uintCodec{}, nil - case reflect.Uint8: - return &uint8Codec{}, nil - case reflect.Uint16: - return &uint16Codec{}, nil - case reflect.Uint32: - return &uint32Codec{}, nil - case reflect.Uintptr: - return &uintptrCodec{}, nil - case reflect.Uint64: - return &uint64Codec{}, nil - case reflect.Float32: - return &float32Codec{}, nil - case reflect.Float64: - return &float64Codec{}, nil - case reflect.Bool: - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return &structEncoder{}, nil - case reflect.Array: - return &arrayEncoder{}, nil - case reflect.Slice: - return &sliceEncoder{}, nil - case reflect.Map: - return &mapEncoder{}, nil - case reflect.Ptr: 
- return &OptionalEncoder{}, nil - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - elemType := typ.Elem() - decoder, err := decoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - return &OptionalDecoder{elemType, decoder}, nil -} - -func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - elemEncoder, err := encoderOfType(cfg, 
elemType) - if err != nil { - return nil, err - } - encoder := &OptionalEncoder{elemEncoder} - if elemType.Kind() == reflect.Map { - encoder = &OptionalEncoder{encoder} - } - return encoder, nil -} - -func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Interface() - return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil -} - -func extractInterface(val interface{}) emptyInterface { - return *((*emptyInterface)(unsafe.Pointer(&val))) -} - -func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - encoder, err := encoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Elem().Interface() - if cfg.sortMapKeys { - return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil - } - return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go deleted file mode 100644 index d661fb6fe..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_array.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &arrayDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &OptionalEncoder{encoder} - } - return &arrayEncoder{typ, typ.Elem(), encoder}, nil -} - -type arrayEncoder struct { - arrayType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { - // special optimization for interface{} - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteArrayStart() - stream.WriteNil() - stream.WriteArrayEnd() - return - } - elemType := encoder.arrayType.Elem() - if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, 
iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - if offset < decoder.arrayType.Size() { - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) - offset += decoder.elemType.Size() - } else { - iter.Skip() - } - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go deleted file mode 100644 index 005671e01..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_map.go +++ /dev/null @@ -1,244 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "reflect" - "sort" - "strconv" - "unsafe" -) - -type mapDecoder struct { - mapType reflect.Type - keyType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder - mapInterface emptyInterface -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type - mapInterface := decoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface).Elem() - if iter.ReadNil() { - realVal.Set(reflect.Zero(decoder.mapType)) - return - } - if realVal.IsNil() { - realVal.Set(reflect.MakeMap(realVal.Type())) - } - iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { - elem := reflect.New(decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) - // to put into map, we have to use reflection - keyType := decoder.keyType - // TODO: remove this from loop - switch { - case keyType.Kind() == reflect.String: - realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) - return true - case keyType.Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) - return true - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) - return true - default: - switch keyType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowInt(n) { - iter.ReportError("read map key as int64", "read int64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowUint(n) { - iter.ReportError("read map key as uint64", "read uint64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - } - } - iter.ReportError("read map 
key", "unexpected map key type "+keyType.String()) - return true - }) -} - -type mapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - stream.WriteObjectStart() - for i, key := range realVal.MapKeys() { - if i != 0 { - stream.WriteMore() - } - encodeMapKey(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -func encodeMapKey(key reflect.Value, stream *Stream) { - if key.Kind() == reflect.String { - stream.WriteString(key.String()) - return - } - if tm, ok := key.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - if err != nil { - stream.Error = err - return - } - stream.writeByte('"') - stream.Write(buf) - stream.writeByte('"') - return - } - switch key.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - stream.writeByte('"') - stream.WriteInt64(key.Int()) - stream.writeByte('"') - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - stream.writeByte('"') - stream.WriteUint64(key.Uint()) - stream.writeByte('"') - return - } - stream.Error = &json.UnsupportedTypeError{Type: key.Type()} -} - -func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} - -type sortKeysMapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - - // Extract and sort the keys. - keys := realVal.MapKeys() - sv := stringValues(make([]reflectWithString, len(keys))) - for i, v := range keys { - sv[i].v = v - if err := sv[i].resolve(); err != nil { - stream.Error = err - return - } - } - sort.Sort(sv) - - stream.WriteObjectStart() - for i, key := range sv { - if i != 0 { - stream.WriteMore() - } - stream.WriteVal(key.s) // might need html escape, so can not WriteString directly - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key.v).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. 
-type stringValues []reflectWithString - -type reflectWithString struct { - v reflect.Value - s string -} - -func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() - return nil - } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - w.s = string(buf) - return err - } - switch w.v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) - return nil - } - return &json.UnsupportedTypeError{Type: w.v.Type()} -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } - -func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go deleted file mode 100644 index 95bd1e87c..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_native.go +++ /dev/null @@ -1,764 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/base64" - "encoding/json" - "reflect" - "unsafe" -) - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type intCodec struct { -} - -func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int)(ptr)) = iter.ReadInt() - } -} - -func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt(*((*int)(ptr))) -} - -func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int)(ptr)) == 0 -} - -type uintptrCodec struct { -} - -func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) - } -} - -func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(uint64(*((*uintptr)(ptr)))) -} - -func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uintptr)(ptr)) == 0 -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec 
*int8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uintCodec struct { -} - -func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint)(ptr)) = iter.ReadUint() - return - } -} - -func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint(*((*uint)(ptr))) -} - -func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) EncodeInterface(val 
interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type emptyInterfaceCodec struct { -} - -func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - existing := *((*interface{})(ptr)) - - // Checking for both typed and untyped nil pointers. 
- if existing != nil && - reflect.TypeOf(existing).Kind() == reflect.Ptr && - !reflect.ValueOf(existing).IsNil() { - - var ptrToExisting interface{} - for { - elem := reflect.ValueOf(existing).Elem() - if elem.Kind() != reflect.Ptr || elem.IsNil() { - break - } - ptrToExisting = existing - existing = elem.Interface() - } - - if iter.ReadNil() { - if ptrToExisting != nil { - nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) - reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) - } else { - *((*interface{})(ptr)) = nil - } - } else { - iter.ReadVal(existing) - } - - return - } - - if iter.ReadNil() { - *((*interface{})(ptr)) = nil - } else { - *((*interface{})(ptr)) = iter.Read() - } -} - -func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteVal(*((*interface{})(ptr))) -} - -func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - emptyInterface := (*emptyInterface)(ptr) - return emptyInterface.typ == nil -} - -type nonEmptyInterfaceCodec struct { -} - -func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - if nonEmptyInterface.itab == nil { - iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") - return - } - var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - iter.ReadVal(&i) - if e.word == nil { - nonEmptyInterface.itab = nil - } - nonEmptyInterface.word = e.word -} - -func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - var i interface{} - if nonEmptyInterface.itab != nil { - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - } - stream.WriteVal(i) -} - -func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - return nonEmptyInterface.word == nil -} - -type anyCodec struct { -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Any)(ptr)) = iter.ReadAny() -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - (*((*Any)(ptr))).WriteTo(stream) -} - -func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { - (val.(Any)).WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - return (*((*Any)(ptr))).Size() == 0 -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.Number)(ptr)))) -} - -func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.Number))) -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct 
{ -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*Number)(ptr)))) -} - -func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(Number))) -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.RawMessage))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(RawMessage))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} - -type base64Codec struct { - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Len = 0 - ptrSlice.Cap = 0 - ptrSlice.Data = nil - return - } - switch iter.WhatIsNext() { - case StringValue: - encoding := base64.StdEncoding - src := iter.SkipAndReturnBytes() - src = src[1 : len(src)-1] - decodedLen := encoding.DecodedLen(len(src)) - dst := make([]byte, decodedLen) - len, err := encoding.Decode(dst, src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - dst = dst[:len] - dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Data = dstSlice.Data - ptrSlice.Cap = dstSlice.Cap - ptrSlice.Len = dstSlice.Len - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { - ptr := extractInterface(val).word - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - 
encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type marshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler, ok := (*realInterface).(json.Marshaler) - if !ok { - stream.WriteVal(nil) - return - } - - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} -func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - stream.WriteString(string(bytes)) 
- } -} - -func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go deleted file mode 100644 index 51a8daecf..000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_slice.go +++ /dev/null @@ -1,147 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &sliceDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &OptionalEncoder{encoder} - } - return &sliceEncoder{typ, typ.Elem(), encoder}, nil -} - -type sliceEncoder struct { - sliceType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - slice := (*sliceHeader)(ptr) - if slice.Data == nil { - stream.WriteNil() - return - } - if slice.Len == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(slice.Data) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - for i := 1; i < slice.Len; i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - slice := (*sliceHeader)(ptr) - return slice.Len == 0 -} - -type sliceDecoder struct { - sliceType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -// sliceHeader is a safe version of SliceHeader used within this package. 
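As a standalone sketch of just the capacity arithmetic that growOne (below) applies before decoding each element — double while the slice is small, then grow by roughly 25% once past 1024 elements — leaving out the reflect/unsafe reallocation. The helper name nextCap is illustrative only.

package main

import "fmt"

// nextCap returns the capacity growOne would grow to when appending one
// more element to a slice with the given length and capacity.
func nextCap(length, capacity int) int {
	newLen := length + 1
	if newLen <= capacity {
		return capacity
	}
	newCap := capacity
	if newCap == 0 {
		newCap = 1
	}
	for newCap < newLen {
		if length < 1024 {
			newCap += newCap // double while small
		} else {
			newCap += newCap / 4 // grow by ~25% once past 1024
		}
	}
	return newCap
}

func main() {
	fmt.Println(nextCap(0, 0), nextCap(4, 4), nextCap(2000, 2000)) // 1 8 2500
}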
-type sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - slice := (*sliceHeader)(ptr) - if iter.ReadNil() { - slice.Len = 0 - slice.Cap = 0 - slice.Data = nil - return - } - reuseSlice(slice, decoder.sliceType, 4) - slice.Len = 0 - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - growOne(slice, decoder.sliceType, decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) - offset += decoder.elemType.Size() - return true - }) -} - -// grow grows the slice s so that it can hold extra more values, allocating -// more capacity if needed. It also returns the old and new slice lengths. -func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { - newLen := slice.Len + 1 - if newLen <= slice.Cap { - slice.Len = newLen - return - } - newCap := slice.Cap - if newCap == 0 { - newCap = 1 - } else { - for newCap < newLen { - if slice.Len < 1024 { - newCap += newCap - } else { - newCap += newCap / 4 - } - } - } - newVal := reflect.MakeSlice(sliceType, newLen, newCap) - dst := unsafe.Pointer(newVal.Pointer()) - // copy old array into new array - originalBytesCount := slice.Len * int(elementType.Size()) - srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount}) - dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount}) - copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader)) - slice.Data = dst - slice.Len = newLen - slice.Cap = newCap -} - -func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { - if expectedCap <= slice.Cap { - return - } - newVal := reflect.MakeSlice(sliceType, 0, expectedCap) - dst := unsafe.Pointer(newVal.Pointer()) - slice.Data = dst - slice.Cap = expectedCap -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go deleted file mode 100644 index 7cfd522c1..000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_int.go +++ /dev/null @@ -1,320 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(buf []byte, v uint32, n int) int { - start := v >> 24 - if start == 0 { - buf[n] = byte(v >> 16) - n++ - buf[n] = byte(v >> 8) - n++ - } else if start == 1 { - buf[n] = byte(v >> 8) - n++ - } - buf[n] = byte(v) - n++ - return n -} - -func writeBuf(buf []byte, v uint32, n int) { - buf[n] = byte(v >> 16) - buf[n+1] = byte(v >> 8) - buf[n+2] = byte(v) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) WriteUint8(val uint8) { - stream.ensure(3) - stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - stream.ensure(4) - n := stream.n - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint8(nval) - } - stream.n = writeFirstBuf(stream.buf, digits[val], n) -} - -// 
WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - stream.ensure(5) - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) - return - } - r1 := val - q1*1000 - n := writeFirstBuf(stream.buf, digits[q1], stream.n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - stream.ensure(6) - n := stream.n - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint16(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - n = writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - stream.ensure(10) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - stream.ensure(11) - n := stream.n - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint32(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - stream.ensure(20) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) - writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return 
- } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - n = writeFirstBuf(stream.buf, digits[q6], n) - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - stream.ensure(20) - n := stream.n - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint64(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) - writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - stream.buf[n] = byte(q6 + '0') - n++ - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/iter.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter.go rename to vendor/github.com/json-iterator/go/iter.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_array.go rename to vendor/github.com/json-iterator/go/iter_array.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go similarity index 98% rename from vendor/github.com/json-iterator/go/feature_iter_float.go rename to vendor/github.com/json-iterator/go/iter_float.go index 86f459912..4f883c095 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_float.go +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -1,6 +1,7 @@ package jsoniter import ( + "encoding/json" "io" "math/big" "strconv" @@ -339,3 +340,8 @@ func validateFloat(str string) string { } return "" } + +// ReadNumber read json.Number +func (iter 
*Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go similarity index 72% rename from vendor/github.com/json-iterator/go/feature_iter_int.go rename to vendor/github.com/json-iterator/go/iter_int.go index 6137348cd..214232035 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_int.go +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -22,11 +22,17 @@ func init() { // ReadUint read uint func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } return uint(iter.ReadUint64()) } // ReadInt read int func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } return int(iter.ReadInt64()) } @@ -113,13 +119,9 @@ func (iter *Iterator) ReadUint32() (ret uint32) { } func (iter *Iterator) readUint32(c byte) (ret uint32) { - defer func() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { - iter.ReportError("readUint32", "can not decode float as int") - } - }() ind := intDigits[c] if ind == 0 { + iter.assertInteger() return 0 // single zero } if ind == invalidCharForNumber { @@ -132,12 +134,14 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { ind2 := intDigits[iter.buf[i]] if ind2 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value } i++ ind3 := intDigits[iter.buf[i]] if ind3 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*10 + uint32(ind2) } //iter.head = i + 1 @@ -146,30 +150,35 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { ind4 := intDigits[iter.buf[i]] if ind4 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*100 + uint32(ind2)*10 + uint32(ind3) } i++ ind5 := intDigits[iter.buf[i]] if ind5 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) } i++ ind6 := intDigits[iter.buf[i]] if ind6 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) } i++ ind7 := intDigits[iter.buf[i]] if ind7 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) } i++ ind8 := intDigits[iter.buf[i]] if ind8 == invalidCharForNumber { iter.head = i + iter.assertInteger() return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) } i++ @@ -177,6 +186,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) iter.head = i if ind9 == invalidCharForNumber { + iter.assertInteger() return value } } @@ -185,6 +195,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { ind = intDigits[iter.buf[i]] if ind == invalidCharForNumber { iter.head = i + iter.assertInteger() return value } if value > uint32SafeToMultiply10 { @@ -199,6 +210,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) { value = (value << 3) + (value << 1) + uint32(ind) } if !iter.loadMore() { + iter.assertInteger() return value } } @@ -229,13 +241,9 @@ func (iter *Iterator) ReadUint64() uint64 { } func (iter *Iterator) readUint64(c byte) (ret uint64) { - 
defer func() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { - iter.ReportError("readUint64", "can not decode float as int") - } - }() ind := intDigits[c] if ind == 0 { + iter.assertInteger() return 0 // single zero } if ind == invalidCharForNumber { @@ -243,11 +251,73 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { return } value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } for { for i := iter.head; i < iter.tail; i++ { ind = intDigits[iter.buf[i]] if ind == invalidCharForNumber { iter.head = i + iter.assertInteger() return value } if value > uint64SafeToMultiple10 { @@ -262,7 +332,14 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) { value = (value << 3) + (value << 1) + uint64(ind) } if !iter.loadMore() { + iter.assertInteger() return value } } } + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go similarity index 68% rename from vendor/github.com/json-iterator/go/feature_iter_object.go rename to vendor/github.com/json-iterator/go/iter_object.go index dfd91fa60..6e7c370ab 100644 --- a/vendor/github.com/json-iterator/go/feature_iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -3,7 +3,6 @@ package jsoniter import ( "fmt" "unicode" - "unsafe" ) // ReadObject read one field from object. 
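
The iter_int.go changes above replace the deferred '.'-check with an explicit assertInteger call after each early return, and dispatch ReadInt/ReadUint on strconv.IntSize for 32-bit platforms. A stand-alone sketch of the strictness rule (not the iterator's buffered implementation):

package main

import (
	"fmt"
	"strconv"
)

// readIntStrict scans the leading digits of s and refuses input where a '.'
// follows them, mirroring the intent of assertInteger: "1.5" must fail
// instead of silently decoding as 1.
func readIntStrict(s string) (int64, error) {
	end := 0
	for end < len(s) && s[end] >= '0' && s[end] <= '9' {
		end++
	}
	if end < len(s) && s[end] == '.' {
		return 0, fmt.Errorf("can not decode float as int: %q", s)
	}
	return strconv.ParseInt(s[:end], 10, 64)
}

func main() {
	fmt.Println(readIntStrict("123,")) // 123 <nil>
	fmt.Println(readIntStrict("1.5]")) // error: can not decode float as int
	fmt.Println(strconv.IntSize)       // 32 or 64: drives the new ReadInt/ReadUint dispatch
}
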
@@ -19,16 +18,12 @@ func (iter *Iterator) ReadObject() (ret string) { c = iter.nextToken() if c == '"' { iter.unreadByte() - if iter.cfg.objectFieldMustBeSimpleString { - return string(iter.readObjectFieldAsBytes()) - } else { - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } + return field } if c == '}' { return "" // end of object @@ -36,16 +31,12 @@ func (iter *Iterator) ReadObject() (ret string) { iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) return case ',': - if iter.cfg.objectFieldMustBeSimpleString { - return string(iter.readObjectFieldAsBytes()) - } else { - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } + return field case '}': return "" // end of object default: @@ -54,97 +45,91 @@ func (iter *Iterator) ReadObject() (ret string) { } } -func (iter *Iterator) readFieldHash() int32 { +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { hash := int64(0x811c9dc5) c := iter.nextToken() - if c == '"' { - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if !iter.cfg.objectFieldMustBeSimpleString && b == '\\' { - iter.head = i - for _, b := range iter.readStringSlowPath() { - if 'A' <= b && b <= 'Z' { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' } - return int32(hash) + hash ^= int64(b) + hash *= 0x1000193 } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return int32(hash) + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 } - if 'A' <= b && b <= 'Z' { - b += 'a' - 'A' + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 } - hash ^= int64(b) - hash *= 0x1000193 + return hash } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 } } - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 } -func calcHash(str string) int32 { +func calcHash(str string, caseSensitive bool) int64 { hash := 
int64(0x811c9dc5) for _, b := range str { - hash ^= int64(unicode.ToLower(b)) + if caseSensitive { + hash ^= int64(b) + } else { + hash ^= int64(unicode.ToLower(b)) + } hash *= 0x1000193 } - return int32(hash) + return int64(hash) } // ReadObjectCB read object with callback, the key is ascii only and field name not copied func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() - var fieldBytes []byte var field string if c == '{' { c = iter.nextToken() if c == '"' { iter.unreadByte() - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes = iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - } else { - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { return false } c = iter.nextToken() for c == ',' { - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes = iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - } else { - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { return false diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_skip.go rename to vendor/github.com/json-iterator/go/iter_skip.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go rename to vendor/github.com/json-iterator/go/iter_skip_sloppy.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_skip_strict.go rename to vendor/github.com/json-iterator/go/iter_skip_strict.go diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go b/vendor/github.com/json-iterator/go/iter_str.go similarity index 100% rename from vendor/github.com/json-iterator/go/feature_iter_string.go rename to vendor/github.com/json-iterator/go/iter_str.go diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/pool.go similarity index 64% rename from vendor/github.com/json-iterator/go/feature_pool.go rename to vendor/github.com/json-iterator/go/pool.go index 52d38e685..e2389b56c 100644 --- a/vendor/github.com/json-iterator/go/feature_pool.go +++ b/vendor/github.com/json-iterator/go/pool.go @@ -17,43 +17,26 @@ type StreamPool interface { } func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - select { - case stream := <-cfg.streamPool: - stream.Reset(writer) - return stream - default: - return NewStream(cfg, writer, 512) - } + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream } func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil stream.Error = 
nil stream.Attachment = nil - select { - case cfg.streamPool <- stream: - return - default: - return - } + cfg.streamPool.Put(stream) } func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - select { - case iter := <-cfg.iteratorPool: - iter.ResetBytes(data) - return iter - default: - return ParseBytes(cfg, data) - } + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter } func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { iter.Error = nil iter.Attachment = nil - select { - case cfg.iteratorPool <- iter: - return - default: - return - } + cfg.iteratorPool.Put(iter) } diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000..be7a0e218 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,330 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
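
The pool.go rewrite above swaps the bounded channel pools for sync.Pool and clears out, Error and Attachment before a Stream is returned. A minimal sketch of that borrow/reset/return pattern, using a stand-in type rather than jsoniter's Stream:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// writerState stands in for a pooled Stream: a reusable buffer plus per-use
// state that must be cleared before the object goes back into the pool.
type writerState struct {
	out *bytes.Buffer
	err error
}

var statePool = sync.Pool{
	New: func() interface{} { return &writerState{out: new(bytes.Buffer)} },
}

func borrow() *writerState {
	st := statePool.Get().(*writerState)
	st.out.Reset() // reset on borrow, like stream.Reset(writer)
	return st
}

func release(st *writerState) {
	st.err = nil // drop per-use state so the pool does not pin it alive
	statePool.Put(st)
}

func main() {
	st := borrow()
	st.out.WriteString(`{"ok":true}`)
	fmt.Println(st.out.String())
	release(st)
}
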
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + for _, extension := range ctx.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + 
return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + for _, extension := range ctx.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type 
placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000..13a0b7b08 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go 
b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000..8b6bc8b43 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go similarity index 55% rename from vendor/github.com/json-iterator/go/feature_reflect_extension.go rename to vendor/github.com/json-iterator/go/reflect_extension.go index 177df2c81..917bbe84e 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -2,6 +2,7 @@ package jsoniter import ( "fmt" + "github.com/modern-go/reflect2" "reflect" "sort" "strings" @@ -17,17 +18,15 @@ var extensions = []Extension{} // StructDescriptor describe how should we encode/decode the struct type StructDescriptor struct { - onePtrEmbedded bool - onePtrOptimization bool - Type reflect.Type - Fields []*Binding + Type reflect2.Type + Fields []*Binding } // GetField get one field from the descriptor by its name. // Can not use map here to keep field orders. func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { for _, binding := range structDescriptor.Fields { - if binding.Field.Name == fieldName { + if binding.Field.Name() == fieldName { return binding } } @@ -37,7 +36,7 @@ func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { // Binding describe how should we encode/decode the struct field type Binding struct { levels []int - Field *reflect.StructField + Field reflect2.StructField FromNames []string ToNames []string Encoder ValEncoder @@ -48,10 +47,12 @@ type Binding struct { // Can also rename fields by UpdateStructDescriptor. 
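
The efaceDecoder added above only reuses what an interface{} already holds when that value is a non-nil pointer; otherwise it falls back to iter.Read() and produces generic values. A usage sketch, assuming the package-level jsoniter.Unmarshal adapter:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type point struct{ X, Y int }

func main() {
	// Nothing to reuse: the interface is nil, so the decoder reads a generic map.
	var anyVal interface{}
	_ = jsoniter.Unmarshal([]byte(`{"X":1,"Y":2}`), &anyVal)
	fmt.Printf("%T\n", anyVal) // map[string]interface {}

	// A non-nil pointer is reused: the JSON is decoded into the existing struct.
	p := &point{}
	anyVal = p
	_ = jsoniter.Unmarshal([]byte(`{"X":1,"Y":2}`), &anyVal)
	fmt.Printf("%T %+v\n", anyVal, *p) // *main.point {X:1 Y:2}
}
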
type Extension interface { UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateDecoder(typ reflect.Type) ValDecoder - CreateEncoder(typ reflect.Type) ValEncoder - DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder } // DummyExtension embed this type get dummy implementation for all methods of Extension @@ -62,23 +63,105 @@ type DummyExtension struct { func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { } +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + // CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { return nil } +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + // CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { +func (extension DecoderExtension) 
CreateEncoder(typ reflect2.Type) ValEncoder { return nil } // DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { return decoder } // DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { return encoder } @@ -99,10 +182,6 @@ func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { encoder.fun(ptr, stream) } -func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { if encoder.isEmptyFunc == nil { return false @@ -161,60 +240,80 @@ func RegisterExtension(extension Extension) { extensions = append(extensions, extension) } -func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(typ) +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) if decoder != nil { for _, extension := range extensions { decoder = extension.DecorateDecoder(typ, decoder) } + for _, extension := range ctx.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } } return decoder } -func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { for _, extension := range extensions { decoder := extension.CreateDecoder(typ) if decoder != nil { return decoder } } + for _, extension := range ctx.extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } typeName := typ.String() decoder := typeDecoders[typeName] if decoder != nil { return decoder } if typ.Kind() == reflect.Ptr { - decoder := typeDecoders[typ.Elem().String()] + ptrType := typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] if decoder != nil { - return &OptionalDecoder{typ.Elem(), decoder} + return &OptionalDecoder{ptrType.Elem(), decoder} } } return nil } -func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(typ) +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) if encoder != nil { for _, extension := range extensions { encoder = extension.DecorateEncoder(typ, encoder) } + for _, extension := range ctx.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } } return encoder } -func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { for _, extension := range extensions { encoder := extension.CreateEncoder(typ) if encoder != nil { return encoder } } + for _, extension := range ctx.extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } typeName := typ.String() encoder := typeEncoders[typeName] if encoder != nil { return encoder } if typ.Kind() == reflect.Ptr { - encoder := typeEncoders[typ.Elem().String()] + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] if encoder != nil { return &OptionalEncoder{encoder} } @@ -222,72 +321,60 @@ func _getTypeEncoderFromExtension(typ reflect.Type) 
ValEncoder { return nil } -func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) embeddedBindings := []*Binding{} bindings := []*Binding{} - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - tag := field.Tag.Get(cfg.getTagKey()) + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag { + continue + } tagParts := strings.Split(tag, ",") if tag == "-" { continue } - if field.Anonymous && (tag == "" || tagParts[0] == "") { - if field.Type.Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type) - if err != nil { - return nil, err - } + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) for _, binding := range structDescriptor.Fields { binding.levels = append([]int{i}, binding.levels...) omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} embeddedBindings = append(embeddedBindings, binding) } continue - } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type.Elem()) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &OptionalEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue } - continue } } - fieldNames := calcFieldNames(field.Name, tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) decoder := fieldDecoders[fieldCacheKey] if decoder == nil { - var err error - decoder, err = decoderOfType(cfg, field.Type) - if len(fieldNames) > 0 && err != nil { - return nil, err - } + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) } encoder := fieldEncoders[fieldCacheKey] if encoder == nil { - var err error - encoder, err = encoderOfType(cfg, field.Type) - if len(fieldNames) > 0 && err != nil { - return nil, err - } - // map is stored as pointer in the struct, - // and treat nil or empty map as empty field - if encoder != nil && field.Type.Kind() == reflect.Map { - encoder = &optionalMapEncoder{encoder} - } + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) } binding := &Binding{ - Field: &field, + Field: field, FromNames: fieldNames, ToNames: fieldNames, Decoder: decoder, @@ -296,35 +383,20 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err binding.levels = []int{i} bindings = append(bindings, binding) } - return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil -} -func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - onePtrEmbedded := false - onePtrOptimization := false - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { - onePtrEmbedded = true - } - fallthrough - case reflect.Map: - onePtrOptimization = true - case reflect.Struct: - onePtrOptimization = isStructOnePtr(firstField.Type) - } - } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { structDescriptor := &StructDescriptor{ - onePtrEmbedded: onePtrEmbedded, - onePtrOptimization: onePtrOptimization, - Type: typ, - Fields: bindings, + Type: typ, + Fields: bindings, } for _, extension := range extensions { extension.UpdateStructDescriptor(structDescriptor) } - processTags(structDescriptor, cfg) + for _, extension := range ctx.extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) // merge normal & embedded bindings & sort with original order allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) sort.Sort(allBindings) @@ -332,21 +404,6 @@ func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Bin return structDescriptor } -func isStructOnePtr(typ reflect.Type) bool { - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - return true - case reflect.Map: - return true - case reflect.Struct: - return isStructOnePtr(firstField.Type) - } - } - return 
false -} - type sortableBindings []*Binding func (bindings sortableBindings) Len() int { @@ -374,12 +431,12 @@ func (bindings sortableBindings) Swap(i, j int) { func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { for _, binding := range structDescriptor.Fields { shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") for _, tagPart := range tagParts[1:] { if tagPart == "omitempty" { shouldOmitEmpty = true } else if tagPart == "string" { - if binding.Field.Type.Kind() == reflect.String { + if binding.Field.Type().Kind() == reflect.String { binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} } else { diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000..98d45c1ec --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) 
+ } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 000000000..f2619936c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000..8812f0850 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,318 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } 
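
encoderOfMap above returns sortKeysMapEncoder only when sortMapKeys is set on the frozen config, which is what yields deterministic, encoding/json-style key order. A usage sketch; Config, SortMapKeys, Froze and ConfigFastest are the package's public configuration surface rather than part of this hunk:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Sorted keys: each key/value pair is encoded to a side buffer, sorted,
	// then written out, as in sortKeysMapEncoder.
	sorted := jsoniter.Config{SortMapKeys: true}.Froze()
	out, _ := sorted.Marshal(m)
	fmt.Println(string(out)) // {"a":1,"b":2,"c":3}

	// Without SortMapKeys the plain mapEncoder is used and key order is unspecified.
	out, _ = jsoniter.ConfigFastest.Marshal(m)
	fmt.Println(string(out))
}
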
+} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range ctx.extensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(textMarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textMarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range ctx.extensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, 
but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + subStream.buf = make([]byte, 0, 64) + key, elem := mapIter.UnsafeNext() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer() + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer(), + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + 
stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000..58ac959ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,218 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + 
marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000..9042eb0cb --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,451 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + 
"github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, 
reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) 
Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, 
unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 000000000..43ec71d6d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,133 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return 
encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000..9441d79df --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + 
decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go similarity index 66% rename from vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go rename to vendor/github.com/json-iterator/go/reflect_struct_decoder.go index e6ced77c2..355d2d116 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -3,38 +3,78 @@ package jsoniter import ( "fmt" "io" - "reflect" "strings" "unsafe" + + "github.com/modern-go/reflect2" ) -func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) { - knownHash := map[int32]struct{}{ +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ 0: {}, } + switch len(fields) { case 0: - return &skipObjectDecoder{typ}, nil + return &skipObjectDecoder{typ} case 1: for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} } case 2: - var fieldHash1 int32 - var fieldHash2 int32 + var fieldHash1 int64 + var fieldHash2 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldHash1 == 0 { @@ -45,19 +85,19 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder fieldDecoder2 = fieldDecoder } } - 
return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} case 3: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -72,21 +112,23 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} case 4: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder var fieldDecoder4 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -104,24 +146,26 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} case 5: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder var fieldDecoder4 *structFieldDecoder var fieldDecoder5 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -142,15 +186,18 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} case 6: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 + var fieldName1 int64 + var fieldName2 int64 
+ var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder @@ -158,10 +205,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder var fieldDecoder5 *structFieldDecoder var fieldDecoder6 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -185,16 +232,20 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} case 7: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder @@ -203,10 +254,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder var fieldDecoder6 *structFieldDecoder var fieldDecoder7 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -233,18 +284,22 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} case 8: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder @@ -254,10 +309,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder var fieldDecoder7 *structFieldDecoder var fieldDecoder8 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := 
knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -287,19 +342,24 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} case 9: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder @@ -310,10 +370,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder var fieldDecoder8 *structFieldDecoder var fieldDecoder9 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -346,20 +406,26 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} case 10: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldName10 int32 + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 var fieldDecoder1 *structFieldDecoder var fieldDecoder2 *structFieldDecoder var fieldDecoder3 *structFieldDecoder @@ -371,10 +437,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder var fieldDecoder9 *structFieldDecoder var fieldDecoder10 *structFieldDecoder for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) + fieldHash := calcHash(fieldName, ctx.caseSensitive()) _, known := knownHash[fieldHash] if known { - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } 
knownHash[fieldHash] = struct{}{} if fieldName1 == 0 { @@ -410,66 +476,80 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder } } return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10}, nil + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} } - return &generalStructDecoder{typ, fields}, nil + return &generalStructDecoder{typ, fields, false} } type generalStructDecoder struct { - typ reflect.Type - fields map[string]*structFieldDecoder + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool } func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if !iter.readObjectStart() { return } - var fieldBytes []byte + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { var field string + var fieldDecoder *structFieldDecoder if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes = iter.readObjectFieldAsBytes() + fieldBytes := iter.ReadStringAsSlice() field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } } else { field = iter.ReadString() - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] } } - fieldDecoder := decoder.fields[strings.ToLower(field)] if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - for iter.nextToken() == ',' { - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes := iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - } else { - field = iter.ReadString() - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } + msg := "found unknown field: " + field + if decoder.disallowUnknownFields { + iter.ReportError("ReadObject", msg) } - fieldDecoder = decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } + iter.Skip() + return } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } + fieldDecoder.Decode(ptr, iter) } type 
skipObjectDecoder struct { - typ reflect.Type + typ reflect2.Type } func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { @@ -482,8 +562,8 @@ func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { } type oneFieldStructDecoder struct { - typ reflect.Type - fieldHash int32 + typ reflect2.Type + fieldHash int64 fieldDecoder *structFieldDecoder } @@ -502,15 +582,15 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type twoFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder } @@ -532,17 +612,17 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type threeFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder } @@ -566,19 +646,19 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type fourFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 *structFieldDecoder } @@ -604,21 +684,21 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type fiveFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 *structFieldDecoder - fieldHash5 int32 + fieldHash5 int64 fieldDecoder5 *structFieldDecoder } @@ -646,23 +726,23 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type sixFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 
*structFieldDecoder - fieldHash5 int32 + fieldHash5 int64 fieldDecoder5 *structFieldDecoder - fieldHash6 int32 + fieldHash6 int64 fieldDecoder6 *structFieldDecoder } @@ -692,25 +772,25 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type sevenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 *structFieldDecoder - fieldHash5 int32 + fieldHash5 int64 fieldDecoder5 *structFieldDecoder - fieldHash6 int32 + fieldHash6 int64 fieldDecoder6 *structFieldDecoder - fieldHash7 int32 + fieldHash7 int64 fieldDecoder7 *structFieldDecoder } @@ -742,27 +822,27 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type eightFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 *structFieldDecoder - fieldHash5 int32 + fieldHash5 int64 fieldDecoder5 *structFieldDecoder - fieldHash6 int32 + fieldHash6 int64 fieldDecoder6 *structFieldDecoder - fieldHash7 int32 + fieldHash7 int64 fieldDecoder7 *structFieldDecoder - fieldHash8 int32 + fieldHash8 int64 fieldDecoder8 *structFieldDecoder } @@ -796,29 +876,29 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type nineFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 *structFieldDecoder - fieldHash5 int32 + fieldHash5 int64 fieldDecoder5 *structFieldDecoder - fieldHash6 int32 + fieldHash6 int64 fieldDecoder6 *structFieldDecoder - fieldHash7 int32 + fieldHash7 int64 fieldDecoder7 *structFieldDecoder - fieldHash8 int32 + fieldHash8 int64 fieldDecoder8 *structFieldDecoder - fieldHash9 int32 + fieldHash9 int64 fieldDecoder9 *structFieldDecoder } @@ -854,31 +934,31 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type tenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 + typ reflect2.Type + fieldHash1 int64 fieldDecoder1 *structFieldDecoder - fieldHash2 int32 + fieldHash2 int64 fieldDecoder2 *structFieldDecoder - fieldHash3 int32 + 
fieldHash3 int64 fieldDecoder3 *structFieldDecoder - fieldHash4 int32 + fieldHash4 int64 fieldDecoder4 *structFieldDecoder - fieldHash5 int32 + fieldHash5 int64 fieldDecoder5 *structFieldDecoder - fieldHash6 int32 + fieldHash6 int64 fieldDecoder6 *structFieldDecoder - fieldHash7 int32 + fieldHash7 int64 fieldDecoder7 *structFieldDecoder - fieldHash8 int32 + fieldHash8 int64 fieldDecoder8 *structFieldDecoder - fieldHash9 int32 + fieldHash9 int64 fieldDecoder9 *structFieldDecoder - fieldHash10 int32 + fieldHash10 int64 fieldDecoder10 *structFieldDecoder } @@ -916,19 +996,53 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator } } if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } } type structFieldDecoder struct { - field *reflect.StructField + field reflect2.StructField fieldDecoder ValDecoder } func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) + fieldPtr := decoder.field.UnsafeGet(ptr) decoder.fieldDecoder.Decode(fieldPtr, iter) if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return } } diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go similarity index 50% rename from vendor/github.com/json-iterator/go/feature_reflect_object.go rename to vendor/github.com/json-iterator/go/reflect_struct_encoder.go index 59b1235c0..d0759cf64 100644 --- a/vendor/github.com/json-iterator/go/feature_reflect_object.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -2,23 +2,20 @@ package jsoniter import ( "fmt" + "github.com/modern-go/reflect2" "io" "reflect" - "strings" "unsafe" ) -func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { type bindingTo struct { binding *Binding toName string ignored bool } orderedBindings := []*bindingTo{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } + structDescriptor := describeStruct(ctx, typ) for _, binding := range structDescriptor.Fields { for _, toName := range binding.ToNames { new := &bindingTo{ @@ -29,13 +26,13 @@ func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { if old.toName != toName { continue } - 
old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) } orderedBindings = append(orderedBindings, new) } } if len(orderedBindings) == 0 { - return &emptyStructEncoder{}, nil + return &emptyStructEncoder{} } finalOrderedFields := []structFieldTo{} for _, bindingTo := range orderedBindings { @@ -46,12 +43,36 @@ func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { }) } } - return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } } func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" if newTagged { if oldTagged { if len(old.levels) > len(new.levels) { @@ -78,62 +99,41 @@ func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ig } } -func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - bindings := map[string]*Binding{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - return createStructDecoder(typ, fields) -} - type structFieldEncoder struct { - field *reflect.StructField + field reflect2.StructField fieldEncoder ValEncoder omitempty bool } func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + fieldPtr := encoder.field.UnsafeGet(ptr) encoder.fieldEncoder.Encode(fieldPtr, stream) if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) } } -func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + fieldPtr := encoder.field.UnsafeGet(ptr) return encoder.fieldEncoder.IsEmpty(fieldPtr) } +func (encoder *structFieldEncoder) 
IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + type structEncoder struct { - onePtrEmbedded bool - onePtrOptimization bool - fields []structFieldTo + typ reflect2.Type + fields []structFieldTo } type structFieldTo struct { @@ -148,6 +148,9 @@ func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { continue } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } if isNotFirst { stream.WriteMore() } @@ -156,23 +159,8 @@ func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { isNotFirst = true } stream.WriteObjectEnd() -} - -func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if encoder.onePtrOptimization { - if e.word == nil && encoder.onePtrEmbedded { - stream.WriteObjectStart() - stream.WriteObjectEnd() - return - } - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) } } @@ -187,10 +175,36 @@ func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteEmptyObject() } -func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { return false } + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/stream.go similarity index 62% rename from vendor/github.com/json-iterator/go/feature_stream.go rename to vendor/github.com/json-iterator/go/stream.go index 97355eb5b..17662fded 100644 --- a/vendor/github.com/json-iterator/go/feature_stream.go +++ b/vendor/github.com/json-iterator/go/stream.go @@ -10,7 +10,6 @@ type Stream struct { cfg *frozenConfig out io.Writer buf []byte - n int Error error indention int Attachment interface{} // open for customized encoder @@ -24,8 +23,7 @@ func NewStream(cfg API, out io.Writer, bufSize int) *Stream { return &Stream{ cfg: cfg.(*frozenConfig), out: out, - buf: make([]byte, bufSize), - n: 0, + buf: make([]byte, 0, bufSize), Error: nil, indention: 0, } @@ -39,22 +37,27 
@@ func (stream *Stream) Pool() StreamPool { // Reset reuse this stream instance by assign a new writer func (stream *Stream) Reset(out io.Writer) { stream.out = out - stream.n = 0 + stream.buf = stream.buf[:0] } // Available returns how many bytes are unused in the buffer. func (stream *Stream) Available() int { - return len(stream.buf) - stream.n + return cap(stream.buf) - len(stream.buf) } // Buffered returns the number of bytes that have been written into the current buffer. func (stream *Stream) Buffered() int { - return stream.n + return len(stream.buf) } // Buffer if writer is nil, use this method to take the result func (stream *Stream) Buffer() []byte { - return stream.buf[:stream.n] + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf } // Write writes the contents of p into the buffer. @@ -62,97 +65,34 @@ func (stream *Stream) Buffer() []byte { // If nn < len(p), it also returns an error explaining // why the write is short. func (stream *Stream) Write(p []byte) (nn int, err error) { - for len(p) > stream.Available() && stream.Error == nil { - if stream.out == nil { - stream.growAtLeast(len(p)) - } else { - var n int - if stream.Buffered() == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, stream.Error = stream.out.Write(p) - } else { - n = copy(stream.buf[stream.n:], p) - stream.n += n - stream.Flush() - } - nn += n - p = p[n:] - } - } - if stream.Error != nil { - return nn, stream.Error + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return } - n := copy(stream.buf[stream.n:], p) - stream.n += n - nn += n - return nn, nil + return len(p), nil } // WriteByte writes a single byte. func (stream *Stream) writeByte(c byte) { - if stream.Error != nil { - return - } - if stream.Available() < 1 { - stream.growAtLeast(1) - } - stream.buf[stream.n] = c - stream.n++ + stream.buf = append(stream.buf, c) } func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 2 { - stream.growAtLeast(2) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.n += 2 + stream.buf = append(stream.buf, c1, c2) } func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 3 { - stream.growAtLeast(3) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.n += 3 + stream.buf = append(stream.buf, c1, c2, c3) } func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 4 { - stream.growAtLeast(4) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.n += 4 + stream.buf = append(stream.buf, c1, c2, c3, c4) } func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 5 { - stream.growAtLeast(5) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.buf[stream.n+4] = c5 - stream.n += 5 + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) } // Flush writes any buffered data to the underlying io.Writer. 
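For context on the Stream rework above: the hunks replace the fixed-size buffer plus write index (`buf`, `n`) with a single append-grown byte slice, so Available/Buffered/Buffer are now derived from cap(buf) and len(buf), and the new SetBuffer lets callers hand bytes straight to the internal buffer. Below is a minimal sketch, not part of the patch, of how the reworked buffer behaves when no io.Writer is attached; it uses only Stream methods visible in this diff plus ConfigDefault and WriteArrayEnd, which are assumed from the rest of the jsoniter API.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// With a nil writer, writes only grow the internal buffer;
	// Flush (now called from WriteMore) is a no-op in that case.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 64)

	stream.WriteArrayStart()
	stream.WriteInt(42)
	stream.WriteMore()
	stream.WriteString("hello")
	stream.WriteArrayEnd()

	// Buffer() now returns the slice itself rather than buf[:n].
	fmt.Println(string(stream.Buffer())) // expected: [42,"hello"]
	fmt.Println(stream.Buffered())       // bytes accumulated so far
}
```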
@@ -163,56 +103,20 @@ func (stream *Stream) Flush() error { if stream.Error != nil { return stream.Error } - if stream.n == 0 { - return nil - } - n, err := stream.out.Write(stream.buf[0:stream.n]) - if n < stream.n && err == nil { - err = io.ErrShortWrite - } + n, err := stream.out.Write(stream.buf) if err != nil { - if n > 0 && n < stream.n { - copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) + if stream.Error == nil { + stream.Error = err } - stream.n -= n - stream.Error = err return err } - stream.n = 0 + stream.buf = stream.buf[n:] return nil } -func (stream *Stream) ensure(minimal int) { - available := stream.Available() - if available < minimal { - stream.growAtLeast(minimal) - } -} - -func (stream *Stream) growAtLeast(minimal int) { - if stream.out != nil { - stream.Flush() - if stream.Available() >= minimal { - return - } - } - toGrow := len(stream.buf) - if toGrow < minimal { - toGrow = minimal - } - newBuf := make([]byte, len(stream.buf)+toGrow) - copy(newBuf, stream.Buffer()) - stream.buf = newBuf -} - // WriteRaw write string out without quotes, just like []byte func (stream *Stream) WriteRaw(s string) { - stream.ensure(len(s)) - if stream.Error != nil { - return - } - n := copy(stream.buf[stream.n:], s) - stream.n += n + stream.buf = append(stream.buf, s...) } // WriteNil write null to stream @@ -273,6 +177,7 @@ func (stream *Stream) WriteEmptyObject() { func (stream *Stream) WriteMore() { stream.writeByte(',') stream.writeIndention(0) + stream.Flush() } // WriteArrayStart write [ with possible indention @@ -300,9 +205,7 @@ func (stream *Stream) writeIndention(delta int) { } stream.writeByte('\n') toWrite := stream.indention - delta - stream.ensure(toWrite) - for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { - stream.buf[stream.n] = ' ' - stream.n++ + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') } } diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go similarity index 85% rename from vendor/github.com/json-iterator/go/feature_stream_float.go rename to vendor/github.com/json-iterator/go/stream_float.go index 9a404e11d..f318d2c59 100644 --- a/vendor/github.com/json-iterator/go/feature_stream_float.go +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -21,7 +21,7 @@ func (stream *Stream) WriteFloat32(val float32) { fmt = 'e' } } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) } // WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster @@ -43,13 +43,12 @@ func (stream *Stream) WriteFloat32Lossy(val float32) { return } stream.writeByte('.') - stream.ensure(10) for p := precision - 1; p > 0 && fval < pow10[p]; p-- { stream.writeByte('0') } stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] } } @@ -63,7 +62,7 @@ func (stream *Stream) WriteFloat64(val float64) { fmt = 'e' } } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) } // WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster @@ -85,12 +84,11 @@ func (stream *Stream) WriteFloat64Lossy(val float64) { return } stream.writeByte('.') - stream.ensure(10) for p := precision - 1; p > 0 && fval < pow10[p]; p-- { stream.writeByte('0') } 
stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] } } diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 000000000..d1059ee4c --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = 
writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/stream_str.go similarity index 91% rename from vendor/github.com/json-iterator/go/feature_stream_string.go rename to vendor/github.com/json-iterator/go/stream_str.go index 334282f05..54c2ba0b3 100644 --- a/vendor/github.com/json-iterator/go/feature_stream_string.go +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -219,34 +219,22 @@ var hex = "0123456789abcdef" // WriteStringWithHTMLEscaped write string to stream with html special characters escaped func (stream *Stream) WriteStringWithHTMLEscaped(s string) { - stream.ensure(32) valLen := len(s) - toWriteLen := valLen - bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes - if stream.n+toWriteLen > bufLengthMinusTwo { - toWriteLen = bufLengthMinusTwo - stream.n - } - n := stream.n - stream.buf[n] = '"' - n++ + stream.buf = append(stream.buf, '"') // write string, the fast path, without utf8 and escape support i := 0 - for ; i < toWriteLen; i++ { + for ; i < valLen; i++ { c := s[i] if c < utf8.RuneSelf && htmlSafeSet[c] { - stream.buf[n] = c - n++ + stream.buf = append(stream.buf, c) } else { break } } if i == valLen { - stream.buf[n] = '"' - n++ - stream.n = n + stream.buf = append(stream.buf, '"') return } - stream.n = n writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen) } @@ -321,34 +309,22 @@ func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen // WriteString write string to stream without html escape func (stream *Stream) WriteString(s string) { - stream.ensure(32) valLen := len(s) - toWriteLen := valLen - bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes - if stream.n+toWriteLen > bufLengthMinusTwo { - toWriteLen = bufLengthMinusTwo - stream.n - } - n := stream.n - stream.buf[n] = '"' - n++ + stream.buf = append(stream.buf, '"') // write string, the fast path, without utf8 and escape support i := 
0 - for ; i < toWriteLen; i++ { + for ; i < valLen; i++ { c := s[i] if c > 31 && c != '"' && c != '\\' { - stream.buf[n] = c - n++ + stream.buf = append(stream.buf, c) } else { break } } if i == valLen { - stream.buf[n] = '"' - n++ - stream.n = n + stream.buf = append(stream.buf, '"') return } - stream.n = n writeStringSlowPath(stream, i, s, valLen) } diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/modern-go/concurrent/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md new file mode 100644 index 000000000..acab3200a --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/README.md @@ -0,0 +1,49 @@ +# concurrent + +[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent) +[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent) +[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent) +[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE) + +* concurrent.Map: backport sync.Map for go below 1.9 +* concurrent.Executor: goroutine with explicit ownership and cancellable + +# concurrent.Map + +because sync.Map is only available in go 1.9, we can use concurrent.Map to make code portable + +```go +m := concurrent.NewMap() +m.Store("hello", "world") +elem, found := m.Load("hello") +// elem will be "world" +// found will be true +``` + +# concurrent.Executor + +```go +executor := concurrent.NewUnboundedExecutor() +executor.Go(func(ctx context.Context) { + everyMillisecond := time.NewTicker(time.Millisecond) + for { + select { + case <-ctx.Done(): + fmt.Println("goroutine exited") + return + case <-everyMillisecond.C: + // do something + } + } +}) +time.Sleep(time.Second) +executor.StopAndWaitForever() +fmt.Println("executor stopped") +``` + +attach goroutine to executor instance, so that we can + +* cancel it by stop the executor with Stop/StopAndWait/StopAndWaitForever +* handle panic by callback: the default behavior will no longer crash your application \ No newline at end of file diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go new file mode 100644 index 000000000..623dba1ac --- /dev/null +++ 
b/vendor/github.com/modern-go/concurrent/executor.go @@ -0,0 +1,14 @@ +package concurrent + +import "context" + +// Executor replace go keyword to start a new goroutine +// the goroutine should cancel itself if the context passed in has been cancelled +// the goroutine started by the executor, is owned by the executor +// we can cancel all executors owned by the executor just by stop the executor itself +// however Executor interface does not Stop method, the one starting and owning executor +// should use the concrete type of executor, instead of this interface. +type Executor interface { + // Go starts a new goroutine controlled by the context + Go(handler func(ctx context.Context)) +} diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go new file mode 100644 index 000000000..aeabf8c4f --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/go_above_19.go @@ -0,0 +1,15 @@ +//+build go1.9 + +package concurrent + +import "sync" + +// Map is a wrapper for sync.Map introduced in go1.9 +type Map struct { + sync.Map +} + +// NewMap creates a thread safe Map +func NewMap() *Map { + return &Map{} +} diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go new file mode 100644 index 000000000..b9c8df7f4 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/go_below_19.go @@ -0,0 +1,33 @@ +//+build !go1.9 + +package concurrent + +import "sync" + +// Map implements a thread safe map for go version below 1.9 using mutex +type Map struct { + lock sync.RWMutex + data map[interface{}]interface{} +} + +// NewMap creates a thread safe map +func NewMap() *Map { + return &Map{ + data: make(map[interface{}]interface{}, 32), + } +} + +// Load is same as sync.Map Load +func (m *Map) Load(key interface{}) (elem interface{}, found bool) { + m.lock.RLock() + elem, found = m.data[key] + m.lock.RUnlock() + return +} + +// Load is same as sync.Map Store +func (m *Map) Store(key interface{}, elem interface{}) { + m.lock.Lock() + m.data[key] = elem + m.lock.Unlock() +} diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go new file mode 100644 index 000000000..9756fcc75 --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/log.go @@ -0,0 +1,13 @@ +package concurrent + +import ( + "os" + "log" + "io/ioutil" +) + +// ErrorLogger is used to print out error, can be set to writer other than stderr +var ErrorLogger = log.New(os.Stderr, "", 0) + +// InfoLogger is used to print informational message, default to off +var InfoLogger = log.New(ioutil.Discard, "", 0) \ No newline at end of file diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go new file mode 100644 index 000000000..05a77dceb --- /dev/null +++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go @@ -0,0 +1,119 @@ +package concurrent + +import ( + "context" + "fmt" + "runtime" + "runtime/debug" + "sync" + "time" + "reflect" +) + +// HandlePanic logs goroutine panic by default +var HandlePanic = func(recovered interface{}, funcName string) { + ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered)) + ErrorLogger.Println(string(debug.Stack())) +} + +// UnboundedExecutor is a executor without limits on counts of alive goroutines +// it tracks the goroutine started by it, and can cancel them when shutdown +type UnboundedExecutor struct { + ctx context.Context + 
cancel context.CancelFunc + activeGoroutinesMutex *sync.Mutex + activeGoroutines map[string]int + HandlePanic func(recovered interface{}, funcName string) +} + +// GlobalUnboundedExecutor has the life cycle of the program itself +// any goroutine want to be shutdown before main exit can be started from this executor +// GlobalUnboundedExecutor expects the main function to call stop +// it does not magically knows the main function exits +var GlobalUnboundedExecutor = NewUnboundedExecutor() + +// NewUnboundedExecutor creates a new UnboundedExecutor, +// UnboundedExecutor can not be created by &UnboundedExecutor{} +// HandlePanic can be set with a callback to override global HandlePanic +func NewUnboundedExecutor() *UnboundedExecutor { + ctx, cancel := context.WithCancel(context.TODO()) + return &UnboundedExecutor{ + ctx: ctx, + cancel: cancel, + activeGoroutinesMutex: &sync.Mutex{}, + activeGoroutines: map[string]int{}, + } +} + +// Go starts a new goroutine and tracks its lifecycle. +// Panic will be recovered and logged automatically, except for StopSignal +func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) { + pc := reflect.ValueOf(handler).Pointer() + f := runtime.FuncForPC(pc) + funcName := f.Name() + file, line := f.FileLine(pc) + executor.activeGoroutinesMutex.Lock() + defer executor.activeGoroutinesMutex.Unlock() + startFrom := fmt.Sprintf("%s:%d", file, line) + executor.activeGoroutines[startFrom] += 1 + go func() { + defer func() { + recovered := recover() + // if you want to quit a goroutine without trigger HandlePanic + // use runtime.Goexit() to quit + if recovered != nil { + if executor.HandlePanic == nil { + HandlePanic(recovered, funcName) + } else { + executor.HandlePanic(recovered, funcName) + } + } + executor.activeGoroutinesMutex.Lock() + executor.activeGoroutines[startFrom] -= 1 + executor.activeGoroutinesMutex.Unlock() + }() + handler(executor.ctx) + }() +} + +// Stop cancel all goroutines started by this executor without wait +func (executor *UnboundedExecutor) Stop() { + executor.cancel() +} + +// StopAndWaitForever cancel all goroutines started by this executor and +// wait until all goroutines exited +func (executor *UnboundedExecutor) StopAndWaitForever() { + executor.StopAndWait(context.Background()) +} + +// StopAndWait cancel all goroutines started by this executor and wait. +// Wait can be cancelled by the context passed in. +func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) { + executor.cancel() + for { + oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100) + select { + case <-oneHundredMilliseconds.C: + if executor.checkNoActiveGoroutines() { + return + } + case <-ctx.Done(): + return + } + } +} + +func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool { + executor.activeGoroutinesMutex.Lock() + defer executor.activeGoroutinesMutex.Unlock() + for startFrom, count := range executor.activeGoroutines { + if count > 0 { + InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit", + "startFrom", startFrom, + "count", count) + return false + } + } + return true +} diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md new file mode 100644 index 000000000..6f968aab9 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/README.md @@ -0,0 +1,71 @@ +# reflect2 + +[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2) +[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2) +[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2) +[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE) + +reflect api that avoids runtime reflect.Value cost + +* reflect get/set interface{}, with type checking +* reflect get/set unsafe.Pointer, without type checking +* `reflect2.TypeByName` works like `Class.forName` found in java + +[json-iterator](https://github.com/json-iterator/go) use this package to save runtime dispatching cost. +This package is designed for low level libraries to optimize reflection performance. +General application should still use reflect standard library. + +# reflect2.TypeByName + +```go +// given package is github.com/your/awesome-package +type MyStruct struct { + // ... +} + +// will return the type +reflect2.TypeByName("awesome-package.MyStruct") +// however, if the type has not been used +// it will be eliminated by compiler, so we can not get it in runtime +``` + +# reflect2 get/set interface{} + +```go +valType := reflect2.TypeOf(1) +i := 1 +j := 10 +valType.Set(&i, &j) +// i will be 10 +``` + +to get set `type`, always use its pointer `*type` + +# reflect2 get/set unsafe.Pointer + +```go +valType := reflect2.TypeOf(1) +i := 1 +j := 10 +valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j)) +// i will be 10 +``` + +to get set `type`, always use its pointer `*type` + +# benchmark + +Benchmark is not necessary for this package. It does nothing actually. +As it is just a thin wrapper to make go runtime public. +Both `reflect2` and `reflect` call same function +provided by `runtime` package exposed by go language. + +# unsafe safety + +Instead of casting `[]byte` to `sliceHeader` in your application using unsafe. +We can use reflect2 instead. This way, if `sliceHeader` changes in the future, +only reflect2 need to be upgraded. + +reflect2 tries its best to keep the implementation same as reflect (by testing). 
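The README's closing "unsafe safety" point is what helpers such as UnsafeCastString (defined in reflect2.go later in this patch) exist for: the header cast lives in reflect2 instead of in application code. A short sketch under that assumption, combining it with the checked get/set example from the README; import path and APIs are the ones shown in this vendored package, nothing else is implied.

```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	s := "hello reflect2"

	// Reuse the string's backing array as a []byte without copying;
	// treat the result as read-only, since strings are immutable.
	b := reflect2.UnsafeCastString(s)
	fmt.Println(len(b), string(b[:5])) // 14 hello

	// The type-checked set path from the README: pass pointers to values.
	valType := reflect2.TypeOf(1)
	i, j := 1, 10
	valType.Set(&i, &j)
	fmt.Println(i) // 10
}
```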
\ No newline at end of file diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go new file mode 100644 index 000000000..5c1cea868 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_17.go @@ -0,0 +1,8 @@ +//+build go1.7 + +package reflect2 + +import "unsafe" + +//go:linkname resolveTypeOff reflect.resolveTypeOff +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go new file mode 100644 index 000000000..c7e3b7801 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_above_19.go @@ -0,0 +1,14 @@ +//+build go1.9 + +package reflect2 + +import ( + "unsafe" +) + +//go:linkname makemap reflect.makemap +func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer) + +func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { + return makemap(rtype, cap) +} diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go new file mode 100644 index 000000000..65a93c889 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_17.go @@ -0,0 +1,9 @@ +//+build !go1.7 + +package reflect2 + +import "unsafe" + +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { + return nil +} diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go new file mode 100644 index 000000000..b050ef70c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/go_below_19.go @@ -0,0 +1,14 @@ +//+build !go1.9 + +package reflect2 + +import ( + "unsafe" +) + +//go:linkname makemap reflect.makemap +func makemap(rtype unsafe.Pointer) (m unsafe.Pointer) + +func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer { + return makemap(rtype) +} diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go new file mode 100644 index 000000000..0632b71fb --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -0,0 +1,295 @@ +package reflect2 + +import ( + "github.com/modern-go/concurrent" + "reflect" + "unsafe" +) + +type Type interface { + Kind() reflect.Kind + // New return pointer to data of this type + New() interface{} + // UnsafeNew return the allocated space pointed by unsafe.Pointer + UnsafeNew() unsafe.Pointer + // PackEFace cast a unsafe pointer to object represented pointer + PackEFace(ptr unsafe.Pointer) interface{} + // Indirect dereference object represented pointer to this type + Indirect(obj interface{}) interface{} + // UnsafeIndirect dereference pointer to this type + UnsafeIndirect(ptr unsafe.Pointer) interface{} + // Type1 returns reflect.Type + Type1() reflect.Type + Implements(thatType Type) bool + String() string + RType() uintptr + // interface{} of this type has pointer like behavior + LikePtr() bool + IsNullable() bool + IsNil(obj interface{}) bool + UnsafeIsNil(ptr unsafe.Pointer) bool + Set(obj interface{}, val interface{}) + UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) + AssignableTo(anotherType Type) bool +} + +type ListType interface { + Type + Elem() Type + SetIndex(obj interface{}, index int, elem interface{}) + UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) + GetIndex(obj interface{}, index int) interface{} + UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer +} + +type ArrayType interface { + ListType + Len() int +} + +type 
SliceType interface { + ListType + MakeSlice(length int, cap int) interface{} + UnsafeMakeSlice(length int, cap int) unsafe.Pointer + Grow(obj interface{}, newLength int) + UnsafeGrow(ptr unsafe.Pointer, newLength int) + Append(obj interface{}, elem interface{}) + UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) + LengthOf(obj interface{}) int + UnsafeLengthOf(ptr unsafe.Pointer) int + SetNil(obj interface{}) + UnsafeSetNil(ptr unsafe.Pointer) + Cap(obj interface{}) int + UnsafeCap(ptr unsafe.Pointer) int +} + +type StructType interface { + Type + NumField() int + Field(i int) StructField + FieldByName(name string) StructField + FieldByIndex(index []int) StructField + FieldByNameFunc(match func(string) bool) StructField +} + +type StructField interface { + Offset() uintptr + Name() string + PkgPath() string + Type() Type + Tag() reflect.StructTag + Index() []int + Anonymous() bool + Set(obj interface{}, value interface{}) + UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) + Get(obj interface{}) interface{} + UnsafeGet(obj unsafe.Pointer) unsafe.Pointer +} + +type MapType interface { + Type + Key() Type + Elem() Type + MakeMap(cap int) interface{} + UnsafeMakeMap(cap int) unsafe.Pointer + SetIndex(obj interface{}, key interface{}, elem interface{}) + UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) + TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) + GetIndex(obj interface{}, key interface{}) interface{} + UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer + Iterate(obj interface{}) MapIterator + UnsafeIterate(obj unsafe.Pointer) MapIterator +} + +type MapIterator interface { + HasNext() bool + Next() (key interface{}, elem interface{}) + UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer) +} + +type PtrType interface { + Type + Elem() Type +} + +type InterfaceType interface { + NumMethod() int +} + +type Config struct { + UseSafeImplementation bool +} + +type API interface { + TypeOf(obj interface{}) Type + Type2(type1 reflect.Type) Type +} + +var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze() +var ConfigSafe = Config{UseSafeImplementation: true}.Froze() + +type frozenConfig struct { + useSafeImplementation bool + cache *concurrent.Map +} + +func (cfg Config) Froze() *frozenConfig { + return &frozenConfig{ + useSafeImplementation: cfg.UseSafeImplementation, + cache: concurrent.NewMap(), + } +} + +func (cfg *frozenConfig) TypeOf(obj interface{}) Type { + cacheKey := uintptr(unpackEFace(obj).rtype) + typeObj, found := cfg.cache.Load(cacheKey) + if found { + return typeObj.(Type) + } + return cfg.Type2(reflect.TypeOf(obj)) +} + +func (cfg *frozenConfig) Type2(type1 reflect.Type) Type { + cacheKey := uintptr(unpackEFace(type1).data) + typeObj, found := cfg.cache.Load(cacheKey) + if found { + return typeObj.(Type) + } + type2 := cfg.wrapType(type1) + cfg.cache.Store(cacheKey, type2) + return type2 +} + +func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type { + safeType := safeType{Type: type1, cfg: cfg} + switch type1.Kind() { + case reflect.Struct: + if cfg.useSafeImplementation { + return &safeStructType{safeType} + } + return newUnsafeStructType(cfg, type1) + case reflect.Array: + if cfg.useSafeImplementation { + return &safeSliceType{safeType} + } + return newUnsafeArrayType(cfg, type1) + case reflect.Slice: + if cfg.useSafeImplementation { + return &safeSliceType{safeType} + } + return newUnsafeSliceType(cfg, type1) + case reflect.Map: + if cfg.useSafeImplementation { + return &safeMapType{safeType} 
+ } + return newUnsafeMapType(cfg, type1) + case reflect.Ptr, reflect.Chan, reflect.Func: + if cfg.useSafeImplementation { + return &safeMapType{safeType} + } + return newUnsafePtrType(cfg, type1) + case reflect.Interface: + if cfg.useSafeImplementation { + return &safeMapType{safeType} + } + if type1.NumMethod() == 0 { + return newUnsafeEFaceType(cfg, type1) + } + return newUnsafeIFaceType(cfg, type1) + default: + if cfg.useSafeImplementation { + return &safeType + } + return newUnsafeType(cfg, type1) + } +} + +func TypeOf(obj interface{}) Type { + return ConfigUnsafe.TypeOf(obj) +} + +func TypeOfPtr(obj interface{}) PtrType { + return TypeOf(obj).(PtrType) +} + +func Type2(type1 reflect.Type) Type { + if type1 == nil { + return nil + } + return ConfigUnsafe.Type2(type1) +} + +func PtrTo(typ Type) Type { + return Type2(reflect.PtrTo(typ.Type1())) +} + +func PtrOf(obj interface{}) unsafe.Pointer { + return unpackEFace(obj).data +} + +func RTypeOf(obj interface{}) uintptr { + return uintptr(unpackEFace(obj).rtype) +} + +func IsNil(obj interface{}) bool { + if obj == nil { + return true + } + return unpackEFace(obj).data == nil +} + +func IsNullable(kind reflect.Kind) bool { + switch kind { + case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface: + return true + } + return false +} + +func likePtrKind(kind reflect.Kind) bool { + switch kind { + case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func: + return true + } + return false +} + +func likePtrType(typ reflect.Type) bool { + if likePtrKind(typ.Kind()) { + return true + } + if typ.Kind() == reflect.Struct { + if typ.NumField() != 1 { + return false + } + return likePtrType(typ.Field(0).Type) + } + if typ.Kind() == reflect.Array { + if typ.Len() != 1 { + return false + } + return likePtrType(typ.Elem()) + } + return false +} + +// NoEscape hides a pointer from escape analysis. noescape is +// the identity function but escape analysis doesn't think the +// output depends on the input. noescape is inlined and currently +// compiles down to zero instructions. +// USE CAREFULLY! 
+//go:nosplit +func NoEscape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func UnsafeCastString(str string) []byte { + stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str)) + sliceHeader := &reflect.SliceHeader{ + Data: stringHeader.Data, + Cap: stringHeader.Len, + Len: stringHeader.Len, + } + return *(*[]byte)(unsafe.Pointer(sliceHeader)) +} diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go new file mode 100644 index 000000000..62f299e40 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/reflect2_kind.go @@ -0,0 +1,30 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +// DefaultTypeOfKind return the non aliased default type for the kind +func DefaultTypeOfKind(kind reflect.Kind) Type { + return kindTypes[kind] +} + +var kindTypes = map[reflect.Kind]Type{ + reflect.Bool: TypeOf(true), + reflect.Uint8: TypeOf(uint8(0)), + reflect.Int8: TypeOf(int8(0)), + reflect.Uint16: TypeOf(uint16(0)), + reflect.Int16: TypeOf(int16(0)), + reflect.Uint32: TypeOf(uint32(0)), + reflect.Int32: TypeOf(int32(0)), + reflect.Uint64: TypeOf(uint64(0)), + reflect.Int64: TypeOf(int64(0)), + reflect.Uint: TypeOf(uint(0)), + reflect.Int: TypeOf(int(0)), + reflect.Float32: TypeOf(float32(0)), + reflect.Float64: TypeOf(float64(0)), + reflect.Uintptr: TypeOf(uintptr(0)), + reflect.String: TypeOf(""), + reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)), +} diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go new file mode 100644 index 000000000..d4ba1f4f8 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_field.go @@ -0,0 +1,58 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeField struct { + reflect.StructField +} + +func (field *safeField) Offset() uintptr { + return field.StructField.Offset +} + +func (field *safeField) Name() string { + return field.StructField.Name 
+} + +func (field *safeField) PkgPath() string { + return field.StructField.PkgPath +} + +func (field *safeField) Type() Type { + panic("not implemented") +} + +func (field *safeField) Tag() reflect.StructTag { + return field.StructField.Tag +} + +func (field *safeField) Index() []int { + return field.StructField.Index +} + +func (field *safeField) Anonymous() bool { + return field.StructField.Anonymous +} + +func (field *safeField) Set(obj interface{}, value interface{}) { + val := reflect.ValueOf(obj).Elem() + val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem()) +} + +func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) { + panic("unsafe operation is not supported") +} + +func (field *safeField) Get(obj interface{}) interface{} { + val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index()) + ptr := reflect.New(val.Type()) + ptr.Elem().Set(val) + return ptr.Interface() +} + +func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer { + panic("does not support unsafe operation") +} diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go new file mode 100644 index 000000000..88362205a --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_map.go @@ -0,0 +1,101 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeMapType struct { + safeType +} + +func (type2 *safeMapType) Key() Type { + return type2.safeType.cfg.Type2(type2.Type.Key()) +} + +func (type2 *safeMapType) MakeMap(cap int) interface{} { + ptr := reflect.New(type2.Type) + ptr.Elem().Set(reflect.MakeMap(type2.Type)) + return ptr.Interface() +} + +func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) { + keyVal := reflect.ValueOf(key) + elemVal := reflect.ValueOf(elem) + val := reflect.ValueOf(obj) + val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem()) +} + +func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) { + keyVal := reflect.ValueOf(key) + if key == nil { + keyVal = reflect.New(type2.Type.Key()).Elem() + } + val := reflect.ValueOf(obj).MapIndex(keyVal) + if !val.IsValid() { + return nil, false + } + return val.Interface(), true +} + +func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} { + val := reflect.ValueOf(obj).Elem() + keyVal := reflect.ValueOf(key).Elem() + elemVal := val.MapIndex(keyVal) + if !elemVal.IsValid() { + ptr := reflect.New(reflect.PtrTo(val.Type().Elem())) + return ptr.Elem().Interface() + } + ptr := reflect.New(elemVal.Type()) + ptr.Elem().Set(elemVal) + return ptr.Interface() +} + +func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeMapType) Iterate(obj interface{}) MapIterator { + m := reflect.ValueOf(obj).Elem() + return &safeMapIterator{ + m: m, + keys: m.MapKeys(), + } +} + +func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + panic("does not support unsafe operation") +} + +type safeMapIterator struct { + i int + m reflect.Value + keys []reflect.Value +} + +func (iter *safeMapIterator) HasNext() bool { + return iter.i != len(iter.keys) +} + +func (iter 
*safeMapIterator) Next() (interface{}, interface{}) { + key := iter.keys[iter.i] + elem := iter.m.MapIndex(key) + iter.i += 1 + keyPtr := reflect.New(key.Type()) + keyPtr.Elem().Set(key) + elemPtr := reflect.New(elem.Type()) + elemPtr.Elem().Set(elem) + return keyPtr.Interface(), elemPtr.Interface() +} + +func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) { + panic("does not support unsafe operation") +} diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go new file mode 100644 index 000000000..bcce6fd20 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_slice.go @@ -0,0 +1,92 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeSliceType struct { + safeType +} + +func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) { + val := reflect.ValueOf(obj).Elem() + elem := reflect.ValueOf(value).Elem() + val.Index(index).Set(elem) +} + +func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} { + val := reflect.ValueOf(obj).Elem() + elem := val.Index(index) + ptr := reflect.New(elem.Type()) + ptr.Elem().Set(elem) + return ptr.Interface() +} + +func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} { + val := reflect.MakeSlice(type2.Type, length, cap) + ptr := reflect.New(val.Type()) + ptr.Elem().Set(val) + return ptr.Interface() +} + +func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) Grow(obj interface{}, newLength int) { + oldCap := type2.Cap(obj) + oldSlice := reflect.ValueOf(obj).Elem() + delta := newLength - oldCap + deltaVals := make([]reflect.Value, delta) + newSlice := reflect.Append(oldSlice, deltaVals...) 
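A short sketch of driving the safe map implementation above through the MapType and MapIterator interfaces (the map type and sample data are invented; keys and elements travel as pointers, and Next hands back freshly boxed copies):

    package main

    import (
        "fmt"

        "github.com/modern-go/reflect2"
    )

    func main() {
        // ConfigSafe routes everything through package reflect, i.e. the
        // safeMapType/safeMapIterator shown above.
        mapType := reflect2.ConfigSafe.TypeOf(map[string]int{}).(reflect2.MapType)

        obj := mapType.MakeMap(0) // a *map[string]int boxed in an interface{}
        k, v := "answer", 42
        mapType.SetIndex(obj, &k, &v) // key and element are passed as *string / *int

        got := mapType.GetIndex(obj, &k) // *int pointing at a copy of the element
        fmt.Println(*(got.(*int)))       // 42

        iter := mapType.Iterate(obj)
        for iter.HasNext() {
            key, elem := iter.Next() // freshly allocated *string / *int copies
            fmt.Println(*(key.(*string)), *(elem.(*int)))
        }
    }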
+ oldSlice.Set(newSlice) +} + +func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) { + val := reflect.ValueOf(obj).Elem() + elemVal := reflect.ValueOf(elem).Elem() + newVal := reflect.Append(val, elemVal) + val.Set(newVal) +} + +func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) SetNil(obj interface{}) { + val := reflect.ValueOf(obj).Elem() + val.Set(reflect.Zero(val.Type())) +} + +func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) LengthOf(obj interface{}) int { + return reflect.ValueOf(obj).Elem().Len() +} + +func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int { + panic("does not support unsafe operation") +} + +func (type2 *safeSliceType) Cap(obj interface{}) int { + return reflect.ValueOf(obj).Elem().Cap() +} + +func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int { + panic("does not support unsafe operation") +} diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go new file mode 100644 index 000000000..e5fb9b313 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_struct.go @@ -0,0 +1,29 @@ +package reflect2 + +type safeStructType struct { + safeType +} + +func (type2 *safeStructType) FieldByName(name string) StructField { + field, found := type2.Type.FieldByName(name) + if !found { + panic("field " + name + " not found") + } + return &safeField{StructField: field} +} + +func (type2 *safeStructType) Field(i int) StructField { + return &safeField{StructField: type2.Type.Field(i)} +} + +func (type2 *safeStructType) FieldByIndex(index []int) StructField { + return &safeField{StructField: type2.Type.FieldByIndex(index)} +} + +func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField { + field, found := type2.Type.FieldByNameFunc(match) + if !found { + panic("field match condition not found in " + type2.Type.String()) + } + return &safeField{StructField: field} +} diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go new file mode 100644 index 000000000..ee4e7bb6e --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/safe_type.go @@ -0,0 +1,78 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type safeType struct { + reflect.Type + cfg *frozenConfig +} + +func (type2 *safeType) New() interface{} { + return reflect.New(type2.Type).Interface() +} + +func (type2 *safeType) UnsafeNew() unsafe.Pointer { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Elem() Type { + return type2.cfg.Type2(type2.Type.Elem()) +} + +func (type2 *safeType) Type1() reflect.Type { + return type2.Type +} + +func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Implements(thatType Type) bool { + return type2.Type.Implements(thatType.Type1()) +} + +func (type2 *safeType) RType() uintptr { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Indirect(obj interface{}) interface{} { + return reflect.Indirect(reflect.ValueOf(obj)).Interface() +} + +func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + panic("does not support unsafe 
operation") +} + +func (type2 *safeType) LikePtr() bool { + panic("does not support unsafe operation") +} + +func (type2 *safeType) IsNullable() bool { + return IsNullable(type2.Kind()) +} + +func (type2 *safeType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + return reflect.ValueOf(obj).Elem().IsNil() +} + +func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool { + panic("does not support unsafe operation") +} + +func (type2 *safeType) Set(obj interface{}, val interface{}) { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem()) +} + +func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { + panic("does not support unsafe operation") +} + +func (type2 *safeType) AssignableTo(anotherType Type) bool { + return type2.Type1().AssignableTo(anotherType.Type1()) +} diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go new file mode 100644 index 000000000..6d489112f --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -0,0 +1,103 @@ +package reflect2 + +import ( + "reflect" + "runtime" + "strings" + "unsafe" +) + +// typelinks1 for 1.5 ~ 1.6 +//go:linkname typelinks1 reflect.typelinks +func typelinks1() [][]unsafe.Pointer + +// typelinks2 for 1.7 ~ +//go:linkname typelinks2 reflect.typelinks +func typelinks2() (sections []unsafe.Pointer, offset [][]int32) + +var types = map[string]reflect.Type{} +var packages = map[string]map[string]reflect.Type{} + +func init() { + ver := runtime.Version() + if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { + loadGo15Types() + } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") { + loadGo15Types() + } else { + loadGo17Types() + } +} + +func loadGo15Types() { + var obj interface{} = reflect.TypeOf(0) + typePtrss := typelinks1() + for _, typePtrs := range typePtrss { + for _, typePtr := range typePtrs { + (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr + typ := obj.(reflect.Type) + if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { + loadedType := typ.Elem() + pkgTypes := packages[loadedType.PkgPath()] + if pkgTypes == nil { + pkgTypes = map[string]reflect.Type{} + packages[loadedType.PkgPath()] = pkgTypes + } + types[loadedType.String()] = loadedType + pkgTypes[loadedType.Name()] = loadedType + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr && + typ.Elem().Elem().Kind() == reflect.Struct { + loadedType := typ.Elem().Elem() + pkgTypes := packages[loadedType.PkgPath()] + if pkgTypes == nil { + pkgTypes = map[string]reflect.Type{} + packages[loadedType.PkgPath()] = pkgTypes + } + types[loadedType.String()] = loadedType + pkgTypes[loadedType.Name()] = loadedType + } + } + } +} + +func loadGo17Types() { + var obj interface{} = reflect.TypeOf(0) + sections, offset := typelinks2() + for i, offs := range offset { + rodata := sections[i] + for _, off := range offs { + (*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off) + typ := obj.(reflect.Type) + if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct { + loadedType := typ.Elem() + pkgTypes := packages[loadedType.PkgPath()] + if pkgTypes == nil { + pkgTypes = map[string]reflect.Type{} + packages[loadedType.PkgPath()] = pkgTypes + } + types[loadedType.String()] = loadedType + pkgTypes[loadedType.Name()] = loadedType + } + } + } +} + +type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// TypeByName return the type by its name, just like Class.forName 
in java +func TypeByName(typeName string) Type { + return Type2(types[typeName]) +} + +// TypeByPackageName return the type by its package and name +func TypeByPackageName(pkgPath string, name string) Type { + pkgTypes := packages[pkgPath] + if pkgTypes == nil { + return nil + } + return Type2(pkgTypes[name]) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go new file mode 100644 index 000000000..76cbdba6e --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_array.go @@ -0,0 +1,65 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeArrayType struct { + unsafeType + elemRType unsafe.Pointer + pElemRType unsafe.Pointer + elemSize uintptr + likePtr bool +} + +func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType { + return &UnsafeArrayType{ + unsafeType: *newUnsafeType(cfg, type1), + elemRType: unpackEFace(type1.Elem()).data, + pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data, + elemSize: type1.Elem().Size(), + likePtr: likePtrType(type1), + } +} + +func (type2 *UnsafeArrayType) LikePtr() bool { + return type2.likePtr +} + +func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + if type2.likePtr { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) + } + return packEFace(type2.rtype, ptr) +} + +func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) + type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data) +} + +func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) { + elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len") + typedmemmove(type2.elemRType, elemPtr, elem) +} + +func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} { + objEFace := unpackEFace(obj) + assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, index) + return packEFace(type2.pElemRType, elemPtr) +} + +func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { + return arrayAt(obj, index, type2.elemSize, "i < s.Len") +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go new file mode 100644 index 000000000..805010f3a --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_eface.go @@ -0,0 +1,59 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type eface struct { + rtype unsafe.Pointer + data unsafe.Pointer +} + +func unpackEFace(obj interface{}) *eface { + return (*eface)(unsafe.Pointer(&obj)) +} + +func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} { + var i interface{} + e := (*eface)(unsafe.Pointer(&i)) + e.rtype = rtype + e.data = data + return i +} + +type UnsafeEFaceType struct { + unsafeType +} + +func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType { + return &UnsafeEFaceType{ + unsafeType: *newUnsafeType(cfg, type1), + } +} + +func (type2 *UnsafeEFaceType) 
IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return unpackEFace(*(*interface{})(ptr)).data == nil +} + +func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return *(*interface{})(ptr) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go new file mode 100644 index 000000000..5eb53130a --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_field.go @@ -0,0 +1,74 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeStructField struct { + reflect.StructField + structType *UnsafeStructType + rtype unsafe.Pointer + ptrRType unsafe.Pointer +} + +func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField { + return &UnsafeStructField{ + StructField: structField, + rtype: unpackEFace(structField.Type).data, + ptrRType: unpackEFace(reflect.PtrTo(structField.Type)).data, + structType: structType, + } +} + +func (field *UnsafeStructField) Offset() uintptr { + return field.StructField.Offset +} + +func (field *UnsafeStructField) Name() string { + return field.StructField.Name +} + +func (field *UnsafeStructField) PkgPath() string { + return field.StructField.PkgPath +} + +func (field *UnsafeStructField) Type() Type { + return field.structType.cfg.Type2(field.StructField.Type) +} + +func (field *UnsafeStructField) Tag() reflect.StructTag { + return field.StructField.Tag +} + +func (field *UnsafeStructField) Index() []int { + return field.StructField.Index +} + +func (field *UnsafeStructField) Anonymous() bool { + return field.StructField.Anonymous +} + +func (field *UnsafeStructField) Set(obj interface{}, value interface{}) { + objEFace := unpackEFace(obj) + assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype) + valueEFace := unpackEFace(value) + assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype) + field.UnsafeSet(objEFace.data, valueEFace.data) +} + +func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) { + fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field") + typedmemmove(field.rtype, fieldPtr, value) +} + +func (field *UnsafeStructField) Get(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype) + value := field.UnsafeGet(objEFace.data) + return packEFace(field.ptrRType, value) +} + +func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer { + return add(obj, field.StructField.Offset, "same as non-reflect &v.field") +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go new file mode 100644 index 000000000..b60195533 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_iface.go @@ -0,0 +1,64 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type iface struct { + itab *itab + data unsafe.Pointer +} + +type 
itab struct { + ignore unsafe.Pointer + rtype unsafe.Pointer +} + +func IFaceToEFace(ptr unsafe.Pointer) interface{} { + iface := (*iface)(ptr) + if iface.itab == nil { + return nil + } + return packEFace(iface.itab.rtype, iface.data) +} + +type UnsafeIFaceType struct { + unsafeType +} + +func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType { + return &UnsafeIFaceType{ + unsafeType: *newUnsafeType(cfg, type1), + } +} + +func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return IFaceToEFace(ptr) +} + +func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + iface := (*iface)(ptr) + if iface.itab == nil { + return true + } + return false +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go new file mode 100644 index 000000000..57229c8db --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go @@ -0,0 +1,70 @@ +package reflect2 + +import "unsafe" + +//go:linkname unsafe_New reflect.unsafe_New +func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer + +//go:linkname typedmemmove reflect.typedmemmove +func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer) + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer + +// typedslicecopy copies a slice of elemType values from src to dst, +// returning the number of elements copied. +//go:linkname typedslicecopy reflect.typedslicecopy +//go:noescape +func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int + +//go:linkname mapassign reflect.mapassign +//go:noescape +func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer) + +//go:linkname mapaccess reflect.mapaccess +//go:noescape +func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. +//go:noescape +//go:linkname mapiterinit reflect.mapiterinit +func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter + +//go:noescape +//go:linkname mapiternext reflect.mapiternext +func mapiternext(it *hiter) + +//go:linkname ifaceE2I reflect.ifaceE2I +func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer) + +// A hash iteration structure. +// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate +// the layout of this structure. +type hiter struct { + key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go). + value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go). + // rest fields are ignored +} + +// add returns p+x. 
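The eface/iface plumbing above is what lets reflect2 decide nil-ness from the data word rather than from the interface value as a whole. A small sketch of the classic typed-nil case (variable names invented) where == nil and the package-level IsNil helper disagree:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/modern-go/reflect2"
    )

    func main() {
        var buf *bytes.Buffer // a typed nil pointer
        var r io.Reader = buf // a non-nil interface value wrapping that nil pointer

        fmt.Println(r == nil)          // false: the itab word is set
        fmt.Println(reflect2.IsNil(r)) // true: the unpacked data word is nil
    }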
+// +// The whySafe string is ignored, so that the function still inlines +// as efficiently as p+x, but all call sites should use the string to +// record why the addition is safe, which is to say why the addition +// does not cause x to advance to the very end of p's allocation +// and therefore point incorrectly at the next block in memory. +func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// arrayAt returns the i-th element of p, +// an array whose elements are eltSize bytes wide. +// The array pointed at by p must have at least i+1 elements: +// it is invalid (but impossible to check here) to pass i >= len, +// because then the result will point outside the array. +// whySafe must explain why i < len. (Passing "i < len" is fine; +// the benefit is to surface this assumption at the call site.) +func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer { + return add(p, uintptr(i)*eltSize, "i < len") +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go new file mode 100644 index 000000000..f2e76e6bb --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go @@ -0,0 +1,138 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeMapType struct { + unsafeType + pKeyRType unsafe.Pointer + pElemRType unsafe.Pointer +} + +func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType { + return &UnsafeMapType{ + unsafeType: *newUnsafeType(cfg, type1), + pKeyRType: unpackEFace(reflect.PtrTo(type1.Key())).data, + pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data, + } +} + +func (type2 *UnsafeMapType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return *(*unsafe.Pointer)(ptr) == nil +} + +func (type2 *UnsafeMapType) LikePtr() bool { + return true +} + +func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) +} + +func (type2 *UnsafeMapType) Key() Type { + return type2.cfg.Type2(type2.Type.Key()) +} + +func (type2 *UnsafeMapType) MakeMap(cap int) interface{} { + return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap)) +} + +func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer { + m := makeMapWithSize(type2.rtype, cap) + return unsafe.Pointer(&m) +} + +func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) + keyEFace := unpackEFace(key) + assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) + type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) { + mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem) +} + +func 
(type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) { + objEFace := unpackEFace(obj) + assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype) + keyEFace := unpackEFace(key) + assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data) + if elemPtr == nil { + return nil, false + } + return packEFace(type2.pElemRType, elemPtr), true +} + +func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) + keyEFace := unpackEFace(key) + assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data) + return packEFace(type2.pElemRType, elemPtr) +} + +func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer { + return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key) +} + +func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator { + objEFace := unpackEFace(obj) + assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIterate(objEFace.data) +} + +func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator { + return &UnsafeMapIterator{ + hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)), + pKeyRType: type2.pKeyRType, + pElemRType: type2.pElemRType, + } +} + +type UnsafeMapIterator struct { + *hiter + pKeyRType unsafe.Pointer + pElemRType unsafe.Pointer +} + +func (iter *UnsafeMapIterator) HasNext() bool { + return iter.key != nil +} + +func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) { + key, elem := iter.UnsafeNext() + return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem) +} + +func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) { + key := iter.key + elem := iter.value + mapiternext(iter.hiter) + return key, elem +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go new file mode 100644 index 000000000..8e5ec9cf4 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go @@ -0,0 +1,46 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafePtrType struct { + unsafeType +} + +func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType { + return &UnsafePtrType{ + unsafeType: *newUnsafeType(cfg, type1), + } +} + +func (type2 *UnsafePtrType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return *(*unsafe.Pointer)(ptr) == nil +} + +func (type2 *UnsafePtrType) LikePtr() bool { + return true +} + +func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go new file mode 100644 index 000000000..1c6d876c7 --- 
/dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_slice.go @@ -0,0 +1,177 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +// sliceHeader is a safe version of SliceHeader used within this package. +type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +type UnsafeSliceType struct { + unsafeType + elemRType unsafe.Pointer + pElemRType unsafe.Pointer + elemSize uintptr +} + +func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType { + elemType := type1.Elem() + return &UnsafeSliceType{ + unsafeType: *newUnsafeType(cfg, type1), + pElemRType: unpackEFace(reflect.PtrTo(elemType)).data, + elemRType: unpackEFace(elemType).data, + elemSize: elemType.Size(), + } +} + +func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) { + objEFace := unpackEFace(obj) + assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype) + valEFace := unpackEFace(val) + assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype) + type2.UnsafeSet(objEFace.data, valEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { + *(*sliceHeader)(ptr) = *(*sliceHeader)(val) +} + +func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool { + if obj == nil { + return true + } + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool { + if ptr == nil { + return true + } + return (*sliceHeader)(ptr).Data == nil +} + +func (type2 *UnsafeSliceType) SetNil(obj interface{}) { + objEFace := unpackEFace(obj) + assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype) + type2.UnsafeSetNil(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) { + header := (*sliceHeader)(ptr) + header.Len = 0 + header.Cap = 0 + header.Data = nil +} + +func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} { + return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap)) +} + +func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer { + header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap} + return unsafe.Pointer(header) +} + +func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int { + objEFace := unpackEFace(obj) + assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeLengthOf(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int { + header := (*sliceHeader)(obj) + return header.Len +} + +func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype) + type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) { + header := (*sliceHeader)(obj) + elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len") + typedmemmove(type2.elemRType, elemPtr, elem) +} + +func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} { + objEFace := unpackEFace(obj) + assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype) + elemPtr := type2.UnsafeGetIndex(objEFace.data, index) + return packEFace(type2.pElemRType, 
elemPtr) +} + +func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer { + header := (*sliceHeader)(obj) + return arrayAt(header.Data, index, type2.elemSize, "i < s.Len") +} + +func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) { + objEFace := unpackEFace(obj) + assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype) + elemEFace := unpackEFace(elem) + assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype) + type2.UnsafeAppend(objEFace.data, elemEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) { + header := (*sliceHeader)(obj) + oldLen := header.Len + type2.UnsafeGrow(obj, oldLen+1) + type2.UnsafeSetIndex(obj, oldLen, elem) +} + +func (type2 *UnsafeSliceType) Cap(obj interface{}) int { + objEFace := unpackEFace(obj) + assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeCap(objEFace.data) +} + +func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int { + return (*sliceHeader)(ptr).Cap +} + +func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) { + objEFace := unpackEFace(obj) + assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype) + type2.UnsafeGrow(objEFace.data, newLength) +} + +func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) { + header := (*sliceHeader)(obj) + if newLength <= header.Cap { + header.Len = newLength + return + } + newCap := calcNewCap(header.Cap, newLength) + newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap)) + typedslicecopy(type2.elemRType, *newHeader, *header) + header.Data = newHeader.Data + header.Cap = newHeader.Cap + header.Len = newLength +} + +func calcNewCap(cap int, expectedCap int) int { + if cap == 0 { + cap = expectedCap + } else { + for cap < expectedCap { + if cap < 1024 { + cap += cap + } else { + cap += cap / 4 + } + } + } + return cap +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go new file mode 100644 index 000000000..804d91663 --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_struct.go @@ -0,0 +1,59 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type UnsafeStructType struct { + unsafeType + likePtr bool +} + +func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType { + return &UnsafeStructType{ + unsafeType: *newUnsafeType(cfg, type1), + likePtr: likePtrType(type1), + } +} + +func (type2 *UnsafeStructType) LikePtr() bool { + return type2.likePtr +} + +func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} { + if type2.likePtr { + return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr)) + } + return packEFace(type2.rtype, ptr) +} + +func (type2 *UnsafeStructType) FieldByName(name string) StructField { + structField, found := type2.Type.FieldByName(name) + if !found { + return nil + } + return newUnsafeStructField(type2, structField) +} + +func (type2 *UnsafeStructType) Field(i int) StructField { + return newUnsafeStructField(type2, type2.Type.Field(i)) +} + +func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField { + return newUnsafeStructField(type2, type2.Type.FieldByIndex(index)) +} + +func (type2 
*UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField { + structField, found := type2.Type.FieldByNameFunc(match) + if !found { + panic("field match condition not found in " + type2.Type.String()) + } + return newUnsafeStructField(type2, structField) +} diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go new file mode 100644 index 000000000..13941716c --- /dev/null +++ b/vendor/github.com/modern-go/reflect2/unsafe_type.go @@ -0,0 +1,85 @@ +package reflect2 + +import ( + "reflect" + "unsafe" +) + +type unsafeType struct { + safeType + rtype unsafe.Pointer + ptrRType unsafe.Pointer +} + +func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType { + return &unsafeType{ + safeType: safeType{ + Type: type1, + cfg: cfg, + }, + rtype: unpackEFace(type1).data, + ptrRType: unpackEFace(reflect.PtrTo(type1)).data, + } +} + +func (type2 *unsafeType) Set(obj interface{}, val interface{}) { + objEFace := unpackEFace(obj) + assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype) + valEFace := unpackEFace(val) + assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype) + type2.UnsafeSet(objEFace.data, valEFace.data) +} + +func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) { + typedmemmove(type2.rtype, ptr, val) +} + +func (type2 *unsafeType) IsNil(obj interface{}) bool { + objEFace := unpackEFace(obj) + assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIsNil(objEFace.data) +} + +func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool { + return ptr == nil +} + +func (type2 *unsafeType) UnsafeNew() unsafe.Pointer { + return unsafe_New(type2.rtype) +} + +func (type2 *unsafeType) New() interface{} { + return packEFace(type2.ptrRType, type2.UnsafeNew()) +} + +func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} { + return packEFace(type2.ptrRType, ptr) +} + +func (type2 *unsafeType) RType() uintptr { + return uintptr(type2.rtype) +} + +func (type2 *unsafeType) Indirect(obj interface{}) interface{} { + objEFace := unpackEFace(obj) + assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype) + return type2.UnsafeIndirect(objEFace.data) +} + +func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} { + return packEFace(type2.rtype, obj) +} + +func (type2 *unsafeType) LikePtr() bool { + return false +} + +func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) { + if expectRType != actualRType { + expectType := reflect.TypeOf(0) + (*iface)(unsafe.Pointer(&expectType)).data = expectRType + actualType := reflect.TypeOf(0) + (*iface)(unsafe.Pointer(&actualType)).data = actualRType + panic(where + ": expect " + expectType.String() + ", actual " + actualType.String()) + } +} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index b4d75069f..7b5199099 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -3733,6 +3733,7 @@ message ScaleIOPersistentVolumeSource { optional string storagePool = 6; // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. // +optional optional string storageMode = 7; @@ -3742,7 +3743,8 @@ message ScaleIOPersistentVolumeSource { // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs" // +optional optional string fsType = 9; @@ -3777,6 +3779,7 @@ message ScaleIOVolumeSource { optional string storagePool = 6; // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. // +optional optional string storageMode = 7; @@ -3786,7 +3789,8 @@ message ScaleIOVolumeSource { // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs". // +optional optional string fsType = 9; diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index b941e0ac2..99159ee75 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -1339,6 +1339,7 @@ type ScaleIOVolumeSource struct { // +optional StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. // +optional StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // The name of a volume already created in the ScaleIO system @@ -1346,7 +1347,8 @@ type ScaleIOVolumeSource struct { VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs". // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // Defaults to false (read/write). ReadOnly here will force @@ -1374,6 +1376,7 @@ type ScaleIOPersistentVolumeSource struct { // +optional StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. // +optional StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // The name of a volume already created in the ScaleIO system @@ -1381,7 +1384,8 @@ type ScaleIOPersistentVolumeSource struct { VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs" // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // Defaults to false (read/write). 
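A minimal sketch of what the clarified defaults mean for callers of the Go types above (the pool and volume names are invented): leaving StorageMode and FSType unset relies on the documented ThinProvisioned and "xfs" defaults, while setting them spells the same behaviour out explicitly:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // Relying on the documented defaults: StorageMode -> ThinProvisioned,
        // FSType -> "xfs".
        implicit := v1.ScaleIOVolumeSource{
            StoragePool: "pool01",
            VolumeName:  "vol-1",
        }

        // Spelling the same defaults out explicitly.
        explicit := v1.ScaleIOVolumeSource{
            StoragePool: "pool01",
            VolumeName:  "vol-1",
            StorageMode: "ThinProvisioned",
            FSType:      "xfs",
        }

        fmt.Println(implicit.FSType == "", explicit.FSType) // true xfs
    }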
ReadOnly here will force diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 5b4e173db..0efd777a6 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1854,9 +1854,9 @@ var map_ScaleIOPersistentVolumeSource = map[string]string{ "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", - "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.", + "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", } @@ -1872,9 +1872,9 @@ var map_ScaleIOVolumeSource = map[string]string{ "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", - "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.", + "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", "readOnly": "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", } diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 0501bbcb5..63a7d9cf3 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -47,30 +47,18 @@ func (in *Affinity) DeepCopyInto(out *Affinity) { *out = *in if in.NodeAffinity != nil { in, out := &in.NodeAffinity, &out.NodeAffinity - if *in == nil { - *out = nil - } else { - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) } if in.PodAffinity != nil { in, out := &in.PodAffinity, &out.PodAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) } if in.PodAntiAffinity != nil { in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) } return } @@ -129,39 +117,23 @@ func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { *out = *in if in.CachingMode != nil { in, out := &in.CachingMode, &out.CachingMode - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskCachingMode) - **out = **in - } + *out = new(AzureDataDiskCachingMode) + **out = **in } if in.FSType != nil { in, out := &in.FSType, &out.FSType - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } if in.ReadOnly != nil { in, out := &in.ReadOnly, &out.ReadOnly - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.Kind != nil { in, out := &in.Kind, &out.Kind - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskKind) - **out = **in - } + *out = new(AzureDataDiskKind) + **out = **in } return } @@ -181,12 +153,8 @@ func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistent *out = *in if in.SecretNamespace != nil { in, out := &in.SecretNamespace, &out.SecretNamespace - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } return } @@ -256,30 +224,18 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource } if in.ControllerPublishSecretRef != nil { in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } if in.NodeStageSecretRef != nil { in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } if in.NodePublishSecretRef != nil { in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } return } @@ -330,12 +286,8 @@ func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolume } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } return } @@ -360,12 +312,8 @@ func (in *CephFSVolumeSource) 
DeepCopyInto(out *CephFSVolumeSource) { } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } return } @@ -385,12 +333,8 @@ func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolume *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } return } @@ -410,12 +354,8 @@ func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } return } @@ -435,12 +375,8 @@ func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { *out = *in if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } return } @@ -551,12 +487,15 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { in, out := &in.BinaryData, &out.BinaryData *out = make(map[string][]byte, len(*in)) for key, val := range *in { + var outVal []byte if val == nil { (*out)[key] = nil } else { - (*out)[key] = make([]byte, len(val)) - copy((*out)[key], val) + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) } + (*out)[key] = outVal } } return @@ -586,12 +525,8 @@ func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { out.LocalObjectReference = in.LocalObjectReference if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -612,12 +547,8 @@ func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { out.LocalObjectReference = in.LocalObjectReference if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -694,12 +625,8 @@ func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { } if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -727,21 +654,13 @@ func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -803,39 +722,23 @@ func (in *Container) DeepCopyInto(out *Container) { } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe - if *in == nil { - *out = nil - } else { - *out = new(Probe) - (*in).DeepCopyInto(*out) - } + *out = new(Probe) + (*in).DeepCopyInto(*out) } if in.ReadinessProbe != nil { in, out := &in.ReadinessProbe, &out.ReadinessProbe - if *in == nil { - *out = nil - } else { - *out = new(Probe) - (*in).DeepCopyInto(*out) - } + *out = 
new(Probe) + (*in).DeepCopyInto(*out) } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle - if *in == nil { - *out = nil - } else { - *out = new(Lifecycle) - (*in).DeepCopyInto(*out) - } + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext - if *in == nil { - *out = nil - } else { - *out = new(SecurityContext) - (*in).DeepCopyInto(*out) - } + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) } return } @@ -892,30 +795,18 @@ func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in if in.Waiting != nil { in, out := &in.Waiting, &out.Waiting - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateWaiting) - **out = **in - } + *out = new(ContainerStateWaiting) + **out = **in } if in.Running != nil { in, out := &in.Running, &out.Running - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateRunning) - (*in).DeepCopyInto(*out) - } + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) } if in.Terminated != nil { in, out := &in.Terminated, &out.Terminated - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateTerminated) - (*in).DeepCopyInto(*out) - } + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) } return } @@ -1043,30 +934,18 @@ func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { *out = *in if in.FieldRef != nil { in, out := &in.FieldRef, &out.FieldRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectFieldSelector) - **out = **in - } + *out = new(ObjectFieldSelector) + **out = **in } if in.ResourceFieldRef != nil { in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - if *in == nil { - *out = nil - } else { - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) } if in.Mode != nil { in, out := &in.Mode, &out.Mode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } return } @@ -1093,12 +972,8 @@ func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } return } @@ -1118,12 +993,8 @@ func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { *out = *in if in.SizeLimit != nil { in, out := &in.SizeLimit, &out.SizeLimit - if *in == nil { - *out = nil - } else { - x := (*in).DeepCopy() - *out = &x - } + x := (*in).DeepCopy() + *out = &x } return } @@ -1143,21 +1014,13 @@ func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { *out = *in if in.NodeName != nil { in, out := &in.NodeName, &out.NodeName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } if in.TargetRef != nil { in, out := &in.TargetRef, &out.TargetRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } + *out = new(ObjectReference) + **out = **in } return } @@ -1294,21 +1157,13 @@ func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { *out = *in if in.ConfigMapRef != nil { in, out := &in.ConfigMapRef, &out.ConfigMapRef - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapEnvSource) - (*in).DeepCopyInto(*out) - } + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) } if in.SecretRef != nil { in, out := &in.SecretRef, 
&out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretEnvSource) - (*in).DeepCopyInto(*out) - } + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) } return } @@ -1328,12 +1183,8 @@ func (in *EnvVar) DeepCopyInto(out *EnvVar) { *out = *in if in.ValueFrom != nil { in, out := &in.ValueFrom, &out.ValueFrom - if *in == nil { - *out = nil - } else { - *out = new(EnvVarSource) - (*in).DeepCopyInto(*out) - } + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) } return } @@ -1353,39 +1204,23 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { *out = *in if in.FieldRef != nil { in, out := &in.FieldRef, &out.FieldRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectFieldSelector) - **out = **in - } + *out = new(ObjectFieldSelector) + **out = **in } if in.ResourceFieldRef != nil { in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - if *in == nil { - *out = nil - } else { - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) } if in.ConfigMapKeyRef != nil { in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) } if in.SecretKeyRef != nil { in, out := &in.SecretKeyRef, &out.SecretKeyRef - if *in == nil { - *out = nil - } else { - *out = new(SecretKeySelector) - (*in).DeepCopyInto(*out) - } + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) } return } @@ -1412,21 +1247,13 @@ func (in *Event) DeepCopyInto(out *Event) { in.EventTime.DeepCopyInto(&out.EventTime) if in.Series != nil { in, out := &in.Series, &out.Series - if *in == nil { - *out = nil - } else { - *out = new(EventSeries) - (*in).DeepCopyInto(*out) - } + *out = new(EventSeries) + (*in).DeepCopyInto(*out) } if in.Related != nil { in, out := &in.Related, &out.Related - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } + *out = new(ObjectReference) + **out = **in } return } @@ -1546,12 +1373,8 @@ func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { } if in.Lun != nil { in, out := &in.Lun, &out.Lun - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } if in.WWIDs != nil { in, out := &in.WWIDs, &out.WWIDs @@ -1576,12 +1399,8 @@ func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSour *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } if in.Options != nil { in, out := &in.Options, &out.Options @@ -1608,12 +1427,8 @@ func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } if in.Options != nil { in, out := &in.Options, &out.Options @@ -1742,30 +1557,18 @@ func (in *Handler) DeepCopyInto(out *Handler) { *out = *in if in.Exec != nil { in, out := &in.Exec, &out.Exec - if *in == nil { - *out = nil - } else { - *out = new(ExecAction) - (*in).DeepCopyInto(*out) - } + *out = new(ExecAction) + (*in).DeepCopyInto(*out) } if in.HTTPGet != nil { in, out := &in.HTTPGet, &out.HTTPGet - if *in == nil { - *out = nil - } else { - *out = 
new(HTTPGetAction) - (*in).DeepCopyInto(*out) - } + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) } if in.TCPSocket != nil { in, out := &in.TCPSocket, &out.TCPSocket - if *in == nil { - *out = nil - } else { - *out = new(TCPSocketAction) - **out = **in - } + *out = new(TCPSocketAction) + **out = **in } return } @@ -1806,12 +1609,8 @@ func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { *out = *in if in.Type != nil { in, out := &in.Type, &out.Type - if *in == nil { - *out = nil - } else { - *out = new(HostPathType) - **out = **in - } + *out = new(HostPathType) + **out = **in } return } @@ -1836,21 +1635,13 @@ func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSo } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } if in.InitiatorName != nil { in, out := &in.InitiatorName, &out.InitiatorName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } return } @@ -1875,21 +1666,13 @@ func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } if in.InitiatorName != nil { in, out := &in.InitiatorName, &out.InitiatorName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } return } @@ -1909,12 +1692,8 @@ func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { *out = *in if in.Mode != nil { in, out := &in.Mode, &out.Mode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } return } @@ -1934,21 +1713,13 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { *out = *in if in.PostStart != nil { in, out := &in.PostStart, &out.PostStart - if *in == nil { - *out = nil - } else { - *out = new(Handler) - (*in).DeepCopyInto(*out) - } + *out = new(Handler) + (*in).DeepCopyInto(*out) } if in.PreStop != nil { in, out := &in.PreStop, &out.PreStop - if *in == nil { - *out = nil - } else { - *out = new(Handler) - (*in).DeepCopyInto(*out) - } + *out = new(Handler) + (*in).DeepCopyInto(*out) } return } @@ -2362,12 +2133,8 @@ func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { *out = *in if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - if *in == nil { - *out = nil - } else { - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) } if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution @@ -2412,12 +2179,8 @@ func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { *out = *in if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapNodeConfigSource) - **out = **in - } + *out = new(ConfigMapNodeConfigSource) + **out = **in } return } @@ -2437,30 +2200,18 @@ func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) { *out = *in if in.Assigned != nil { in, out := &in.Assigned, &out.Assigned - if *in == nil { - 
*out = nil - } else { - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) } if in.Active != nil { in, out := &in.Active, &out.Active - if *in == nil { - *out = nil - } else { - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) } if in.LastKnownGood != nil { in, out := &in.LastKnownGood, &out.LastKnownGood - if *in == nil { - *out = nil - } else { - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) } return } @@ -2659,12 +2410,8 @@ func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { } if in.ConfigSource != nil { in, out := &in.ConfigSource, &out.ConfigSource - if *in == nil { - *out = nil - } else { - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) } return } @@ -2729,12 +2476,8 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { } if in.Config != nil { in, out := &in.Config, &out.Config - if *in == nil { - *out = nil - } else { - *out = new(NodeConfigStatus) - (*in).DeepCopyInto(*out) - } + *out = new(NodeConfigStatus) + (*in).DeepCopyInto(*out) } return } @@ -2922,31 +2665,19 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec } if in.Selector != nil { in, out := &in.Selector, &out.Selector - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.LabelSelector) - (*in).DeepCopyInto(*out) - } + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) } in.Resources.DeepCopyInto(&out.Resources) if in.StorageClassName != nil { in, out := &in.StorageClassName, &out.StorageClassName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } if in.VolumeMode != nil { in, out := &in.VolumeMode, &out.VolumeMode - if *in == nil { - *out = nil - } else { - *out = new(PersistentVolumeMode) - **out = **in - } + *out = new(PersistentVolumeMode) + **out = **in } return } @@ -3050,201 +2781,113 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { *out = *in if in.GCEPersistentDisk != nil { in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } + *out = new(GCEPersistentDiskVolumeSource) + **out = **in } if in.AWSElasticBlockStore != nil { in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - if *in == nil { - *out = nil - } else { - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in } if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath - if *in == nil { - *out = nil - } else { - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - if *in == nil { - *out = nil - } else { - *out = new(GlusterfsVolumeSource) - **out = **in - } + *out = new(GlusterfsVolumeSource) + **out = **in } if in.NFS != nil { in, out := &in.NFS, &out.NFS - if *in == nil { - *out = nil - } else { - *out = new(NFSVolumeSource) - **out = **in - } + *out = new(NFSVolumeSource) + **out = **in } if in.RBD != nil { in, out := &in.RBD, &out.RBD - if *in == nil { - *out = nil - } else { - *out = new(RBDPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(RBDPersistentVolumeSource) + 
(*in).DeepCopyInto(*out) } if in.ISCSI != nil { in, out := &in.ISCSI, &out.ISCSI - if *in == nil { - *out = nil - } else { - *out = new(ISCSIPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(ISCSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.Cinder != nil { in, out := &in.Cinder, &out.Cinder - if *in == nil { - *out = nil - } else { - *out = new(CinderPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(CinderPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.CephFS != nil { in, out := &in.CephFS, &out.CephFS - if *in == nil { - *out = nil - } else { - *out = new(CephFSPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(CephFSPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.FC != nil { in, out := &in.FC, &out.FC - if *in == nil { - *out = nil - } else { - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) } if in.Flocker != nil { in, out := &in.Flocker, &out.Flocker - if *in == nil { - *out = nil - } else { - *out = new(FlockerVolumeSource) - **out = **in - } + *out = new(FlockerVolumeSource) + **out = **in } if in.FlexVolume != nil { in, out := &in.FlexVolume, &out.FlexVolume - if *in == nil { - *out = nil - } else { - *out = new(FlexPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(FlexPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.AzureFile != nil { in, out := &in.AzureFile, &out.AzureFile - if *in == nil { - *out = nil - } else { - *out = new(AzureFilePersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(AzureFilePersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.VsphereVolume != nil { in, out := &in.VsphereVolume, &out.VsphereVolume - if *in == nil { - *out = nil - } else { - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in } if in.Quobyte != nil { in, out := &in.Quobyte, &out.Quobyte - if *in == nil { - *out = nil - } else { - *out = new(QuobyteVolumeSource) - **out = **in - } + *out = new(QuobyteVolumeSource) + **out = **in } if in.AzureDisk != nil { in, out := &in.AzureDisk, &out.AzureDisk - if *in == nil { - *out = nil - } else { - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) } if in.PhotonPersistentDisk != nil { in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in } if in.PortworxVolume != nil { in, out := &in.PortworxVolume, &out.PortworxVolume - if *in == nil { - *out = nil - } else { - *out = new(PortworxVolumeSource) - **out = **in - } + *out = new(PortworxVolumeSource) + **out = **in } if in.ScaleIO != nil { in, out := &in.ScaleIO, &out.ScaleIO - if *in == nil { - *out = nil - } else { - *out = new(ScaleIOPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(ScaleIOPersistentVolumeSource) + (*in).DeepCopyInto(*out) } if in.Local != nil { in, out := &in.Local, &out.Local - if *in == nil { - *out = nil - } else { - *out = new(LocalVolumeSource) - **out = **in - } + *out = new(LocalVolumeSource) + **out = **in } if in.StorageOS != nil { in, out := &in.StorageOS, &out.StorageOS - if *in == nil { - *out = nil - } else { - *out = new(StorageOSPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(StorageOSPersistentVolumeSource) + 
(*in).DeepCopyInto(*out) } if in.CSI != nil { in, out := &in.CSI, &out.CSI - if *in == nil { - *out = nil - } else { - *out = new(CSIPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(CSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) } return } @@ -3277,12 +2920,8 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { } if in.ClaimRef != nil { in, out := &in.ClaimRef, &out.ClaimRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } + *out = new(ObjectReference) + **out = **in } if in.MountOptions != nil { in, out := &in.MountOptions, &out.MountOptions @@ -3291,21 +2930,13 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { } if in.VolumeMode != nil { in, out := &in.VolumeMode, &out.VolumeMode - if *in == nil { - *out = nil - } else { - *out = new(PersistentVolumeMode) - **out = **in - } + *out = new(PersistentVolumeMode) + **out = **in } if in.NodeAffinity != nil { in, out := &in.NodeAffinity, &out.NodeAffinity - if *in == nil { - *out = nil - } else { - *out = new(VolumeNodeAffinity) - (*in).DeepCopyInto(*out) - } + *out = new(VolumeNodeAffinity) + (*in).DeepCopyInto(*out) } return } @@ -3415,12 +3046,8 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = *in if in.LabelSelector != nil { in, out := &in.LabelSelector, &out.LabelSelector - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.LabelSelector) - (*in).DeepCopyInto(*out) - } + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) } if in.Namespaces != nil { in, out := &in.Namespaces, &out.Namespaces @@ -3551,12 +3178,8 @@ func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { *out = *in if in.Value != nil { in, out := &in.Value, &out.Value - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } + *out = new(string) + **out = **in } return } @@ -3640,38 +3263,22 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { out.TypeMeta = in.TypeMeta if in.SinceSeconds != nil { in, out := &in.SinceSeconds, &out.SinceSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.SinceTime != nil { in, out := &in.SinceTime, &out.SinceTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } + *out = (*in).DeepCopy() } if in.TailLines != nil { in, out := &in.TailLines, &out.TailLines - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.LimitBytes != nil { in, out := &in.LimitBytes, &out.LimitBytes - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } @@ -3770,39 +3377,23 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = *in if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions - if *in == nil { - *out = nil - } else { - *out = new(SELinuxOptions) - **out = **in - } + *out = new(SELinuxOptions) + **out = **in } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.RunAsGroup != nil { in, out := &in.RunAsGroup, &out.RunAsGroup - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.RunAsNonRoot != nil { in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - if *in == nil { - 
*out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.SupplementalGroups != nil { in, out := &in.SupplementalGroups, &out.SupplementalGroups @@ -3811,12 +3402,8 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.Sysctls != nil { in, out := &in.Sysctls, &out.Sysctls @@ -3841,12 +3428,8 @@ func (in *PodSignature) DeepCopyInto(out *PodSignature) { *out = *in if in.PodController != nil { in, out := &in.PodController, &out.PodController - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.OwnerReference) - (*in).DeepCopyInto(*out) - } + *out = new(meta_v1.OwnerReference) + (*in).DeepCopyInto(*out) } return } @@ -3887,21 +3470,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector @@ -3912,30 +3487,18 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { } if in.AutomountServiceAccountToken != nil { in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.ShareProcessNamespace != nil { in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext - if *in == nil { - *out = nil - } else { - *out = new(PodSecurityContext) - (*in).DeepCopyInto(*out) - } + *out = new(PodSecurityContext) + (*in).DeepCopyInto(*out) } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets @@ -3944,12 +3507,8 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { } if in.Affinity != nil { in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(Affinity) - (*in).DeepCopyInto(*out) - } + *out = new(Affinity) + (*in).DeepCopyInto(*out) } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations @@ -3967,21 +3526,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { } if in.Priority != nil { in, out := &in.Priority, &out.Priority - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } if in.DNSConfig != nil { in, out := &in.DNSConfig, &out.DNSConfig - if *in == nil { - *out = nil - } else { - *out = new(PodDNSConfig) - (*in).DeepCopyInto(*out) - } + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) } if in.ReadinessGates != nil { in, out := &in.ReadinessGates, &out.ReadinessGates @@ -4013,11 +3564,7 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } + *out = (*in).DeepCopy() } if in.InitContainerStatuses != nil { 
in, out := &in.InitContainerStatuses, &out.InitContainerStatuses @@ -4172,12 +3719,8 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = *in if in.UID != nil { in, out := &in.UID, &out.UID - if *in == nil { - *out = nil - } else { - *out = new(types.UID) - **out = **in - } + *out = new(types.UID) + **out = **in } return } @@ -4256,12 +3799,8 @@ func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } return } @@ -4302,12 +3841,8 @@ func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } return } @@ -4332,12 +3867,8 @@ func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } return } @@ -4466,12 +3997,8 @@ func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec *out = *in if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -4482,12 +4009,8 @@ func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec } if in.Template != nil { in, out := &in.Template, &out.Template - if *in == nil { - *out = nil - } else { - *out = new(PodTemplateSpec) - (*in).DeepCopyInto(*out) - } + *out = new(PodTemplateSpec) + (*in).DeepCopyInto(*out) } return } @@ -4642,12 +4165,8 @@ func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { } if in.ScopeSelector != nil { in, out := &in.ScopeSelector, &out.ScopeSelector - if *in == nil { - *out = nil - } else { - *out = new(ScopeSelector) - (*in).DeepCopyInto(*out) - } + *out = new(ScopeSelector) + (*in).DeepCopyInto(*out) } return } @@ -4743,12 +4262,8 @@ func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolu *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } + *out = new(SecretReference) + **out = **in } return } @@ -4768,12 +4283,8 @@ func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } return } @@ -4841,12 +4352,15 @@ func (in *Secret) DeepCopyInto(out *Secret) { in, out := &in.Data, &out.Data *out = make(map[string][]byte, len(*in)) for key, val := range *in { + var outVal []byte if val == nil { (*out)[key] = nil } else { - (*out)[key] = make([]byte, len(val)) - copy((*out)[key], val) + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) } + (*out)[key] = outVal } } if in.StringData != nil { @@ -4883,12 +4397,8 @@ func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { out.LocalObjectReference = in.LocalObjectReference if in.Optional != 
nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -4909,12 +4419,8 @@ func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { out.LocalObjectReference = in.LocalObjectReference if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -4975,12 +4481,8 @@ func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { } if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -5023,21 +4525,13 @@ func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { } if in.DefaultMode != nil { in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } if in.Optional != nil { in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -5057,75 +4551,43 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = *in if in.Capabilities != nil { in, out := &in.Capabilities, &out.Capabilities - if *in == nil { - *out = nil - } else { - *out = new(Capabilities) - (*in).DeepCopyInto(*out) - } + *out = new(Capabilities) + (*in).DeepCopyInto(*out) } if in.Privileged != nil { in, out := &in.Privileged, &out.Privileged - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.SELinuxOptions != nil { in, out := &in.SELinuxOptions, &out.SELinuxOptions - if *in == nil { - *out = nil - } else { - *out = new(SELinuxOptions) - **out = **in - } + *out = new(SELinuxOptions) + **out = **in } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.RunAsGroup != nil { in, out := &in.RunAsGroup, &out.RunAsGroup - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.RunAsNonRoot != nil { in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.ReadOnlyRootFilesystem != nil { in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.AllowPrivilegeEscalation != nil { in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -5211,12 +4673,8 @@ func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { } if in.AutomountServiceAccountToken != nil { in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -5277,12 +4735,8 @@ func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenPr *out = *in if in.ExpirationSeconds != nil { in, out := &in.ExpirationSeconds, &out.ExpirationSeconds - if *in 
== nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } @@ -5399,12 +4853,8 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.SessionAffinityConfig != nil { in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig - if *in == nil { - *out = nil - } else { - *out = new(SessionAffinityConfig) - (*in).DeepCopyInto(*out) - } + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) } return } @@ -5441,12 +4891,8 @@ func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { *out = *in if in.ClientIP != nil { in, out := &in.ClientIP, &out.ClientIP - if *in == nil { - *out = nil - } else { - *out = new(ClientIPConfig) - (*in).DeepCopyInto(*out) - } + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) } return } @@ -5466,12 +4912,8 @@ func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistent *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } + *out = new(ObjectReference) + **out = **in } return } @@ -5491,12 +4933,8 @@ func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { *out = *in if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } + *out = new(LocalObjectReference) + **out = **in } return } @@ -5549,11 +4987,7 @@ func (in *Taint) DeepCopyInto(out *Taint) { *out = *in if in.TimeAdded != nil { in, out := &in.TimeAdded, &out.TimeAdded - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } + *out = (*in).DeepCopy() } return } @@ -5573,12 +5007,8 @@ func (in *Toleration) DeepCopyInto(out *Toleration) { *out = *in if in.TolerationSeconds != nil { in, out := &in.TolerationSeconds, &out.TolerationSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } @@ -5675,12 +5105,8 @@ func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in if in.MountPropagation != nil { in, out := &in.MountPropagation, &out.MountPropagation - if *in == nil { - *out = nil - } else { - *out = new(MountPropagationMode) - **out = **in - } + *out = new(MountPropagationMode) + **out = **in } return } @@ -5700,12 +5126,8 @@ func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { *out = *in if in.Required != nil { in, out := &in.Required, &out.Required - if *in == nil { - *out = nil - } else { - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) } return } @@ -5725,39 +5147,23 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = *in if in.Secret != nil { in, out := &in.Secret, &out.Secret - if *in == nil { - *out = nil - } else { - *out = new(SecretProjection) - (*in).DeepCopyInto(*out) - } + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) } if in.DownwardAPI != nil { in, out := &in.DownwardAPI, &out.DownwardAPI - if *in == nil { - *out = nil - } else { - *out = new(DownwardAPIProjection) - (*in).DeepCopyInto(*out) - } + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) } if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapProjection) - (*in).DeepCopyInto(*out) - } + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) } if in.ServiceAccountToken != nil { in, 
out := &in.ServiceAccountToken, &out.ServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(ServiceAccountTokenProjection) - (*in).DeepCopyInto(*out) - } + *out = new(ServiceAccountTokenProjection) + (*in).DeepCopyInto(*out) } return } @@ -5777,246 +5183,138 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = *in if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath - if *in == nil { - *out = nil - } else { - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) } if in.EmptyDir != nil { in, out := &in.EmptyDir, &out.EmptyDir - if *in == nil { - *out = nil - } else { - *out = new(EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) } if in.GCEPersistentDisk != nil { in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } + *out = new(GCEPersistentDiskVolumeSource) + **out = **in } if in.AWSElasticBlockStore != nil { in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - if *in == nil { - *out = nil - } else { - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in } if in.GitRepo != nil { in, out := &in.GitRepo, &out.GitRepo - if *in == nil { - *out = nil - } else { - *out = new(GitRepoVolumeSource) - **out = **in - } + *out = new(GitRepoVolumeSource) + **out = **in } if in.Secret != nil { in, out := &in.Secret, &out.Secret - if *in == nil { - *out = nil - } else { - *out = new(SecretVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) } if in.NFS != nil { in, out := &in.NFS, &out.NFS - if *in == nil { - *out = nil - } else { - *out = new(NFSVolumeSource) - **out = **in - } + *out = new(NFSVolumeSource) + **out = **in } if in.ISCSI != nil { in, out := &in.ISCSI, &out.ISCSI - if *in == nil { - *out = nil - } else { - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - if *in == nil { - *out = nil - } else { - *out = new(GlusterfsVolumeSource) - **out = **in - } + *out = new(GlusterfsVolumeSource) + **out = **in } if in.PersistentVolumeClaim != nil { in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - if *in == nil { - *out = nil - } else { - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in } if in.RBD != nil { in, out := &in.RBD, &out.RBD - if *in == nil { - *out = nil - } else { - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) } if in.FlexVolume != nil { in, out := &in.FlexVolume, &out.FlexVolume - if *in == nil { - *out = nil - } else { - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) } if in.Cinder != nil { in, out := &in.Cinder, &out.Cinder - if *in == nil { - *out = nil - } else { - *out = new(CinderVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(CinderVolumeSource) + (*in).DeepCopyInto(*out) } if in.CephFS != nil { in, out := &in.CephFS, &out.CephFS - if *in == nil { - *out = nil - } else { - *out = new(CephFSVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(CephFSVolumeSource) + 
(*in).DeepCopyInto(*out) } if in.Flocker != nil { in, out := &in.Flocker, &out.Flocker - if *in == nil { - *out = nil - } else { - *out = new(FlockerVolumeSource) - **out = **in - } + *out = new(FlockerVolumeSource) + **out = **in } if in.DownwardAPI != nil { in, out := &in.DownwardAPI, &out.DownwardAPI - if *in == nil { - *out = nil - } else { - *out = new(DownwardAPIVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) } if in.FC != nil { in, out := &in.FC, &out.FC - if *in == nil { - *out = nil - } else { - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) } if in.AzureFile != nil { in, out := &in.AzureFile, &out.AzureFile - if *in == nil { - *out = nil - } else { - *out = new(AzureFileVolumeSource) - **out = **in - } + *out = new(AzureFileVolumeSource) + **out = **in } if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) } if in.VsphereVolume != nil { in, out := &in.VsphereVolume, &out.VsphereVolume - if *in == nil { - *out = nil - } else { - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in } if in.Quobyte != nil { in, out := &in.Quobyte, &out.Quobyte - if *in == nil { - *out = nil - } else { - *out = new(QuobyteVolumeSource) - **out = **in - } + *out = new(QuobyteVolumeSource) + **out = **in } if in.AzureDisk != nil { in, out := &in.AzureDisk, &out.AzureDisk - if *in == nil { - *out = nil - } else { - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) } if in.PhotonPersistentDisk != nil { in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in } if in.Projected != nil { in, out := &in.Projected, &out.Projected - if *in == nil { - *out = nil - } else { - *out = new(ProjectedVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) } if in.PortworxVolume != nil { in, out := &in.PortworxVolume, &out.PortworxVolume - if *in == nil { - *out = nil - } else { - *out = new(PortworxVolumeSource) - **out = **in - } + *out = new(PortworxVolumeSource) + **out = **in } if in.ScaleIO != nil { in, out := &in.ScaleIO, &out.ScaleIO - if *in == nil { - *out = nil - } else { - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) } if in.StorageOS != nil { in, out := &in.StorageOS, &out.StorageOS - if *in == nil { - *out = nil - } else { - *out = new(StorageOSVolumeSource) - (*in).DeepCopyInto(*out) - } + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 31a46a6d3..9b2c41f7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -67,11 +67,6 @@ option go_package = "resource"; // 1.5 will be serialized as "1500m" // 1.5Gi will be serialized as "1536Mi" // -// NOTE: We reserve the right to amend this canonical format, 
perhaps to -// allow 1.5 to be canonical. -// TODO: Remove above disclaimer after all bikeshedding about format is over, -// or after March 2015. -// // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. // diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index c3cd13960..6875e6a0f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -69,11 +69,6 @@ import ( // 1.5 will be serialized as "1500m" // 1.5Gi will be serialized as "1536Mi" // -// NOTE: We reserve the right to amend this canonical format, perhaps to -// allow 1.5 to be canonical. -// TODO: Remove above disclaimer after all bikeshedding about format is over, -// or after March 2015. -// // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. // @@ -506,7 +501,7 @@ func (q *Quantity) Sign() int { return q.i.Sign() } -// AsScaled returns the current value, rounded up to the provided scale, and returns +// AsScale returns the current value, rounded up to the provided scale, and returns // false if the scale resulted in a loss of precision. func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) { if q.d.Dec != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 77bd9a6b4..a3240cb2c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -77,12 +77,8 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { } if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index c13fe4af8..ee1447541 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -162,55 +162,9 @@ func (meta *ObjectMeta) GetInitializers() *Initializers { return m func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } - -func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { - if meta.OwnerReferences == nil { - return nil - } - ret := make([]OwnerReference, len(meta.OwnerReferences)) - for i := 0; i < len(meta.OwnerReferences); i++ { - ret[i].Kind = meta.OwnerReferences[i].Kind - ret[i].Name = meta.OwnerReferences[i].Name - ret[i].UID = meta.OwnerReferences[i].UID - ret[i].APIVersion = meta.OwnerReferences[i].APIVersion - if meta.OwnerReferences[i].Controller != nil { - value := *meta.OwnerReferences[i].Controller - ret[i].Controller = &value - } - if meta.OwnerReferences[i].BlockOwnerDeletion != nil { - value := *meta.OwnerReferences[i].BlockOwnerDeletion - ret[i].BlockOwnerDeletion = &value - } - } - return ret -} - +func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } func (meta 
*ObjectMeta) SetOwnerReferences(references []OwnerReference) { - if references == nil { - meta.OwnerReferences = nil - return - } - newReferences := make([]OwnerReference, len(references)) - for i := 0; i < len(references); i++ { - newReferences[i].Kind = references[i].Kind - newReferences[i].Name = references[i].Name - newReferences[i].UID = references[i].UID - newReferences[i].APIVersion = references[i].APIVersion - if references[i].Controller != nil { - value := *references[i].Controller - newReferences[i].Controller = &value - } - if references[i].BlockOwnerDeletion != nil { - value := *references[i].BlockOwnerDeletion - newReferences[i].BlockOwnerDeletion = &value - } - } - meta.OwnerReferences = newReferences -} - -func (meta *ObjectMeta) GetClusterName() string { - return meta.ClusterName -} -func (meta *ObjectMeta) SetClusterName(clusterName string) { - meta.ClusterName = clusterName + meta.OwnerReferences = references } +func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } +func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 98dfea095..2367d2aee 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -197,39 +197,23 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { out.TypeMeta = in.TypeMeta if in.GracePeriodSeconds != nil { in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.Preconditions != nil { in, out := &in.Preconditions, &out.Preconditions - if *in == nil { - *out = nil - } else { - *out = new(Preconditions) - (*in).DeepCopyInto(*out) - } + *out = new(Preconditions) + (*in).DeepCopyInto(*out) } if in.OrphanDependents != nil { in, out := &in.OrphanDependents, &out.OrphanDependents - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.PropagationPolicy != nil { in, out := &in.PropagationPolicy, &out.PropagationPolicy - if *in == nil { - *out = nil - } else { - *out = new(DeletionPropagation) - **out = **in - } + *out = new(DeletionPropagation) + **out = **in } return } @@ -440,12 +424,8 @@ func (in *Initializers) DeepCopyInto(out *Initializers) { } if in.Result != nil { in, out := &in.Result, &out.Result - if *in == nil { - *out = nil - } else { - *out = new(Status) - (*in).DeepCopyInto(*out) - } + *out = new(Status) + (*in).DeepCopyInto(*out) } return } @@ -587,12 +567,8 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { out.TypeMeta = in.TypeMeta if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } @@ -631,20 +607,12 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) if in.DeletionTimestamp != nil { in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - if *in == nil { - *out = nil - } else { - *out = (*in).DeepCopy() - } + *out = (*in).DeepCopy() } if in.DeletionGracePeriodSeconds != nil { in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = 
new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.Labels != nil { in, out := &in.Labels, &out.Labels @@ -669,12 +637,8 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { } if in.Initializers != nil { in, out := &in.Initializers, &out.Initializers - if *in == nil { - *out = nil - } else { - *out = new(Initializers) - (*in).DeepCopyInto(*out) - } + *out = new(Initializers) + (*in).DeepCopyInto(*out) } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers @@ -699,21 +663,13 @@ func (in *OwnerReference) DeepCopyInto(out *OwnerReference) { *out = *in if in.Controller != nil { in, out := &in.Controller, &out.Controller - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } if in.BlockOwnerDeletion != nil { in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } + *out = new(bool) + **out = **in } return } @@ -749,12 +705,8 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = *in if in.UID != nil { in, out := &in.UID, &out.UID - if *in == nil { - *out = nil - } else { - *out = new(types.UID) - **out = **in - } + *out = new(types.UID) + **out = **in } return } @@ -813,12 +765,8 @@ func (in *Status) DeepCopyInto(out *Status) { out.ListMeta = in.ListMeta if in.Details != nil { in, out := &in.Details, &out.Details - if *in == nil { - *out = nil - } else { - *out = new(StatusDetails) - (*in).DeepCopyInto(*out) - } + *out = new(StatusDetails) + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go index 2e79a131f..230c256ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -61,8 +61,9 @@ func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList if (*in)[i] == nil { (*out)[i] = nil } else { - (*out)[i] = new(PartialObjectMetadata) - (*in)[i].DeepCopyInto((*out)[i]) + in, out := &(*in)[i], &(*out)[i] + *out = new(PartialObjectMetadata) + (*in).DeepCopyInto(*out) } } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index f7738d116..7b26567ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -24,6 +24,7 @@ import ( "github.com/ghodss/yaml" jsoniter "github.com/json-iterator/go" + "github.com/modern-go/reflect2" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -68,31 +69,62 @@ type Serializer struct { var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} -func init() { - // Force jsoniter to decode number to interface{} via ints, if possible. - decodeNumberAsInt64IfPossible := func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - switch iter.WhatIsNext() { - case jsoniter.NumberValue: - var number json.Number - iter.ReadVal(&number) - i64, err := strconv.ParseInt(string(number), 10, 64) - if err == nil { - *(*interface{})(ptr) = i64 - return - } - f64, err := strconv.ParseFloat(string(number), 64) - if err == nil { - *(*interface{})(ptr) = f64 - return - } - // Not much we can do here. 
- default: - *(*interface{})(ptr) = iter.Read() +type customNumberExtension struct { + jsoniter.DummyExtension +} + +func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { + if typ.String() == "interface {}" { + return customNumberDecoder{} + } + return nil +} + +type customNumberDecoder struct { +} + +func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + var number jsoniter.Number + iter.ReadVal(&number) + i64, err := strconv.ParseInt(string(number), 10, 64) + if err == nil { + *(*interface{})(ptr) = i64 + return + } + f64, err := strconv.ParseFloat(string(number), 64) + if err == nil { + *(*interface{})(ptr) = f64 + return } + iter.ReportError("DecodeNumber", err.Error()) + default: + *(*interface{})(ptr) = iter.Read() } - jsoniter.RegisterTypeDecoderFunc("interface {}", decodeNumberAsInt64IfPossible) } +// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive when unmarshalling, and otherwise compatible with +// the encoding/json standard library. +func CaseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} + +// Private copy of jsoniter to try to shield against possible mutations +// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them +// in some other library will mess with every usage of the jsoniter library in the whole program. +// See https://github.com/json-iterator/go/issues/265 +var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() + // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { if len(actual.Kind) == 0 { @@ -157,7 +189,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err), isUnstructured: - if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, into); err != nil { + if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil { return nil, actual, err } return into, actual, nil @@ -181,7 +213,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return nil, actual, err } - if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, obj); err != nil { + if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } return obj, actual, nil @@ -190,7 +222,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. 
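// Editor's sketch (not part of the vendored patch): how a case-sensitive
// jsoniter API such as the one frozen above differs from encoding/json,
// which matches struct field names case-insensitively. The widget type and
// the input document are invented for illustration.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type widget struct {
	Kind string `json:"kind"`
}

func main() {
	// Same settings as CaseSensitiveJsonIterator in the hunk above.
	api := jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
		CaseSensitive:          true,
	}.Froze()

	var w widget
	// "Kind" does not match the lowercase "kind" tag, so the field stays empty
	// under the case-sensitive API; ConfigCompatibleWithStandardLibrary would
	// have populated it.
	if err := api.Unmarshal([]byte(`{"Kind":"Pod"}`), &w); err != nil {
		fmt.Println("unmarshal error:", err)
	}
	fmt.Printf("kind=%q\n", w.Kind) // prints kind=""
}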
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { if s.yaml { - json, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(obj) + json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err } @@ -203,7 +235,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { } if s.pretty { - data, err := jsoniter.ConfigCompatibleWithStandardLibrary.MarshalIndent(obj, "", " ") + data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 31705dee3..4767fd1dd 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -48,7 +48,7 @@ func (v *Error) ErrorBody() string { var s string switch v.Type { case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: - s = fmt.Sprintf("%s", v.Type) + s = v.Type.String() default: value := v.BadValue valueType := reflect.TypeOf(value) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index a25e92465..ca61168cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -230,13 +230,13 @@ func pollInternal(wait WaitFunc, condition ConditionFunc) error { // PollImmediate tries a condition func until it returns true, an error, or the timeout // is reached. // -// Poll always checks 'condition' before waiting for the interval. 'condition' +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' // will always be invoked at least once. // // Some intervals may be missed if the condition takes too long or the time // window is too short. // -// If you want to Poll something forever, see PollInfinite. +// If you want to immediately Poll something forever, see PollImmediateInfinite. 
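// Editor's sketch (not part of the vendored patch): typical PollImmediate
// usage as described by the corrected doc comment above. The condition runs
// once immediately, then once per interval, until it returns true, returns a
// non-nil error, or the timeout elapses. backendReady is a hypothetical probe.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func backendReady() (bool, error) { return true, nil } // placeholder readiness check

func main() {
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		ok, err := backendReady()
		if err != nil {
			return false, err // a non-nil error stops polling right away
		}
		return ok, nil // false means "not ready yet, try again after the interval"
	})
	fmt.Println("poll finished:", err)
}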
 func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
 	return pollImmediateInternal(poller(interval, timeout), condition)
 }
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
index f062b8f06..a73d31b3f 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
@@ -31,12 +31,8 @@ func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
 	in.Spec.DeepCopyInto(&out.Spec)
 	if in.Status != nil {
 		in, out := &in.Status, &out.Status
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ExecCredentialStatus)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -64,12 +60,8 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
 	*out = *in
 	if in.Response != nil {
 		in, out := &in.Response, &out.Response
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Response)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Response)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -89,11 +81,7 @@ func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
 	*out = *in
 	if in.ExpirationTimestamp != nil {
 		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = (*in).DeepCopy()
-		}
+		*out = (*in).DeepCopy()
 	}
 	return
 }
@@ -115,12 +103,15 @@ func (in *Response) DeepCopyInto(out *Response) {
 		in, out := &in.Header, &out.Header
 		*out = make(map[string][]string, len(*in))
 		for key, val := range *in {
+			var outVal []string
 			if val == nil {
 				(*out)[key] = nil
 			} else {
-				(*out)[key] = make([]string, len(val))
-				copy((*out)[key], val)
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
 			}
+			(*out)[key] = outVal
 		}
 	}
 	return
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
index 656fea4d8..736b8cf00 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
@@ -31,12 +31,8 @@ func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
 	out.Spec = in.Spec
 	if in.Status != nil {
 		in, out := &in.Status, &out.Status
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ExecCredentialStatus)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -80,11 +76,7 @@ func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
 	*out = *in
 	if in.ExpirationTimestamp != nil {
 		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = (*in).DeepCopy()
-		}
+		*out = (*in).DeepCopy()
 	}
 	return
 }
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
index 008c3c7df..c568a6fc8 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
@@ -31,12 +31,8 @@ func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
 	in.Spec.DeepCopyInto(&out.Spec)
 	if in.Status != nil {
 		in, out := &in.Status, &out.Status
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ExecCredentialStatus)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -64,12 +60,8 @@ func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
 	*out = *in
 	if in.Response != nil {
 		in, out := &in.Response, &out.Response
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(Response)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(Response)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -89,11 +81,7 @@ func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
 	*out = *in
 	if in.ExpirationTimestamp != nil {
 		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = (*in).DeepCopy()
-		}
+		*out = (*in).DeepCopy()
 	}
 	return
 }
@@ -115,12 +103,15 @@ func (in *Response) DeepCopyInto(out *Response) {
 		in, out := &in.Header, &out.Header
 		*out = make(map[string][]string, len(*in))
 		for key, val := range *in {
+			var outVal []string
 			if val == nil {
 				(*out)[key] = nil
 			} else {
-				(*out)[key] = make([]string, len(val))
-				copy((*out)[key], val)
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
 			}
+			(*out)[key] = outVal
 		}
 	}
 	return
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
index b90aa8d74..3240a7a98 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
@@ -46,31 +46,26 @@ func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
 		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
 		*out = make(map[string][]string, len(*in))
 		for key, val := range *in {
+			var outVal []string
 			if val == nil {
 				(*out)[key] = nil
 			} else {
-				(*out)[key] = make([]string, len(val))
-				copy((*out)[key], val)
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
 			}
+			(*out)[key] = outVal
 		}
 	}
 	if in.AuthProvider != nil {
 		in, out := &in.AuthProvider, &out.AuthProvider
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(AuthProviderConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(AuthProviderConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.Exec != nil {
 		in, out := &in.Exec, &out.Exec
-		if *in == nil {
-			*out = nil
-		} else {
-			*out = new(ExecConfig)
-			(*in).DeepCopyInto(*out)
-		}
+		*out = new(ExecConfig)
+		(*in).DeepCopyInto(*out)
 	}
 	if in.Extensions != nil {
 		in, out := &in.Extensions, &out.Extensions
@@ -159,36 +154,45 @@ func (in *Config) DeepCopyInto(out *Config) {
 		in, out := &in.Clusters, &out.Clusters
 		*out = make(map[string]*Cluster, len(*in))
 		for key, val := range *in {
+			var outVal *Cluster
 			if val == nil {
 				(*out)[key] = nil
 			} else {
-				(*out)[key] = new(Cluster)
-				val.DeepCopyInto((*out)[key])
+				in, out := &val, &outVal
+				*out = new(Cluster)
+				(*in).DeepCopyInto(*out)
 			}
+			(*out)[key] = outVal
 		}
 	}
 	if in.AuthInfos != nil {
 		in, out := &in.AuthInfos, &out.AuthInfos
 		*out = make(map[string]*AuthInfo, len(*in))
 		for key, val := range *in {
+			var outVal *AuthInfo
 			if val == nil {
 				(*out)[key] = nil
 			} else {
-				(*out)[key] = new(AuthInfo)
-				val.DeepCopyInto((*out)[key])
+				in, out := &val, &outVal
+				*out = new(AuthInfo)
+				(*in).DeepCopyInto(*out)
 			}
+			(*out)[key] = outVal
 		}
 	}
 	if in.Contexts != nil {
 		in, out := &in.Contexts, &out.Contexts
 		*out = make(map[string]*Context, len(*in))
 		for key, val := range *in {
+			var outVal *Context
 			if val == nil {
 				(*out)[key] = nil
 			} else {
-				(*out)[key] = new(Context)
-				val.DeepCopyInto((*out)[key])
+				in, out := &val, &outVal
+				*out = new(Context)
+				(*in).DeepCopyInto(*out)
 			}
+			(*out)[key] = outVal
 		}
 	}
 	if in.Extensions != nil {
diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go
index 374e8cae6..a57bf09d5 100644
--- a/vendor/k8s.io/client-go/util/cert/io.go
+++ b/vendor/k8s.io/client-go/util/cert/io.go
@@ -88,7 +88,8 @@ func WriteKey(keyPath string, data []byte) error {
 // can't find one, it will generate a new key and store it there.
 func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
 	loadedData, err := ioutil.ReadFile(keyPath)
-	if err == nil {
+	// Call verifyKeyData to ensure the file wasn't empty/corrupt.
+	if err == nil && verifyKeyData(loadedData) {
 		return loadedData, false, err
 	}
 	if !os.IsNotExist(err) {
@@ -181,3 +182,12 @@ func PublicKeysFromFile(file string) ([]interface{}, error) {
 	}
 	return keys, nil
 }
+
+// verifyKeyData returns true if the provided data appears to be a valid private key.
+func verifyKeyData(data []byte) bool {
+	if len(data) == 0 {
+		return false
+	}
+	_, err := ParsePrivateKeyPEM(data)
+	return err == nil
+}