From 241e1057e412078ebe3beda0d4da945e3ad24626 Mon Sep 17 00:00:00 2001 From: joey <34028658+Joey-1445601153@users.noreply.github.com> Date: Thu, 21 Oct 2021 13:21:14 +0800 Subject: [PATCH] feat(services/oss): Move services oss back (#927) * Add oss back * Mobe oss back * Support read with offset and size Co-authored-by: JinnyYi <82445294+JinnyYi@users.noreply.github.com> --- .github/dependabot.yml | 4 + .github/workflows/services-test-oss.yml | 40 + services/oss/.gitignore | 7 + services/oss/CHANGELOG.md | 115 ++ services/oss/LICENSE | 201 ++++ services/oss/Makefile | 43 + services/oss/Makefile.env.example | 4 + services/oss/README.md | 35 + services/oss/doc.go | 6 + services/oss/generated.go | 1387 +++++++++++++++++++++++ services/oss/go.mod | 14 + services/oss/go.sum | 116 ++ services/oss/iterator.go | 35 + services/oss/service.go | 74 ++ services/oss/service.toml | 88 ++ services/oss/storage.go | 692 +++++++++++ services/oss/tests/README.md | 32 + services/oss/tests/storage_test.go | 43 + services/oss/tests/utils_test.go | 30 + services/oss/tools.go | 6 + services/oss/utils.go | 334 ++++++ 21 files changed, 3306 insertions(+) create mode 100644 .github/workflows/services-test-oss.yml create mode 100644 services/oss/.gitignore create mode 100644 services/oss/CHANGELOG.md create mode 100644 services/oss/LICENSE create mode 100644 services/oss/Makefile create mode 100644 services/oss/Makefile.env.example create mode 100644 services/oss/README.md create mode 100644 services/oss/doc.go create mode 100644 services/oss/generated.go create mode 100644 services/oss/go.mod create mode 100644 services/oss/go.sum create mode 100644 services/oss/iterator.go create mode 100644 services/oss/service.go create mode 100644 services/oss/service.toml create mode 100644 services/oss/storage.go create mode 100644 services/oss/tests/README.md create mode 100644 services/oss/tests/storage_test.go create mode 100644 services/oss/tests/utils_test.go create mode 100644 services/oss/tools.go create mode 100644 services/oss/utils.go diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 104eb6c1b..7f5c0c421 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -68,3 +68,7 @@ updates: directory: "/services/onedrive" schedule: interval: daily + - package-ecosystem: gomod + directory: "/services/oss" + schedule: + interval: daily diff --git a/.github/workflows/services-test-oss.yml b/.github/workflows/services-test-oss.yml new file mode 100644 index 000000000..171f7984f --- /dev/null +++ b/.github/workflows/services-test-oss.yml @@ -0,0 +1,40 @@ +name: "Services Test Oss" + +on: + push: + paths: + - 'services/oss/**' + pull_request: + paths: + - 'services/oss/**' + +jobs: + services_test_oss: + name: "Services Test Oss" + runs-on: self-hosted + + strategy: + matrix: + go: [ "1.16", "1.17" ] + + steps: + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + + - name: Load secret + uses: 1password/load-secrets-action@v1 + env: + STORAGE_OSS_CREDENTIAL: op://Engineering/Oss/testing/credential + STORAGE_OSS_NAME: op://Engineering/Oss/testing/name + STORAGE_OSS_ENDPOINT: op://Engineering/Oss/testing/endpoint + + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Test + env: + STORAGE_OSS_INTEGRATION_TEST: on + working-directory: services/oss + run: make integration_test diff --git a/services/oss/.gitignore b/services/oss/.gitignore new file mode 100644 index 000000000..a2eb8ad8d --- /dev/null +++ b/services/oss/.gitignore @@ -0,0 +1,7 @@ 
+coverage.* +bin/ +Makefile.env + +# Jetbrain IDE +.idea +*.iml diff --git a/services/oss/CHANGELOG.md b/services/oss/CHANGELOG.md new file mode 100644 index 000000000..6f4853b3b --- /dev/null +++ b/services/oss/CHANGELOG.md @@ -0,0 +1,115 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/) +and this project adheres to [Semantic Versioning](https://semver.org/). + +## [v2.4.0] - 2021-09-13 + +### Changed + +- ci: Upgrade go version to 1.16 +- ci: Enable auto merge for dependabot +- ci: Cleanup Service Integration Tests (#54) +- docs: Update README (#55) + +## [v2.3.0] - 2021-08-16 + +### Added + +- feat: Implement CreateLink and setup linker test in go-service-oss (#48) + +### Fixed + +- storage: Fix wrong method call when set multipart size maximum in metadata (#51) + +### Upgraded + +- build(deps): bump github.com/beyondstorage/go-storage/v4 from 4.3.2 to 4.4.0 (#45) +- build(deps): bump github.com/aliyun/aliyun-oss-go-sdk (#49) + +## [v2.2.0] - 2021-07-21 + +### Added + +- ci: Add gofmt action (#35) +- ci: Add diff check action (#38) +- ci: Add dependabot auto build support (#39) + +### Changed + +- storage: Implement GSP-134 Write Behavior Consistency (#43) +- storage: Implement GSP-654 Unify List Behavior (#43) + +### Fixed + +- ci: Fix auto-build not work correctly + +### Upgraded + +- build(deps): Bump github.com/aliyun/aliyun-oss-go-sdk from 2.1.8+incompatible to 2.1.9+incompatible (#33) + +## [v2.1.0] - 2021-06-29 + +### Added + +- *: Implement GSP-87 Feature Gates (#26) +- storage: Implement GSP-93 Add ObjectMode Pair (#31) +- storage: Implement GSP-97 Add Restrictions In Storage Metadata (#31) + +### Changed + +- *: Implement GSP-109 Redesign Features (#31) +- *: Implement GSP-117 Rename Service to System as the Opposite to Global (#31) + +### Fixed + +- storage: Fix listMultipart cannot get complete uploaded parts (#28) + +## [v2.0.0] - 2021-05-24 + +### Added + +- storage: Implement appender support (#15) +- storage: Implement CommitAppend (#18) +- *: Implement GSP-47 & GSP-51 (#22) +- storage: Implement Multipart support (#21) +- storage: Implement GSP-61 Add object mode check for operations (#23) + +### Changed + +- storage: Idempotent storager delete operation (#20) +- *: Implement GSP-73 Organization rename (#24) + +## [v1.1.0] - 2021-04-24 + +### Added + +- *: Implement default pair support for service (#5) +- storage: Implement Create API (#11) +- *: Add UnimplementedStub (#12) +- tests: Introduce STORAGE_OSS_INTEGRATION_TEST (#13) +- storage: Implement SSE support (#14) +- storage: Implement GSP-40 (#16) + +### Changed + +- ci: Only run Integration Test while push to master + +### Upgraded + +- build(deps): Bump github.com/aliyun/aliyun-oss-go-sdk (#9) + +## v1.0.0 - 2021-02-07 + +### Added + +- Implement oss services. 
+ +[v2.4.0]: https://github.com/beyondstorage/go-service-oss/compare/v2.3.0...v2.4.0 +[v2.3.0]: https://github.com/beyondstorage/go-service-oss/compare/v2.2.0...v2.3.0 +[v2.2.0]: https://github.com/beyondstorage/go-service-oss/compare/v2.1.0...v2.2.0 +[v2.1.0]: https://github.com/beyondstorage/go-service-oss/compare/v2.0.0...v2.1.0 +[v2.0.0]: https://github.com/beyondstorage/go-service-oss/compare/v1.1.0...v2.0.0 +[v1.1.0]: https://github.com/beyondstorage/go-service-oss/compare/v1.0.0...v1.1.0 diff --git a/services/oss/LICENSE b/services/oss/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/services/oss/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/services/oss/Makefile new file mode 100644 index 000000000..f14abf437 --- /dev/null +++ b/services/oss/Makefile @@ -0,0 +1,43 @@ +SHELL := /bin/bash + +-include Makefile.env + +.PHONY: all check format vet lint build test generate tidy + +help: + @echo "Please use \`make <target>\` where <target> is one of" + @echo "  check       to do static check" + @echo "  build       to create bin directory and build" + @echo "  generate    to generate code" + @echo "  test        to run test" + +check: vet + +format: + go fmt ./... + +vet: + go vet ./... + +generate: + @echo "generate code" + go generate ./... + go fmt ./... + +build: tidy generate check + go build ./... + +test: + go test -race -coverprofile=coverage.txt -covermode=atomic -v . + go tool cover -html="coverage.txt" -o "coverage.html" + +integration_test: + go test -count=1 -race -covermode=atomic -v ./tests + +tidy: + go mod tidy + go mod verify + +clean: + @echo "clean generated files" + find . -type f -name 'generated.go' -delete diff --git a/services/oss/Makefile.env.example new file mode 100644 index 000000000..d27336a42 --- /dev/null +++ b/services/oss/Makefile.env.example @@ -0,0 +1,4 @@ +export STORAGE_OSS_INTEGRATION_TEST=on +export STORAGE_OSS_CREDENTIAL=hmac:access_key:secret_key +export STORAGE_OSS_NAME=bucketname +export STORAGE_OSS_ENDPOINT=https:oss-region-name.aliyuncs.com \ No newline at end of file diff --git a/services/oss/README.md new file mode 100644 index 000000000..c9aecebae --- /dev/null +++ b/services/oss/README.md @@ -0,0 +1,35 @@ +[![Services Test Oss](https://github.com/beyondstorage/go-storage/actions/workflows/services-test-oss.yml/badge.svg)](https://github.com/beyondstorage/go-storage/actions/workflows/services-test-oss.yml) + +# oss + +[Aliyun Object Storage](https://cn.aliyun.com/product/oss) service support for [go-storage](https://github.com/beyondstorage/go-storage).
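The commit message above notes "Support read with offset and size". A minimal, hypothetical sketch of such a ranged read, assuming the generic `pairs.WithOffset`/`pairs.WithSize` pairs from `go.beyondstorage.io/v5/pairs` and placeholder credentials, could look like this:

```go
package main

import (
	"bytes"
	"log"

	_ "go.beyondstorage.io/services/oss/v3"
	"go.beyondstorage.io/v5/pairs"
	"go.beyondstorage.io/v5/services"
)

func main() {
	// Placeholder connection string; substitute real credentials and endpoint.
	store, err := services.NewStoragerFromString("oss://bucket_name/path/to/workdir?credential=hmac:<access_key>:<secret_key>&endpoint=https:<location>.aliyuncs.com")
	if err != nil {
		log.Fatal(err)
	}

	// Read 1 KiB starting at byte offset 128 of hello.txt into an in-memory buffer.
	var buf bytes.Buffer
	n, err := store.Read("hello.txt", &buf, pairs.WithOffset(128), pairs.WithSize(1024))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", n)
}
```

Omitting both pairs falls back to reading the whole object.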
+ +## Install + +```go +go get go.beyondstorage.io/services/oss/v3 +``` + +## Usage + +```go +import ( + "log" + + _ "go.beyondstorage.io/services/oss/v3" + "go.beyondstorage.io/v5/services" +) + +func main() { + store, err := services.NewStoragerFromString("oss://bucket_name/path/to/workdir?credential=hmac:<access_key>:<secret_key>&endpoint=https:<location>.aliyuncs.com") + if err != nil { + log.Fatal(err) + } + + // Write data from io.Reader into hello.txt + n, err := store.Write("hello.txt", r, length) +} +``` + +- See more examples in [go-storage-example](https://github.com/beyondstorage/go-storage-example). +- Read [more docs](https://beyondstorage.io/docs/go-storage/services/oss) about go-service-oss. diff --git a/services/oss/doc.go new file mode 100644 index 000000000..d79681669 --- /dev/null +++ b/services/oss/doc.go @@ -0,0 +1,6 @@ +/* +Package oss provides support for Aliyun Object Storage Service (https://cn.aliyun.com/product/oss) +*/ +package oss + +//go:generate go run -tags tools go.beyondstorage.io/v5/cmd/definitions service.toml diff --git a/services/oss/generated.go new file mode 100644 index 000000000..e86da288a --- /dev/null +++ b/services/oss/generated.go @@ -0,0 +1,1387 @@ +// Code generated by go generate via cmd/definitions; DO NOT EDIT. +package oss + +import ( + "context" + "io" + "net/http" + "strings" + "time" + + . "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/httpclient" + "go.beyondstorage.io/v5/services" + . "go.beyondstorage.io/v5/types" +) + +var ( + _ Storager + _ services.ServiceError + _ httpclient.Options + _ time.Duration + _ http.Request + _ Error +) + +// Type is the type for oss +const Type = "oss" + +// ObjectSystemMetadata stores system metadata for object. +type ObjectSystemMetadata struct { + ServerSideEncryption string + ServerSideEncryptionKeyID string + StorageClass string +} + +// GetObjectSystemMetadata will get ObjectSystemMetadata from Object. +// +// - This function should not be called by service implementer. +// - The returning ObjectServiceMetadata is read only and should not be modified. +func GetObjectSystemMetadata(o *Object) ObjectSystemMetadata { + sm, ok := o.GetSystemMetadata() + if ok { + return sm.(ObjectSystemMetadata) + } + return ObjectSystemMetadata{} +} + +// setObjectSystemMetadata will set ObjectSystemMetadata into Object. +// +// - This function should only be called once, please make sure all data has been written before set. +func setObjectSystemMetadata(o *Object, sm ObjectSystemMetadata) { + o.SetSystemMetadata(sm) +} + +// StorageSystemMetadata stores system metadata for object. +type StorageSystemMetadata struct { + ServerSideEncryption string + ServerSideEncryptionKeyID string + StorageClass string +} + +// GetStorageSystemMetadata will get StorageSystemMetadata from Storage. +// +// - This function should not be called by service implementer. +// - The returning StorageServiceMetadata is read only and should not be modified. +func GetStorageSystemMetadata(s *StorageMeta) StorageSystemMetadata { + sm, ok := s.GetSystemMetadata() + if ok { + return sm.(StorageSystemMetadata) + } + return StorageSystemMetadata{} +} + +// setStorageSystemMetadata will set StorageSystemMetadata into Storage. +// +// - This function should only be called once, please make sure all data has been written before set. +func setStorageSystemMetadata(s *StorageMeta, sm StorageSystemMetadata) { + s.SetSystemMetadata(sm) +} + +// WithDefaultServicePairs will apply default_service_pairs value to Options.
+// +// set default pairs for service actions +func WithDefaultServicePairs(v DefaultServicePairs) Pair { + return Pair{Key: "default_service_pairs", Value: v} +} + +// WithDefaultStoragePairs will apply default_storage_pairs value to Options. +// +// set default pairs for storager actions +func WithDefaultStoragePairs(v DefaultStoragePairs) Pair { + return Pair{Key: "default_storage_pairs", Value: v} +} + +// WithEnableVirtualDir will apply enable_virtual_dir value to Options. +// +// virtual_dir feature is designed for a service that doesn't have native dir support but wants to +// provide simulated operations. +// +// - If this feature is disabled (the default behavior), the service will behave like it doesn't have +// any dir support. +// - If this feature is enabled, the service will support simulated dir behavior in create_dir, create, +// list, delete, and so on. +// +// This feature was introduced in GSP-109. +func WithEnableVirtualDir() Pair { + return Pair{Key: "enable_virtual_dir", Value: true} +} + +// WithServerSideDataEncryption will apply server_side_data_encryption value to Options. +// +// specifies the encryption algorithm when server_side_encryption is KMS. Can only be set to SM4. +// If this is not set, AES256 will be used. +// +// For Chinese users, refer to https://help.aliyun.com/document_detail/31871.html for details. +// +// For global users, refer to https://www.alibabacloud.com/help/doc-detail/31871.htm for details, +// and double-check whether SM4 can be used. +func WithServerSideDataEncryption(v string) Pair { + return Pair{Key: "server_side_data_encryption", Value: v} +} + +// WithServerSideEncryption will apply server_side_encryption value to Options. +// +// specifies the encryption algorithm. Can be AES256, KMS or SM4. +// +// For Chinese users, refer to https://help.aliyun.com/document_detail/31871.html for details. +// +// For global users, refer to https://www.alibabacloud.com/help/doc-detail/31871.htm for details, +// and double-check whether SM4 can be used. +func WithServerSideEncryption(v string) Pair { + return Pair{Key: "server_side_encryption", Value: v} +} + +// WithServerSideEncryptionKeyID will apply server_side_encryption_key_id value to Options. +// +// is the KMS-managed user master key. Only valid when server_side_encryption is KMS. +func WithServerSideEncryptionKeyID(v string) Pair { + return Pair{Key: "server_side_encryption_key_id", Value: v} +} + +// WithServiceFeatures will apply service_features value to Options. +// +// set service features +func WithServiceFeatures(v ServiceFeatures) Pair { + return Pair{Key: "service_features", Value: v} +} + +// WithStorageClass will apply storage_class value to Options. +func WithStorageClass(v string) Pair { + return Pair{Key: "storage_class", Value: v} +} + +// WithStorageFeatures will apply storage_features value to Options. 
+// +// set storage features +func WithStorageFeatures(v StorageFeatures) Pair { + return Pair{Key: "storage_features", Value: v} +} + +var pairMap = map[string]string{"content_md5": "string", "content_type": "string", "context": "context.Context", "continuation_token": "string", "credential": "string", "default_content_type": "string", "default_io_callback": "func([]byte)", "default_service_pairs": "DefaultServicePairs", "default_storage_pairs": "DefaultStoragePairs", "enable_virtual_dir": "bool", "endpoint": "string", "expire": "time.Duration", "http_client_options": "*httpclient.Options", "interceptor": "Interceptor", "io_callback": "func([]byte)", "list_mode": "ListMode", "location": "string", "multipart_id": "string", "name": "string", "object_mode": "ObjectMode", "offset": "int64", "server_side_data_encryption": "string", "server_side_encryption": "string", "server_side_encryption_key_id": "string", "service_features": "ServiceFeatures", "size": "int64", "storage_class": "string", "storage_features": "StorageFeatures", "work_dir": "string"} +var _ Servicer = &Service{} + +type ServiceFeatures struct { +} + +// pairServiceNew is the parsed struct +type pairServiceNew struct { + pairs []Pair + + // Required pairs + HasCredential bool + Credential string + // Optional pairs + HasDefaultServicePairs bool + DefaultServicePairs DefaultServicePairs + HasEndpoint bool + Endpoint string + HasHTTPClientOptions bool + HTTPClientOptions *httpclient.Options + HasServiceFeatures bool + ServiceFeatures ServiceFeatures + // Enable features +} + +// parsePairServiceNew will parse Pair slice into *pairServiceNew +func parsePairServiceNew(opts []Pair) (pairServiceNew, error) { + result := + pairServiceNew{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "credential": + if result.HasCredential { + continue + } + result.HasCredential = true + result.Credential = v.Value.(string) + case "default_service_pairs": + if result.HasDefaultServicePairs { + continue + } + result.HasDefaultServicePairs = true + result.DefaultServicePairs = v.Value.(DefaultServicePairs) + case "endpoint": + if result.HasEndpoint { + continue + } + result.HasEndpoint = true + result.Endpoint = v.Value.(string) + case "http_client_options": + if result.HasHTTPClientOptions { + continue + } + result.HasHTTPClientOptions = true + result.HTTPClientOptions = v.Value.(*httpclient.Options) + case "service_features": + if result.HasServiceFeatures { + continue + } + result.HasServiceFeatures = true + result.ServiceFeatures = v.Value.(ServiceFeatures) + } + } + // Enable features + + // Default pairs + + if !result.HasCredential { + return pairServiceNew{}, services.PairRequiredError{Keys: []string{"credential"}} + } + return result, nil +} + +// DefaultServicePairs is default pairs for specific action +type DefaultServicePairs struct { + Create []Pair + Delete []Pair + Get []Pair + List []Pair +} +type pairServiceCreate struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Service) parsePairServiceCreate(opts []Pair) (pairServiceCreate, error) { + result := + pairServiceCreate{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairServiceCreate{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairServiceDelete struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Service) parsePairServiceDelete(opts []Pair) (pairServiceDelete, error) { + result := + pairServiceDelete{pairs: opts} + + for _, v := range opts { + 
switch v.Key { + default: + return pairServiceDelete{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairServiceGet struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Service) parsePairServiceGet(opts []Pair) (pairServiceGet, error) { + result := + pairServiceGet{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairServiceGet{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairServiceList struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Service) parsePairServiceList(opts []Pair) (pairServiceList, error) { + result := + pairServiceList{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairServiceList{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} +func (s *Service) Create(name string, pairs ...Pair) (store Storager, err error) { + ctx := context.Background() + return s.CreateWithContext(ctx, name, pairs...) +} +func (s *Service) CreateWithContext(ctx context.Context, name string, pairs ...Pair) (store Storager, err error) { + defer func() { + err = + s.formatError("create", err, name) + }() + + pairs = append(pairs, s.defaultPairs.Create...) + var opt pairServiceCreate + + opt, err = s.parsePairServiceCreate(pairs) + if err != nil { + return + } + return s.create(ctx, name, opt) +} +func (s *Service) Delete(name string, pairs ...Pair) (err error) { + ctx := context.Background() + return s.DeleteWithContext(ctx, name, pairs...) +} +func (s *Service) DeleteWithContext(ctx context.Context, name string, pairs ...Pair) (err error) { + defer func() { + err = + s.formatError("delete", err, name) + }() + + pairs = append(pairs, s.defaultPairs.Delete...) + var opt pairServiceDelete + + opt, err = s.parsePairServiceDelete(pairs) + if err != nil { + return + } + return s.delete(ctx, name, opt) +} +func (s *Service) Get(name string, pairs ...Pair) (store Storager, err error) { + ctx := context.Background() + return s.GetWithContext(ctx, name, pairs...) +} +func (s *Service) GetWithContext(ctx context.Context, name string, pairs ...Pair) (store Storager, err error) { + defer func() { + err = + s.formatError("get", err, name) + }() + + pairs = append(pairs, s.defaultPairs.Get...) + var opt pairServiceGet + + opt, err = s.parsePairServiceGet(pairs) + if err != nil { + return + } + return s.get(ctx, name, opt) +} +func (s *Service) List(pairs ...Pair) (sti *StoragerIterator, err error) { + ctx := context.Background() + return s.ListWithContext(ctx, pairs...) +} +func (s *Service) ListWithContext(ctx context.Context, pairs ...Pair) (sti *StoragerIterator, err error) { + defer func() { + err = + s.formatError("list", err, "") + }() + + pairs = append(pairs, s.defaultPairs.List...) + var opt pairServiceList + + opt, err = s.parsePairServiceList(pairs) + if err != nil { + return + } + return s.list(ctx, opt) +} + +var ( + _ Appender = &Storage{} + _ Direr = &Storage{} + _ Linker = &Storage{} + _ Multiparter = &Storage{} + _ Storager = &Storage{} +) + +type StorageFeatures struct { // virtual_dir feature is designed for a service that doesn't have native dir support but wants to + // provide simulated operations. + // + // - If this feature is disabled (the default behavior), the service will behave like it doesn't have + // any dir support. + // - If this feature is enabled, the service will support simulated dir behavior in create_dir, create, + // list, delete, and so on. 
+ // + // This feature was introduced in GSP-109. + VirtualDir bool +} + +// pairStorageNew is the parsed struct +type pairStorageNew struct { + pairs []Pair + + // Required pairs + HasName bool + Name string + // Optional pairs + HasDefaultContentType bool + DefaultContentType string + HasDefaultIoCallback bool + DefaultIoCallback func([]byte) + HasDefaultStoragePairs bool + DefaultStoragePairs DefaultStoragePairs + HasStorageFeatures bool + StorageFeatures StorageFeatures + HasWorkDir bool + WorkDir string + // Enable features + hasEnableVirtualDir bool + EnableVirtualDir bool +} + +// parsePairStorageNew will parse Pair slice into *pairStorageNew +func parsePairStorageNew(opts []Pair) (pairStorageNew, error) { + result := + pairStorageNew{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "name": + if result.HasName { + continue + } + result.HasName = true + result.Name = v.Value.(string) + case "default_content_type": + if result.HasDefaultContentType { + continue + } + result.HasDefaultContentType = true + result.DefaultContentType = v.Value.(string) + case "default_io_callback": + if result.HasDefaultIoCallback { + continue + } + result.HasDefaultIoCallback = true + result.DefaultIoCallback = v.Value.(func([]byte)) + case "default_storage_pairs": + if result.HasDefaultStoragePairs { + continue + } + result.HasDefaultStoragePairs = true + result.DefaultStoragePairs = v.Value.(DefaultStoragePairs) + case "storage_features": + if result.HasStorageFeatures { + continue + } + result.HasStorageFeatures = true + result.StorageFeatures = v.Value.(StorageFeatures) + case "work_dir": + if result.HasWorkDir { + continue + } + result.HasWorkDir = true + result.WorkDir = v.Value.(string) + case "enable_virtual_dir": + if result.hasEnableVirtualDir { + continue + } + result.hasEnableVirtualDir = true + result.EnableVirtualDir = true + } + } + // Enable features + if result.hasEnableVirtualDir { + result.HasStorageFeatures = true + result.StorageFeatures.VirtualDir = true + } + // Default pairs + if result.HasDefaultContentType { + result.HasDefaultStoragePairs = true + result.DefaultStoragePairs.CreateAppend = append(result.DefaultStoragePairs.CreateAppend, WithContentType(result.DefaultContentType)) + result.DefaultStoragePairs.CreateMultipart = append(result.DefaultStoragePairs.CreateMultipart, WithContentType(result.DefaultContentType)) + result.DefaultStoragePairs.Write = append(result.DefaultStoragePairs.Write, WithContentType(result.DefaultContentType)) + } + if result.HasDefaultIoCallback { + result.HasDefaultStoragePairs = true + result.DefaultStoragePairs.Read = append(result.DefaultStoragePairs.Read, WithIoCallback(result.DefaultIoCallback)) + result.DefaultStoragePairs.Write = append(result.DefaultStoragePairs.Write, WithIoCallback(result.DefaultIoCallback)) + result.DefaultStoragePairs.WriteAppend = append(result.DefaultStoragePairs.WriteAppend, WithIoCallback(result.DefaultIoCallback)) + } + if !result.HasName { + return pairStorageNew{}, services.PairRequiredError{Keys: []string{"name"}} + } + return result, nil +} + +// DefaultStoragePairs is default pairs for specific action +type DefaultStoragePairs struct { + CommitAppend []Pair + CompleteMultipart []Pair + Create []Pair + CreateAppend []Pair + CreateDir []Pair + CreateLink []Pair + CreateMultipart []Pair + Delete []Pair + List []Pair + ListMultipart []Pair + Metadata []Pair + Read []Pair + Stat []Pair + Write []Pair + WriteAppend []Pair + WriteMultipart []Pair +} +type pairStorageCommitAppend struct { + pairs 
[]Pair + // Required pairs + // Optional pairs +} + +func (s *Storage) parsePairStorageCommitAppend(opts []Pair) (pairStorageCommitAppend, error) { + result := + pairStorageCommitAppend{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairStorageCommitAppend{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageCompleteMultipart struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Storage) parsePairStorageCompleteMultipart(opts []Pair) (pairStorageCompleteMultipart, error) { + result := + pairStorageCompleteMultipart{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairStorageCompleteMultipart{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageCreate struct { + pairs []Pair + // Required pairs + // Optional pairs + HasMultipartID bool + MultipartID string + HasObjectMode bool + ObjectMode ObjectMode +} + +func (s *Storage) parsePairStorageCreate(opts []Pair) (pairStorageCreate, error) { + result := + pairStorageCreate{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "multipart_id": + if result.HasMultipartID { + continue + } + result.HasMultipartID = true + result.MultipartID = v.Value.(string) + case "object_mode": + if result.HasObjectMode { + continue + } + result.HasObjectMode = true + result.ObjectMode = v.Value.(ObjectMode) + default: + return pairStorageCreate{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageCreateAppend struct { + pairs []Pair + // Required pairs + // Optional pairs + HasContentType bool + ContentType string + HasServerSideEncryption bool + ServerSideEncryption string + HasStorageClass bool + StorageClass string +} + +func (s *Storage) parsePairStorageCreateAppend(opts []Pair) (pairStorageCreateAppend, error) { + result := + pairStorageCreateAppend{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "content_type": + if result.HasContentType { + continue + } + result.HasContentType = true + result.ContentType = v.Value.(string) + case "server_side_encryption": + if result.HasServerSideEncryption { + continue + } + result.HasServerSideEncryption = true + result.ServerSideEncryption = v.Value.(string) + case "storage_class": + if result.HasStorageClass { + continue + } + result.HasStorageClass = true + result.StorageClass = v.Value.(string) + default: + return pairStorageCreateAppend{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageCreateDir struct { + pairs []Pair + // Required pairs + // Optional pairs + HasStorageClass bool + StorageClass string +} + +func (s *Storage) parsePairStorageCreateDir(opts []Pair) (pairStorageCreateDir, error) { + result := + pairStorageCreateDir{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "storage_class": + if result.HasStorageClass { + continue + } + result.HasStorageClass = true + result.StorageClass = v.Value.(string) + default: + return pairStorageCreateDir{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageCreateLink struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Storage) parsePairStorageCreateLink(opts []Pair) (pairStorageCreateLink, error) { + result := + pairStorageCreateLink{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairStorageCreateLink{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, 
nil +} + +type pairStorageCreateMultipart struct { + pairs []Pair + // Required pairs + // Optional pairs + HasContentType bool + ContentType string + HasServerSideDataEncryption bool + ServerSideDataEncryption string + HasServerSideEncryption bool + ServerSideEncryption string + HasServerSideEncryptionKeyID bool + ServerSideEncryptionKeyID string + HasStorageClass bool + StorageClass string +} + +func (s *Storage) parsePairStorageCreateMultipart(opts []Pair) (pairStorageCreateMultipart, error) { + result := + pairStorageCreateMultipart{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "content_type": + if result.HasContentType { + continue + } + result.HasContentType = true + result.ContentType = v.Value.(string) + case "server_side_data_encryption": + if result.HasServerSideDataEncryption { + continue + } + result.HasServerSideDataEncryption = true + result.ServerSideDataEncryption = v.Value.(string) + case "server_side_encryption": + if result.HasServerSideEncryption { + continue + } + result.HasServerSideEncryption = true + result.ServerSideEncryption = v.Value.(string) + case "server_side_encryption_key_id": + if result.HasServerSideEncryptionKeyID { + continue + } + result.HasServerSideEncryptionKeyID = true + result.ServerSideEncryptionKeyID = v.Value.(string) + case "storage_class": + if result.HasStorageClass { + continue + } + result.HasStorageClass = true + result.StorageClass = v.Value.(string) + default: + return pairStorageCreateMultipart{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageDelete struct { + pairs []Pair + // Required pairs + // Optional pairs + HasMultipartID bool + MultipartID string + HasObjectMode bool + ObjectMode ObjectMode +} + +func (s *Storage) parsePairStorageDelete(opts []Pair) (pairStorageDelete, error) { + result := + pairStorageDelete{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "multipart_id": + if result.HasMultipartID { + continue + } + result.HasMultipartID = true + result.MultipartID = v.Value.(string) + case "object_mode": + if result.HasObjectMode { + continue + } + result.HasObjectMode = true + result.ObjectMode = v.Value.(ObjectMode) + default: + return pairStorageDelete{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageList struct { + pairs []Pair + // Required pairs + // Optional pairs + HasListMode bool + ListMode ListMode +} + +func (s *Storage) parsePairStorageList(opts []Pair) (pairStorageList, error) { + result := + pairStorageList{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "list_mode": + if result.HasListMode { + continue + } + result.HasListMode = true + result.ListMode = v.Value.(ListMode) + default: + return pairStorageList{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageListMultipart struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Storage) parsePairStorageListMultipart(opts []Pair) (pairStorageListMultipart, error) { + result := + pairStorageListMultipart{pairs: opts} + + for _, v := range opts { + switch v.Key { + default: + return pairStorageListMultipart{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageMetadata struct { + pairs []Pair + // Required pairs + // Optional pairs +} + +func (s *Storage) parsePairStorageMetadata(opts []Pair) (pairStorageMetadata, error) { + result := + pairStorageMetadata{pairs: opts} + + for _, v := range opts { + switch v.Key { 
+ default: + return pairStorageMetadata{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageRead struct { + pairs []Pair + // Required pairs + // Optional pairs + HasIoCallback bool + IoCallback func([]byte) + HasOffset bool + Offset int64 + HasSize bool + Size int64 +} + +func (s *Storage) parsePairStorageRead(opts []Pair) (pairStorageRead, error) { + result := + pairStorageRead{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "io_callback": + if result.HasIoCallback { + continue + } + result.HasIoCallback = true + result.IoCallback = v.Value.(func([]byte)) + case "offset": + if result.HasOffset { + continue + } + result.HasOffset = true + result.Offset = v.Value.(int64) + case "size": + if result.HasSize { + continue + } + result.HasSize = true + result.Size = v.Value.(int64) + default: + return pairStorageRead{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageStat struct { + pairs []Pair + // Required pairs + // Optional pairs + HasMultipartID bool + MultipartID string + HasObjectMode bool + ObjectMode ObjectMode +} + +func (s *Storage) parsePairStorageStat(opts []Pair) (pairStorageStat, error) { + result := + pairStorageStat{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "multipart_id": + if result.HasMultipartID { + continue + } + result.HasMultipartID = true + result.MultipartID = v.Value.(string) + case "object_mode": + if result.HasObjectMode { + continue + } + result.HasObjectMode = true + result.ObjectMode = v.Value.(ObjectMode) + default: + return pairStorageStat{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageWrite struct { + pairs []Pair + // Required pairs + // Optional pairs + HasContentMd5 bool + ContentMd5 string + HasContentType bool + ContentType string + HasIoCallback bool + IoCallback func([]byte) + HasServerSideDataEncryption bool + ServerSideDataEncryption string + HasServerSideEncryption bool + ServerSideEncryption string + HasServerSideEncryptionKeyID bool + ServerSideEncryptionKeyID string + HasStorageClass bool + StorageClass string +} + +func (s *Storage) parsePairStorageWrite(opts []Pair) (pairStorageWrite, error) { + result := + pairStorageWrite{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "content_md5": + if result.HasContentMd5 { + continue + } + result.HasContentMd5 = true + result.ContentMd5 = v.Value.(string) + case "content_type": + if result.HasContentType { + continue + } + result.HasContentType = true + result.ContentType = v.Value.(string) + case "io_callback": + if result.HasIoCallback { + continue + } + result.HasIoCallback = true + result.IoCallback = v.Value.(func([]byte)) + case "server_side_data_encryption": + if result.HasServerSideDataEncryption { + continue + } + result.HasServerSideDataEncryption = true + result.ServerSideDataEncryption = v.Value.(string) + case "server_side_encryption": + if result.HasServerSideEncryption { + continue + } + result.HasServerSideEncryption = true + result.ServerSideEncryption = v.Value.(string) + case "server_side_encryption_key_id": + if result.HasServerSideEncryptionKeyID { + continue + } + result.HasServerSideEncryptionKeyID = true + result.ServerSideEncryptionKeyID = v.Value.(string) + case "storage_class": + if result.HasStorageClass { + continue + } + result.HasStorageClass = true + result.StorageClass = v.Value.(string) + default: + return pairStorageWrite{}, services.PairUnsupportedError{Pair: v} + } + } + + 
return result, nil +} + +type pairStorageWriteAppend struct { + pairs []Pair + // Required pairs + // Optional pairs + HasContentMd5 bool + ContentMd5 string + HasIoCallback bool + IoCallback func([]byte) +} + +func (s *Storage) parsePairStorageWriteAppend(opts []Pair) (pairStorageWriteAppend, error) { + result := + pairStorageWriteAppend{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "content_md5": + if result.HasContentMd5 { + continue + } + result.HasContentMd5 = true + result.ContentMd5 = v.Value.(string) + case "io_callback": + if result.HasIoCallback { + continue + } + result.HasIoCallback = true + result.IoCallback = v.Value.(func([]byte)) + default: + return pairStorageWriteAppend{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} + +type pairStorageWriteMultipart struct { + pairs []Pair + // Required pairs + // Optional pairs + HasContentMd5 bool + ContentMd5 string +} + +func (s *Storage) parsePairStorageWriteMultipart(opts []Pair) (pairStorageWriteMultipart, error) { + result := + pairStorageWriteMultipart{pairs: opts} + + for _, v := range opts { + switch v.Key { + case "content_md5": + if result.HasContentMd5 { + continue + } + result.HasContentMd5 = true + result.ContentMd5 = v.Value.(string) + default: + return pairStorageWriteMultipart{}, services.PairUnsupportedError{Pair: v} + } + } + + return result, nil +} +func (s *Storage) CommitAppend(o *Object, pairs ...Pair) (err error) { + ctx := context.Background() + return s.CommitAppendWithContext(ctx, o, pairs...) +} +func (s *Storage) CommitAppendWithContext(ctx context.Context, o *Object, pairs ...Pair) (err error) { + defer func() { + err = + s.formatError("commit_append", err) + }() + if !o.Mode.IsAppend() { + err = services.ObjectModeInvalidError{Expected: ModeAppend, Actual: o.Mode} + return + } + pairs = append(pairs, s.defaultPairs.CommitAppend...) + var opt pairStorageCommitAppend + + opt, err = s.parsePairStorageCommitAppend(pairs) + if err != nil { + return + } + return s.commitAppend(ctx, o, opt) +} +func (s *Storage) CompleteMultipart(o *Object, parts []*Part, pairs ...Pair) (err error) { + ctx := context.Background() + return s.CompleteMultipartWithContext(ctx, o, parts, pairs...) +} +func (s *Storage) CompleteMultipartWithContext(ctx context.Context, o *Object, parts []*Part, pairs ...Pair) (err error) { + defer func() { + err = + s.formatError("complete_multipart", err) + }() + if !o.Mode.IsPart() { + err = services.ObjectModeInvalidError{Expected: ModePart, Actual: o.Mode} + return + } + pairs = append(pairs, s.defaultPairs.CompleteMultipart...) + var opt pairStorageCompleteMultipart + + opt, err = s.parsePairStorageCompleteMultipart(pairs) + if err != nil { + return + } + return s.completeMultipart(ctx, o, parts, opt) +} +func (s *Storage) Create(path string, pairs ...Pair) (o *Object) { + pairs = append(pairs, s.defaultPairs.Create...) + var opt pairStorageCreate + + // Ignore error while handling local functions. + opt, _ = s.parsePairStorageCreate(pairs) + return s.create(path, opt) +} +func (s *Storage) CreateAppend(path string, pairs ...Pair) (o *Object, err error) { + ctx := context.Background() + return s.CreateAppendWithContext(ctx, path, pairs...) +} +func (s *Storage) CreateAppendWithContext(ctx context.Context, path string, pairs ...Pair) (o *Object, err error) { + defer func() { + err = + s.formatError("create_append", err, path) + }() + + pairs = append(pairs, s.defaultPairs.CreateAppend...) 
+ var opt pairStorageCreateAppend + + opt, err = s.parsePairStorageCreateAppend(pairs) + if err != nil { + return + } + return s.createAppend(ctx, strings.ReplaceAll(path, "\\", "/"), opt) +} +func (s *Storage) CreateDir(path string, pairs ...Pair) (o *Object, err error) { + ctx := context.Background() + return s.CreateDirWithContext(ctx, path, pairs...) +} +func (s *Storage) CreateDirWithContext(ctx context.Context, path string, pairs ...Pair) (o *Object, err error) { + defer func() { + err = + s.formatError("create_dir", err, path) + }() + + pairs = append(pairs, s.defaultPairs.CreateDir...) + var opt pairStorageCreateDir + + opt, err = s.parsePairStorageCreateDir(pairs) + if err != nil { + return + } + return s.createDir(ctx, strings.ReplaceAll(path, "\\", "/"), opt) +} +func (s *Storage) CreateLink(path string, target string, pairs ...Pair) (o *Object, err error) { + ctx := context.Background() + return s.CreateLinkWithContext(ctx, path, target, pairs...) +} +func (s *Storage) CreateLinkWithContext(ctx context.Context, path string, target string, pairs ...Pair) (o *Object, err error) { + defer func() { + err = + s.formatError("create_link", err, path, target) + }() + + pairs = append(pairs, s.defaultPairs.CreateLink...) + var opt pairStorageCreateLink + + opt, err = s.parsePairStorageCreateLink(pairs) + if err != nil { + return + } + return s.createLink(ctx, strings.ReplaceAll(path, "\\", "/"), strings.ReplaceAll(target, "\\", "/"), opt) +} +func (s *Storage) CreateMultipart(path string, pairs ...Pair) (o *Object, err error) { + ctx := context.Background() + return s.CreateMultipartWithContext(ctx, path, pairs...) +} +func (s *Storage) CreateMultipartWithContext(ctx context.Context, path string, pairs ...Pair) (o *Object, err error) { + defer func() { + err = + s.formatError("create_multipart", err, path) + }() + + pairs = append(pairs, s.defaultPairs.CreateMultipart...) + var opt pairStorageCreateMultipart + + opt, err = s.parsePairStorageCreateMultipart(pairs) + if err != nil { + return + } + return s.createMultipart(ctx, strings.ReplaceAll(path, "\\", "/"), opt) +} +func (s *Storage) Delete(path string, pairs ...Pair) (err error) { + ctx := context.Background() + return s.DeleteWithContext(ctx, path, pairs...) +} +func (s *Storage) DeleteWithContext(ctx context.Context, path string, pairs ...Pair) (err error) { + defer func() { + err = + s.formatError("delete", err, path) + }() + + pairs = append(pairs, s.defaultPairs.Delete...) + var opt pairStorageDelete + + opt, err = s.parsePairStorageDelete(pairs) + if err != nil { + return + } + return s.delete(ctx, strings.ReplaceAll(path, "\\", "/"), opt) +} +func (s *Storage) List(path string, pairs ...Pair) (oi *ObjectIterator, err error) { + ctx := context.Background() + return s.ListWithContext(ctx, path, pairs...) +} +func (s *Storage) ListWithContext(ctx context.Context, path string, pairs ...Pair) (oi *ObjectIterator, err error) { + defer func() { + err = + s.formatError("list", err, path) + }() + + pairs = append(pairs, s.defaultPairs.List...) + var opt pairStorageList + + opt, err = s.parsePairStorageList(pairs) + if err != nil { + return + } + return s.list(ctx, strings.ReplaceAll(path, "\\", "/"), opt) +} +func (s *Storage) ListMultipart(o *Object, pairs ...Pair) (pi *PartIterator, err error) { + ctx := context.Background() + return s.ListMultipartWithContext(ctx, o, pairs...) 
+} +func (s *Storage) ListMultipartWithContext(ctx context.Context, o *Object, pairs ...Pair) (pi *PartIterator, err error) { + defer func() { + err = + s.formatError("list_multipart", err) + }() + if !o.Mode.IsPart() { + err = services.ObjectModeInvalidError{Expected: ModePart, Actual: o.Mode} + return + } + pairs = append(pairs, s.defaultPairs.ListMultipart...) + var opt pairStorageListMultipart + + opt, err = s.parsePairStorageListMultipart(pairs) + if err != nil { + return + } + return s.listMultipart(ctx, o, opt) +} +func (s *Storage) Metadata(pairs ...Pair) (meta *StorageMeta) { + pairs = append(pairs, s.defaultPairs.Metadata...) + var opt pairStorageMetadata + + // Ignore error while handling local functions. + opt, _ = s.parsePairStorageMetadata(pairs) + return s.metadata(opt) +} +func (s *Storage) Read(path string, w io.Writer, pairs ...Pair) (n int64, err error) { + ctx := context.Background() + return s.ReadWithContext(ctx, path, w, pairs...) +} +func (s *Storage) ReadWithContext(ctx context.Context, path string, w io.Writer, pairs ...Pair) (n int64, err error) { + defer func() { + err = + s.formatError("read", err, path) + }() + + pairs = append(pairs, s.defaultPairs.Read...) + var opt pairStorageRead + + opt, err = s.parsePairStorageRead(pairs) + if err != nil { + return + } + return s.read(ctx, strings.ReplaceAll(path, "\\", "/"), w, opt) +} +func (s *Storage) Stat(path string, pairs ...Pair) (o *Object, err error) { + ctx := context.Background() + return s.StatWithContext(ctx, path, pairs...) +} +func (s *Storage) StatWithContext(ctx context.Context, path string, pairs ...Pair) (o *Object, err error) { + defer func() { + err = + s.formatError("stat", err, path) + }() + + pairs = append(pairs, s.defaultPairs.Stat...) + var opt pairStorageStat + + opt, err = s.parsePairStorageStat(pairs) + if err != nil { + return + } + return s.stat(ctx, strings.ReplaceAll(path, "\\", "/"), opt) +} +func (s *Storage) Write(path string, r io.Reader, size int64, pairs ...Pair) (n int64, err error) { + ctx := context.Background() + return s.WriteWithContext(ctx, path, r, size, pairs...) +} +func (s *Storage) WriteWithContext(ctx context.Context, path string, r io.Reader, size int64, pairs ...Pair) (n int64, err error) { + defer func() { + err = + s.formatError("write", err, path) + }() + + pairs = append(pairs, s.defaultPairs.Write...) + var opt pairStorageWrite + + opt, err = s.parsePairStorageWrite(pairs) + if err != nil { + return + } + return s.write(ctx, strings.ReplaceAll(path, "\\", "/"), r, size, opt) +} +func (s *Storage) WriteAppend(o *Object, r io.Reader, size int64, pairs ...Pair) (n int64, err error) { + ctx := context.Background() + return s.WriteAppendWithContext(ctx, o, r, size, pairs...) +} +func (s *Storage) WriteAppendWithContext(ctx context.Context, o *Object, r io.Reader, size int64, pairs ...Pair) (n int64, err error) { + defer func() { + err = + s.formatError("write_append", err) + }() + if !o.Mode.IsAppend() { + err = services.ObjectModeInvalidError{Expected: ModeAppend, Actual: o.Mode} + return + } + pairs = append(pairs, s.defaultPairs.WriteAppend...) + var opt pairStorageWriteAppend + + opt, err = s.parsePairStorageWriteAppend(pairs) + if err != nil { + return + } + return s.writeAppend(ctx, o, r, size, opt) +} +func (s *Storage) WriteMultipart(o *Object, r io.Reader, size int64, index int, pairs ...Pair) (n int64, part *Part, err error) { + ctx := context.Background() + return s.WriteMultipartWithContext(ctx, o, r, size, index, pairs...) 
+} +func (s *Storage) WriteMultipartWithContext(ctx context.Context, o *Object, r io.Reader, size int64, index int, pairs ...Pair) (n int64, part *Part, err error) { + defer func() { + err = + s.formatError("write_multipart", err) + }() + if !o.Mode.IsPart() { + err = services.ObjectModeInvalidError{Expected: ModePart, Actual: o.Mode} + return + } + pairs = append(pairs, s.defaultPairs.WriteMultipart...) + var opt pairStorageWriteMultipart + + opt, err = s.parsePairStorageWriteMultipart(pairs) + if err != nil { + return + } + return s.writeMultipart(ctx, o, r, size, index, opt) +} +func init() { + services.RegisterServicer(Type, NewServicer) + services.RegisterStorager(Type, NewStorager) + services.RegisterSchema(Type, pairMap) +} diff --git a/services/oss/go.mod b/services/oss/go.mod new file mode 100644 index 000000000..6ae2d6cb9 --- /dev/null +++ b/services/oss/go.mod @@ -0,0 +1,14 @@ +module go.beyondstorage.io/services/oss/v3 + +go 1.16 + +require ( + github.com/aliyun/aliyun-oss-go-sdk v2.1.10+incompatible + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/google/uuid v1.3.0 + github.com/satori/go.uuid v1.2.0 // indirect + go.beyondstorage.io/credential v1.0.0 + go.beyondstorage.io/endpoint v1.2.0 + go.beyondstorage.io/v5 v5.0.0 + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect +) diff --git a/services/oss/go.sum b/services/oss/go.sum new file mode 100644 index 000000000..c7fe4011b --- /dev/null +++ b/services/oss/go.sum @@ -0,0 +1,116 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Xuanwo/gg v0.2.0 h1:axbZmA0qmidh3s9PA86GqvBXVQ3o7Bbpf0aImGtlimA= +github.com/Xuanwo/gg v0.2.0/go.mod h1:0fLiiSxR87u2UA0ZNZiKZXuz3jnJdbDHWtU2xpdcH3s= +github.com/Xuanwo/go-bufferpool v0.2.0 h1:DXzqJD9lJufXbT/03GrcEvYOs4gXYUj9/g5yi6Q9rUw= +github.com/Xuanwo/go-bufferpool v0.2.0/go.mod h1:Mle++9GGouhOwGj52i9PJLNAPmW2nb8PWBP7JJzNCzk= +github.com/Xuanwo/templateutils v0.1.0 h1:WpkWOqQtIQ2vAIpJLa727DdN8WtxhUkkbDGa6UhntJY= +github.com/Xuanwo/templateutils v0.1.0/go.mod h1:OdE0DJ+CJxDBq6psX5DPV+gOZi8bhuHuVUpPCG++Wb8= +github.com/aliyun/aliyun-oss-go-sdk v2.1.10+incompatible h1:D3gwOr9qUUmyyBRDbpnATqu+EkqqmigFd3Od6xO1QUU= +github.com/aliyun/aliyun-oss-go-sdk v2.1.10+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/dave/dst v0.26.2 h1:lnxLAKI3tx7MgLNVDirFCsDTlTG9nKTk7GcptKcWSwY= +github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= +github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= +github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kevinburke/go-bindata v3.22.0+incompatible h1:/JmqEhIWQ7GRScV0WjX/0tqBrC5D21ALg0H0U/KZ/ts= +github.com/kevinburke/go-bindata v3.22.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.6 h1:lH+Snxmzl92r1jww8/jYPqKkhs3C9AF4LunzU56ZZr4= +github.com/smartystreets/goconvey v1.6.6/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.beyondstorage.io/credential v1.0.0 h1:xJ7hBXmeUE0+rbW+RYZSz4KgHpXvc9g7oQ56f8dXdBk= +go.beyondstorage.io/credential v1.0.0/go.mod h1:7KAYievVw4a8u/eLZmnQt65Z91n84sMQj3LFbt8Xous= +go.beyondstorage.io/endpoint v1.2.0 h1:/7mgKquTykeqJ9op82hso2+WQfECeywGd/Lda1N3tF4= +go.beyondstorage.io/endpoint v1.2.0/go.mod h1:oZ7Z7HZ7mAo337JBLjuCF/DM66HVEUu6+hw68c3UcLs= +go.beyondstorage.io/v5 v5.0.0 h1:k9Axfgbt+oZXoDwSBVCl1XANHSL4rkNTGP2Lz9YdJe0= +go.beyondstorage.io/v5 v5.0.0/go.mod h1:3wV9gCQnqu7tD/3LMeo2yimUKIeTSHpTc6wHSb0yY20= +golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/services/oss/iterator.go b/services/oss/iterator.go new file mode 100644 index 000000000..ca3d11b68 --- /dev/null +++ b/services/oss/iterator.go @@ -0,0 +1,35 @@ +package oss + +import "strconv" + +type objectPageStatus struct { + delimiter string + maxKeys int + prefix string + marker string + partIdMarker string +} + +func (i *objectPageStatus) ContinuationToken() string { + return i.marker +} + +type storagePageStatus struct { + marker string + maxKeys int +} + +func (i *storagePageStatus) ContinuationToken() string { + return i.marker +} + +type partPageStatus struct { + key string + maxParts int + partNumberMarker int + uploadId string +} + +func (i *partPageStatus) ContinuationToken() string { + return strconv.Itoa(i.partNumberMarker) +} diff --git a/services/oss/service.go b/services/oss/service.go new file mode 100644 index 000000000..b8612fb8d --- /dev/null +++ b/services/oss/service.go @@ -0,0 +1,74 @@ +package oss + +import ( + "context" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + + ps "go.beyondstorage.io/v5/pairs" + typ "go.beyondstorage.io/v5/types" +) + +func (s *Service) create(ctx context.Context, name string, opt pairServiceCreate) (store typ.Storager, err error) { + st, err := s.newStorage(ps.WithName(name)) + if err != nil { + return nil, err + } + err = s.service.CreateBucket(name) + if err 
!= nil { + return nil, err + } + return st, nil +} + +func (s *Service) delete(ctx context.Context, name string, opt pairServiceDelete) (err error) { + err = s.service.DeleteBucket(name) + if err != nil { + return err + } + return nil +} + +func (s *Service) get(ctx context.Context, name string, opt pairServiceGet) (store typ.Storager, err error) { + st, err := s.newStorage(ps.WithName(name)) + if err != nil { + return nil, err + } + return st, nil +} + +func (s *Service) list(ctx context.Context, opt pairServiceList) (it *typ.StoragerIterator, err error) { + input := &storagePageStatus{ + maxKeys: 200, + } + + return typ.NewStoragerIterator(ctx, s.nextStoragePage, input), nil +} + +func (s *Service) nextStoragePage(ctx context.Context, page *typ.StoragerPage) error { + input := page.Status.(*storagePageStatus) + + output, err := s.service.ListBuckets( + oss.Marker(input.marker), + oss.MaxKeys(input.maxKeys), + ) + if err != nil { + return err + } + + for _, v := range output.Buckets { + store, err := s.newStorage(ps.WithName(v.Name)) + if err != nil { + return err + } + + page.Data = append(page.Data, store) + } + + if !output.IsTruncated { + return typ.IterateDone + } + + input.marker = output.NextMarker + return nil +} diff --git a/services/oss/service.toml b/services/oss/service.toml new file mode 100644 index 000000000..b9d7f4672 --- /dev/null +++ b/services/oss/service.toml @@ -0,0 +1,88 @@ +name = "oss" + +[namespace.service] + +[namespace.service.new] +required = ["credential"] +optional = ["service_features", "default_service_pairs", "endpoint", "http_client_options"] + +[namespace.storage] +features = ["virtual_dir"] +implement = ["appender", "direr", "multiparter", "linker"] + +[namespace.storage.new] +required = ["name"] +optional = ["storage_features", "default_storage_pairs", "work_dir"] + +[namespace.storage.op.create] +optional = ["multipart_id", "object_mode"] + +[namespace.storage.op.create_dir] +optional = ["storage_class"] + +[namespace.storage.op.delete] +optional = ["multipart_id", "object_mode"] + +[namespace.storage.op.stat] +optional = ["multipart_id", "object_mode"] + +[namespace.storage.op.list] +optional = ["list_mode"] + +[namespace.storage.op.read] +optional = ["offset", "io_callback", "size"] + +[namespace.storage.op.write] +optional = ["content_md5", "content_type", "io_callback", "storage_class", "server_side_encryption", "server_side_data_encryption", "server_side_encryption_key_id"] + +[namespace.storage.op.create_append] +optional = ["content_type", "server_side_encryption", "storage_class"] + +[namespace.storage.op.write_append] +optional = ["content_md5", "io_callback"] + +[namespace.storage.op.create_multipart] +optional = ["content_type", "server_side_encryption", "server_side_encryption_key_id", "server_side_data_encryption", "storage_class"] + +[namespace.storage.op.write_multipart] +optional = ["content_md5"] + +[pairs.service_features] +type = "ServiceFeatures" +description = "set service features" + +[pairs.default_service_pairs] +type = "DefaultServicePairs" +description = "set default pairs for service actions" + +[pairs.storage_features] +type = "StorageFeatures" +description = "set storage features" + +[pairs.default_storage_pairs] +type = "DefaultStoragePairs" +description = "set default pairs for storager actions" + +[pairs.storage_class] +type = "string" + +[pairs.server_side_encryption] +type = "string" +description = "specifies the encryption algorithm. 
Can be AES256, KMS or SM4.\n\nFor Chinese users, refer to https://help.aliyun.com/document_detail/31871.html for details.\n\nFor global users, refer to https://www.alibabacloud.com/help/doc-detail/31871.htm for details, and double-check whether SM4 can be used." + +[pairs.server_side_data_encryption] +type = "string" +description = "specifies the encryption algorithm when server_side_encryption is KMS. Can only be set to SM4. If this is not set, AES256 will be used.\n\nFor Chinese users, refer to https://help.aliyun.com/document_detail/31871.html for details.\n\nFor global users, refer to https://www.alibabacloud.com/help/doc-detail/31871.htm for details, and double-check whether SM4 can be used." + +[pairs.server_side_encryption_key_id] +type = "string" +description = "is the KMS-managed user master key. Only valid when server_side_encryption is KMS." + +[infos.object.meta.storage-class] +type = "string" + +[infos.object.meta.server_side_encryption] +type = "string" + +[infos.object.meta.server_side_encryption_key_id] +type = "string" \ No newline at end of file diff --git a/services/oss/storage.go b/services/oss/storage.go new file mode 100644 index 000000000..39010f4f5 --- /dev/null +++ b/services/oss/storage.go @@ -0,0 +1,692 @@ +package oss + +import ( + "context" + "fmt" + "io" + "strconv" + "time" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + + ps "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/headers" + "go.beyondstorage.io/v5/pkg/iowrap" + "go.beyondstorage.io/v5/services" + . "go.beyondstorage.io/v5/types" +) + +func (s *Storage) commitAppend(ctx context.Context, o *Object, opt pairStorageCommitAppend) (err error) { + return +} + +func (s *Storage) completeMultipart(ctx context.Context, o *Object, parts []*Part, opt pairStorageCompleteMultipart) (err error) { + imur := oss.InitiateMultipartUploadResult{ + Bucket: s.bucket.BucketName, + Key: o.ID, + UploadID: o.MustGetMultipartID(), + } + + var uploadParts []oss.UploadPart + for _, v := range parts { + uploadParts = append(uploadParts, oss.UploadPart{ + // For user the `PartNumber` is zero-based. But for OSS, the effective `PartNumber` is [1, 10000]. + // Set PartNumber=v.Index+1 here to ensure pass in the effective `PartNumber` for `UploadPart`. + PartNumber: v.Index + 1, + ETag: v.ETag, + }) + } + + _, err = s.bucket.CompleteMultipartUpload(imur, uploadParts) + if err != nil { + return + } + + o.Mode &= ^ModePart + o.Mode |= ModeRead + return +} + +func (s *Storage) create(path string, opt pairStorageCreate) (o *Object) { + rp := s.getAbsPath(path) + + // Handle create multipart object separately. + if opt.HasMultipartID { + o = s.newObject(true) + o.Mode = ModePart + o.SetMultipartID(opt.MultipartID) + } else { + if opt.HasObjectMode && opt.ObjectMode.IsDir() { + if !s.features.VirtualDir { + return + } + rp += "/" + o = s.newObject(true) + o.Mode = ModeDir + } else { + o = s.newObject(false) + o.Mode = ModeRead + } + } + o.ID = rp + o.Path = path + return o +} + +func (s *Storage) createAppend(ctx context.Context, path string, opt pairStorageCreateAppend) (o *Object, err error) { + rp := s.getAbsPath(path) + + // oss `append` doesn't support `overwrite`, so we need to check and delete the object if exists. 
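+	// (Appending from position 0 onto an existing object typically fails on OSS with PositionNotEqualToLength, so delete-then-append is what gives Write-like overwrite semantics here.)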
+ // ref: [GSP-134](https://github.com/beyondstorage/go-storage/blob/master/docs/rfcs/134-write-behavior-consistency.md) + isExist, err := s.bucket.IsObjectExist(rp) + if err != nil { + return + } + + if isExist { + err = s.bucket.DeleteObject(rp) + if err != nil { + return + } + } + + options := make([]oss.Option, 0, 2) + options = append(options, oss.ContentLength(0)) + if opt.HasContentType { + options = append(options, oss.ContentType(opt.ContentType)) + } + if opt.HasStorageClass { + options = append(options, oss.StorageClass(oss.StorageClassType(opt.StorageClass))) + } + if opt.HasServerSideEncryption { + options = append(options, oss.ServerSideEncryption(opt.ServerSideEncryption)) + } + + offset, err := s.bucket.AppendObject(rp, nil, 0, options...) + if err != nil { + return + } + + o = s.newObject(true) + o.Mode = ModeRead | ModeAppend + o.ID = rp + o.Path = path + o.SetAppendOffset(offset) + // set metadata + if opt.HasContentType { + o.SetContentType(opt.ContentType) + } + var sm ObjectSystemMetadata + if opt.HasStorageClass { + sm.StorageClass = opt.StorageClass + } + if opt.HasServerSideEncryption { + sm.ServerSideEncryption = opt.ServerSideEncryption + } + o.SetSystemMetadata(sm) + + return o, nil +} + +func (s *Storage) createDir(ctx context.Context, path string, opt pairStorageCreateDir) (o *Object, err error) { + if !s.features.VirtualDir { + err = NewOperationNotImplementedError("create_dir") + return + } + rp := s.getAbsPath(path) + + // Add `/` at the end of path to simulate directory. + // ref: https://help.aliyun.com/document_detail/31978.html#title-gkg-amg-aes + rp += "/" + + options := make([]oss.Option, 0) + options = append(options, oss.ContentLength(0)) + if opt.HasStorageClass { + options = append(options, oss.StorageClass(oss.StorageClassType(opt.StorageClass))) + } + + err = s.bucket.PutObject(rp, nil, options...) + if err != nil { + return + } + + o = s.newObject(true) + o.Path = path + o.ID = rp + o.Mode |= ModeDir + return +} + +func (s *Storage) createLink(ctx context.Context, path string, target string, opt pairStorageCreateLink) (o *Object, err error) { + rt := s.getAbsPath(target) + rp := s.getAbsPath(path) + + // oss `symlink` supports `overwrite`, so we don't need to check if path exists. + err = s.bucket.PutSymlink(rp, rt) + if err != nil { + return nil, err + } + + o = s.newObject(true) + o.ID = rp + o.Path = path + // oss does not have an absolute path, so when we call `getAbsPath`, it will remove the prefix `/`. + // To ensure that the path matches the one the user gets, we should re-add `/` here. + o.SetLinkTarget("/" + rt) + o.Mode |= ModeLink + + return +} + +func (s *Storage) createMultipart(ctx context.Context, path string, opt pairStorageCreateMultipart) (o *Object, err error) { + rp := s.getAbsPath(path) + + options := make([]oss.Option, 0, 3) + if opt.HasContentType { + options = append(options, oss.ContentType(opt.ContentType)) + } + if opt.HasStorageClass { + options = append(options, oss.StorageClass(oss.StorageClassType(opt.StorageClass))) + } + if opt.HasServerSideEncryption { + options = append(options, oss.ServerSideEncryption(opt.ServerSideEncryption)) + } + if opt.HasServerSideDataEncryption { + options = append(options, oss.ServerSideDataEncryption(opt.ServerSideDataEncryption)) + } + if opt.HasServerSideEncryptionKeyID { + options = append(options, oss.ServerSideEncryptionKeyID(opt.ServerSideEncryptionKeyID)) + } + + output, err := s.bucket.InitiateMultipartUpload(rp, options...) 
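+	// The UploadID returned here identifies the multipart upload; it is attached to the object below and is what writeMultipart, listMultipart and completeMultipart later use to address the same upload.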
+ if err != nil { + return + } + + o = s.newObject(true) + o.ID = rp + o.Path = path + o.Mode |= ModePart + o.SetMultipartID(output.UploadID) + + return o, nil +} + +func (s *Storage) delete(ctx context.Context, path string, opt pairStorageDelete) (err error) { + rp := s.getAbsPath(path) + + if opt.HasMultipartID { + err = s.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{ + Bucket: s.bucket.BucketName, + Key: rp, + UploadID: opt.MultipartID, + }) + if err != nil && checkError(err, responseCodeNoSuchUpload) { + // Omit `NoSuchUpdate` error here + // ref: [GSP-46](https://github.com/beyondstorage/specs/blob/master/rfcs/46-idempotent-delete.md) + err = nil + } + if err != nil { + return + } + return + } + + if opt.HasObjectMode && opt.ObjectMode.IsDir() { + if !s.features.VirtualDir { + err = services.PairUnsupportedError{Pair: ps.WithObjectMode(opt.ObjectMode)} + return + } + + rp += "/" + } + + // OSS DeleteObject is idempotent, so we don't need to check NoSuchKey error. + // + // References + // - [GSP-46](https://github.com/beyondstorage/specs/blob/master/rfcs/46-idempotent-delete.md) + // - https://help.aliyun.com/document_detail/31982.html + err = s.bucket.DeleteObject(rp) + if err != nil { + return err + } + return nil +} + +func (s *Storage) list(ctx context.Context, path string, opt pairStorageList) (oi *ObjectIterator, err error) { + input := &objectPageStatus{ + maxKeys: 200, + prefix: s.getAbsPath(path), + } + + if !opt.HasListMode { + // Support `ListModePrefix` as the default `ListMode`. + // ref: [GSP-654](https://github.com/beyondstorage/go-storage/blob/master/docs/rfcs/654-unify-list-behavior.md) + opt.ListMode = ListModePrefix + } + + var nextFn NextObjectFunc + + switch { + case opt.ListMode.IsPart(): + nextFn = s.nextPartObjectPageByPrefix + case opt.ListMode.IsDir(): + input.delimiter = "/" + nextFn = s.nextObjectPageByDir + case opt.ListMode.IsPrefix(): + nextFn = s.nextObjectPageByPrefix + default: + return nil, services.ListModeInvalidError{Actual: opt.ListMode} + } + + return NewObjectIterator(ctx, nextFn, input), nil +} + +func (s *Storage) listMultipart(ctx context.Context, o *Object, opt pairStorageListMultipart) (pi *PartIterator, err error) { + input := &partPageStatus{ + maxParts: 200, + key: o.ID, + uploadId: o.MustGetMultipartID(), + } + + return NewPartIterator(ctx, s.nextPartPage, input), nil +} + +func (s *Storage) metadata(opt pairStorageMetadata) (meta *StorageMeta) { + meta = NewStorageMeta() + meta.Name = s.bucket.BucketName + meta.WorkDir = s.workDir + // set write restriction + meta.SetWriteSizeMaximum(writeSizeMaximum) + // set append restriction + meta.SetAppendTotalSizeMaximum(appendTotalSizeMaximum) + // set multipart restrictions + meta.SetMultipartNumberMaximum(multipartNumberMaximum) + meta.SetMultipartSizeMaximum(multipartSizeMaximum) + meta.SetMultipartSizeMinimum(multipartSizeMinimum) + return +} + +func (s *Storage) nextObjectPageByDir(ctx context.Context, page *ObjectPage) error { + input := page.Status.(*objectPageStatus) + + output, err := s.bucket.ListObjects( + oss.Marker(input.marker), + oss.MaxKeys(input.maxKeys), + oss.Prefix(input.prefix), + oss.Delimiter(input.delimiter), + ) + if err != nil { + return err + } + + for _, v := range output.CommonPrefixes { + o := s.newObject(true) + o.ID = v + o.Path = s.getRelPath(v) + o.Mode |= ModeDir + + page.Data = append(page.Data, o) + } + + for _, v := range output.Objects { + o, err := s.formatFileObject(v) + if err != nil { + return err + } + + page.Data = append(page.Data, 
o) + } + + if !output.IsTruncated { + return IterateDone + } + + input.marker = output.NextMarker + return nil +} + +func (s *Storage) nextObjectPageByPrefix(ctx context.Context, page *ObjectPage) error { + input := page.Status.(*objectPageStatus) + + output, err := s.bucket.ListObjects( + oss.Marker(input.marker), + oss.MaxKeys(input.maxKeys), + oss.Prefix(input.prefix), + ) + if err != nil { + return err + } + + for _, v := range output.Objects { + o, err := s.formatFileObject(v) + if err != nil { + return err + } + + page.Data = append(page.Data, o) + } + + if !output.IsTruncated { + return IterateDone + } + + input.marker = output.NextMarker + return nil +} + +func (s *Storage) nextPartObjectPageByPrefix(ctx context.Context, page *ObjectPage) error { + input := page.Status.(*objectPageStatus) + + options := make([]oss.Option, 0, 5) + options = append(options, oss.Delimiter(input.delimiter)) + options = append(options, oss.MaxKeys(input.maxKeys)) + options = append(options, oss.Prefix(input.prefix)) + options = append(options, oss.KeyMarker(input.marker)) + options = append(options, oss.UploadIDMarker(input.partIdMarker)) + + output, err := s.bucket.ListMultipartUploads(options...) + if err != nil { + return err + } + + for _, v := range output.Uploads { + o := s.newObject(true) + o.ID = v.Key + o.Path = s.getRelPath(v.Key) + o.Mode |= ModePart + o.SetMultipartID(v.UploadID) + + page.Data = append(page.Data, o) + } + + if output.NextKeyMarker == "" && output.NextUploadIDMarker == "" { + return IterateDone + } + if !output.IsTruncated { + return IterateDone + } + + input.marker = output.NextKeyMarker + input.partIdMarker = output.NextUploadIDMarker + return nil +} + +func (s *Storage) nextPartPage(ctx context.Context, page *PartPage) error { + input := page.Status.(*partPageStatus) + + imur := oss.InitiateMultipartUploadResult{ + Bucket: s.bucket.BucketName, + Key: input.key, + UploadID: input.uploadId, + } + + options := make([]oss.Option, 0, 2) + options = append(options, oss.MaxParts(input.maxParts)) + options = append(options, oss.PartNumberMarker(input.partNumberMarker)) + + output, err := s.bucket.ListUploadedParts(imur, options...) + if err != nil { + return err + } + + for _, v := range output.UploadedParts { + p := &Part{ + // The returned `PartNumber` is [1, 10000]. + // Set Index=v.PartNumber-1 here to make the `PartNumber` zero-based for user. + Index: v.PartNumber - 1, + ETag: v.ETag, + Size: int64(v.Size), + } + + page.Data = append(page.Data, p) + } + + if !output.IsTruncated { + return IterateDone + } + + partNumberMarker, err := strconv.Atoi(output.NextPartNumberMarker) + if err != nil { + return err + } + + input.partNumberMarker = partNumberMarker + return nil +} + +func (s *Storage) read(ctx context.Context, path string, w io.Writer, opt pairStorageRead) (n int64, err error) { + rp := s.getAbsPath(path) + + options := make([]oss.Option, 0) + if opt.HasOffset && !opt.HasSize { + nr := fmt.Sprintf("%d-", opt.Offset) + options = append(options, oss.NormalizedRange(nr)) + } else if !opt.HasOffset && opt.HasSize { + options = append(options, oss.Range(0, opt.Size-1)) + } else if opt.HasOffset && opt.HasSize { + options = append(options, oss.Range(opt.Offset, opt.Offset+opt.Size-1)) + } + output, err := s.bucket.GetObject(rp, options...) 
+ if err != nil { + return 0, err + } + defer output.Close() + + rc := output + if opt.HasIoCallback { + rc = iowrap.CallbackReadCloser(output, opt.IoCallback) + } + + return io.Copy(w, rc) +} + +func (s *Storage) stat(ctx context.Context, path string, opt pairStorageStat) (o *Object, err error) { + rp := s.getAbsPath(path) + + if symlink, err := s.bucket.GetSymlink(rp); err == nil { + // The path is a symlink. + o = s.newObject(true) + o.ID = rp + o.Path = path + + target := symlink.Get(oss.HTTPHeaderOssSymlinkTarget) + o.SetLinkTarget("/" + target) + + o.Mode |= ModeLink + + return o, nil + } + + if opt.HasMultipartID { + _, err = s.bucket.ListUploadedParts(oss.InitiateMultipartUploadResult{ + Bucket: s.bucket.BucketName, + Key: rp, + UploadID: opt.MultipartID, + }) + if err != nil { + return nil, err + } + + o = s.newObject(true) + o.ID = rp + o.Path = path + o.Mode |= ModePart + o.SetMultipartID(opt.MultipartID) + return o, nil + } + + if opt.HasObjectMode && opt.ObjectMode.IsDir() { + if !s.features.VirtualDir { + err = services.PairUnsupportedError{Pair: ps.WithObjectMode(opt.ObjectMode)} + return + } + + rp += "/" + } + + output, err := s.bucket.GetObjectMeta(rp) + if err != nil { + return nil, err + } + + o = s.newObject(true) + o.ID = rp + o.Path = path + if opt.HasObjectMode && opt.ObjectMode.IsDir() { + o.Mode |= ModeDir + } else { + o.Mode |= ModeRead + } + + if v := output.Get(headers.ContentLength); v != "" { + size, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, err + } + o.SetContentLength(size) + } + + if v := output.Get(headers.LastModified); v != "" { + lastModified, err := time.Parse(time.RFC1123, v) + if err != nil { + return nil, err + } + o.SetLastModified(lastModified) + } + + // OSS advise us don't use Etag as Content-MD5. + // + // ref: https://help.aliyun.com/document_detail/31965.html + if v := output.Get(headers.ETag); v != "" { + o.SetEtag(v) + } + + if v := output.Get(headers.ContentType); v != "" { + o.SetContentType(v) + } + + var sm ObjectSystemMetadata + if v := output.Get(storageClassHeader); v != "" { + sm.StorageClass = v + } + if v := output.Get(serverSideEncryptionHeader); v != "" { + sm.ServerSideEncryption = v + } + if v := output.Get(serverSideEncryptionKeyIdHeader); v != "" { + sm.ServerSideEncryptionKeyID = v + } + o.SetSystemMetadata(sm) + + return o, nil +} + +func (s *Storage) write(ctx context.Context, path string, r io.Reader, size int64, opt pairStorageWrite) (n int64, err error) { + if size > writeSizeMaximum { + err = fmt.Errorf("size limit exceeded: %w", services.ErrRestrictionDissatisfied) + return + } + + // According to GSP-751, we should allow the user to pass in a nil io.Reader. + // Since oss supports reader passed in as nil, we do not need to determine the case where the reader is nil and the size is 0. 
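+	// In practice this means a nil reader with size 0 creates an empty object, while a nil reader with a non-zero size is rejected just below.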
+ // ref: https://github.com/beyondstorage/go-storage/blob/master/docs/rfcs/751-write-empty-file-behavior.md + if r == nil && size != 0 { + return 0, fmt.Errorf("reader is nil but size is not 0") + } else { + r = io.LimitReader(r, size) + } + + if opt.HasIoCallback { + r = iowrap.CallbackReader(r, opt.IoCallback) + } + + rp := s.getAbsPath(path) + + options := make([]oss.Option, 0, 3) + options = append(options, oss.ContentLength(size)) + if opt.HasContentMd5 { + options = append(options, oss.ContentMD5(opt.ContentMd5)) + } + if opt.HasStorageClass { + options = append(options, oss.StorageClass(oss.StorageClassType(opt.StorageClass))) + } + if opt.HasServerSideEncryption { + options = append(options, oss.ServerSideEncryption(opt.ServerSideEncryption)) + } + if opt.HasServerSideDataEncryption { + options = append(options, oss.ServerSideDataEncryption(opt.ServerSideDataEncryption)) + } + if opt.HasServerSideEncryptionKeyID { + options = append(options, oss.ServerSideEncryptionKeyID(opt.ServerSideEncryptionKeyID)) + } + + err = s.bucket.PutObject(rp, r, options...) + if err != nil { + return + } + return size, nil +} + +func (s *Storage) writeAppend(ctx context.Context, o *Object, r io.Reader, size int64, opt pairStorageWriteAppend) (n int64, err error) { + rp := o.GetID() + + if opt.HasIoCallback { + r = iowrap.CallbackReader(r, opt.IoCallback) + } + + offset, _ := o.GetAppendOffset() + + options := make([]oss.Option, 0, 1) + options = append(options, oss.ContentLength(size)) + if opt.HasContentMd5 { + options = append(options, oss.ContentMD5(opt.ContentMd5)) + } + + offset, err = s.bucket.AppendObject(rp, r, offset, options...) + if err != nil { + return + } + + o.SetAppendOffset(offset) + + return size, err +} + +func (s *Storage) writeMultipart(ctx context.Context, o *Object, r io.Reader, size int64, index int, opt pairStorageWriteMultipart) (n int64, part *Part, err error) { + if index < 0 || index >= multipartNumberMaximum { + err = fmt.Errorf("multipart number limit exceeded: %w", services.ErrRestrictionDissatisfied) + return + } + if size > multipartSizeMaximum { + err = fmt.Errorf("size limit exceeded: %w", services.ErrRestrictionDissatisfied) + return + } + + imur := oss.InitiateMultipartUploadResult{ + Bucket: s.bucket.BucketName, + Key: o.ID, + UploadID: o.MustGetMultipartID(), + } + + options := make([]oss.Option, 0, 1) + options = append(options, oss.ContentLength(size)) + if opt.HasContentMd5 { + options = append(options, oss.ContentMD5(opt.ContentMd5)) + } + + // For OSS, the `partNumber` is [1, 10000]. But for user, the `partNumber` is zero-based. + // Set partNumber=index+1 here to ensure pass in the effective `partNumber` for `UpdatePart`. + // ref: https://help.aliyun.com/document_detail/31993.html + output, err := s.bucket.UploadPart(imur, r, size, index+1, options...) + if err != nil { + return + } + + part = &Part{ + // Set part.Index=index instead of part.Index=output.PartNumber to maintain `partNumber` consistency for user. + Index: index, + Size: size, + ETag: output.ETag, + } + return size, part, nil +} diff --git a/services/oss/tests/README.md b/services/oss/tests/README.md new file mode 100644 index 000000000..48fd8b8f6 --- /dev/null +++ b/services/oss/tests/README.md @@ -0,0 +1,32 @@ +## How run integration tests + +### Run tests locally + +Copy example files and update corresponding values. 
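+
+Makefile.env is read by the Makefile (see Makefile.env.example for the authoritative list of keys) and should define the same `STORAGE_OSS_*` variables that the CI section below exports. A sketch of a filled-in Makefile.env, with placeholder values:
+
+```shell
+export STORAGE_OSS_INTEGRATION_TEST=on
+export STORAGE_OSS_CREDENTIAL=hmac:access_key:secret_key
+export STORAGE_OSS_NAME=your-bucket-name
+export STORAGE_OSS_ENDPOINT=https://oss-cn-hangzhou.aliyuncs.com
+```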
+ +```shell +cp Makefile.env.exmaple Makefile.env +``` + +Run tests + +```shell +make integration_test +``` + +### Run tests in CI + +Set following environment variables: + +```shell +export STORAGE_OSS_INTEGRATION_TEST=on +export STORAGE_OSS_CREDENTIAL=hmac:access_key:secret_key +export STORAGE_OSS_NAME=bucketname +export STORAGE_OSS_ENDPOINT=https:oss-region-name.aliyuncs.com +``` + +Run tests + +```shell +make integration_test +``` diff --git a/services/oss/tests/storage_test.go b/services/oss/tests/storage_test.go new file mode 100644 index 000000000..9fa6c8c68 --- /dev/null +++ b/services/oss/tests/storage_test.go @@ -0,0 +1,43 @@ +package tests + +import ( + "os" + "testing" + + "go.beyondstorage.io/v5/tests" +) + +func TestStorage(t *testing.T) { + if os.Getenv("STORAGE_OSS_INTEGRATION_TEST") != "on" { + t.Skipf("STORAGE_OSS_INTEGRATION_TEST is not 'on', skipped") + } + tests.TestStorager(t, setupTest(t)) +} + +func TestAppend(t *testing.T) { + if os.Getenv("STORAGE_OSS_INTEGRATION_TEST") != "on" { + t.Skipf("STORAGE_OSS_INTEGRATION_TEST is not 'on', skipped") + } + tests.TestAppender(t, setupTest(t)) +} + +func TestMultiparter(t *testing.T) { + if os.Getenv("STORAGE_OSS_INTEGRATION_TEST") != "on" { + t.Skipf("STORAGE_OSS_INTEGRATION_TEST is not 'on', skipped") + } + tests.TestMultiparter(t, setupTest(t)) +} + +func TestDirer(t *testing.T) { + if os.Getenv("STORAGE_OSS_INTEGRATION_TEST") != "on" { + t.Skipf("STORAGE_OSS_INTEGRATION_TEST is not 'on', skipped") + } + tests.TestDirer(t, setupTest(t)) +} + +func TestLinker(t *testing.T) { + if os.Getenv("STORAGE_OSS_INTEGRATION_TEST") != "on" { + t.Skipf("STORAGE_OSS_INTEGRATION_TEST is not 'on', skipped") + } + tests.TestLinker(t, setupTest(t)) +} diff --git a/services/oss/tests/utils_test.go b/services/oss/tests/utils_test.go new file mode 100644 index 000000000..c32f40f2c --- /dev/null +++ b/services/oss/tests/utils_test.go @@ -0,0 +1,30 @@ +package tests + +import ( + "os" + "testing" + + "github.com/google/uuid" + + oss "go.beyondstorage.io/services/oss/v3" + ps "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/types" +) + +func setupTest(t *testing.T) types.Storager { + t.Log("Setup test for oss") + + store, err := oss.NewStorager( + ps.WithCredential(os.Getenv("STORAGE_OSS_CREDENTIAL")), + ps.WithName(os.Getenv("STORAGE_OSS_NAME")), + ps.WithEndpoint(os.Getenv("STORAGE_OSS_ENDPOINT")), + ps.WithWorkDir("/"+uuid.New().String()+"/"), + oss.WithStorageFeatures(oss.StorageFeatures{ + VirtualDir: true, + }), + ) + if err != nil { + t.Errorf("new storager: %v", err) + } + return store +} diff --git a/services/oss/tools.go b/services/oss/tools.go new file mode 100644 index 000000000..71b69b050 --- /dev/null +++ b/services/oss/tools.go @@ -0,0 +1,6 @@ +//go:build tools +// +build tools + +package oss + +import _ "go.beyondstorage.io/v5/cmd/definitions" diff --git a/services/oss/utils.go b/services/oss/utils.go new file mode 100644 index 000000000..acb8679c5 --- /dev/null +++ b/services/oss/utils.go @@ -0,0 +1,334 @@ +package oss + +import ( + "fmt" + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + + "go.beyondstorage.io/credential" + "go.beyondstorage.io/endpoint" + ps "go.beyondstorage.io/v5/pairs" + "go.beyondstorage.io/v5/pkg/httpclient" + "go.beyondstorage.io/v5/services" + typ "go.beyondstorage.io/v5/types" +) + +// Service is the aliyun oss *Service config. 
+type Service struct { + service *oss.Client + + defaultPairs DefaultServicePairs + features ServiceFeatures + + typ.UnimplementedServicer +} + +// String implements Servicer.String +func (s *Service) String() string { + return fmt.Sprintf("Servicer oss") +} + +// Storage is the aliyun object storage service. +type Storage struct { + bucket *oss.Bucket + + name string + workDir string + + defaultPairs DefaultStoragePairs + features StorageFeatures + + typ.UnimplementedStorager + typ.UnimplementedAppender + typ.UnimplementedMultiparter + typ.UnimplementedDirer + typ.UnimplementedLinker +} + +// String implements Storager.String +func (s *Storage) String() string { + return fmt.Sprintf( + "Storager oss {Name: %s, WorkDir: %s}", + s.bucket.BucketName, s.workDir, + ) +} + +// New will create both Servicer and Storager. +func New(pairs ...typ.Pair) (typ.Servicer, typ.Storager, error) { + return newServicerAndStorager(pairs...) +} + +// NewServicer will create Servicer only. +func NewServicer(pairs ...typ.Pair) (typ.Servicer, error) { + return newServicer(pairs...) +} + +// NewStorager will create Storager only. +func NewStorager(pairs ...typ.Pair) (typ.Storager, error) { + _, store, err := newServicerAndStorager(pairs...) + return store, err +} + +func newServicer(pairs ...typ.Pair) (srv *Service, err error) { + defer func() { + if err != nil { + err = services.InitError{Op: "new_servicer", Type: Type, Err: formatError(err), Pairs: pairs} + } + }() + + srv = &Service{} + + opt, err := parsePairServiceNew(pairs) + if err != nil { + return nil, err + } + + cp, err := credential.Parse(opt.Credential) + if err != nil { + return nil, err + } + if cp.Protocol() != credential.ProtocolHmac { + return nil, services.PairUnsupportedError{Pair: ps.WithCredential(opt.Credential)} + } + ak, sk := cp.Hmac() + + ep, err := endpoint.Parse(opt.Endpoint) + if err != nil { + return nil, err + } + + var url string + switch ep.Protocol() { + case endpoint.ProtocolHTTP: + url, _, _ = ep.HTTP() + case endpoint.ProtocolHTTPS: + url, _, _ = ep.HTTPS() + default: + return nil, services.PairUnsupportedError{Pair: ps.WithEndpoint(opt.Endpoint)} + } + + var copts []oss.ClientOption + if opt.HasHTTPClientOptions { + copts = append(copts, oss.HTTPClient(httpclient.New(opt.HTTPClientOptions))) + } + + srv.service, err = oss.New(url, ak, sk, copts...) + if err != nil { + return nil, err + } + + if opt.HasDefaultServicePairs { + srv.defaultPairs = opt.DefaultServicePairs + } + if opt.HasServiceFeatures { + srv.features = opt.ServiceFeatures + } + return +} +func newServicerAndStorager(pairs ...typ.Pair) (srv *Service, store *Storage, err error) { + srv, err = newServicer(pairs...) + if err != nil { + return + } + + store, err = srv.newStorage(pairs...) + if err != nil { + err = services.InitError{Op: "new_storager", Type: Type, Err: formatError(err), Pairs: pairs} + return nil, nil, err + } + return srv, store, nil +} + +// All available storage classes are listed here. 
+const ( + // ref: https://www.alibabacloud.com/help/doc-detail/31984.htm + storageClassHeader = "x-oss-storage-class" + + // ref: https://www.alibabacloud.com/help/doc-detail/51374.htm + StorageClassStandard = "STANDARD" + StorageClassIA = "IA" + StorageClassArchive = "Archive" +) + +func formatError(err error) error { + if _, ok := err.(services.InternalError); ok { + return err + } + + switch e := err.(type) { + case oss.ServiceError: + switch e.Code { + case "": + switch e.StatusCode { + case 404: + return fmt.Errorf("%w: %v", services.ErrObjectNotExist, err) + default: + return fmt.Errorf("%w, %v", services.ErrUnexpected, err) + } + case "NoSuchKey": + return fmt.Errorf("%w: %v", services.ErrObjectNotExist, err) + case "AccessDenied": + return fmt.Errorf("%w: %v", services.ErrPermissionDenied, err) + } + case oss.UnexpectedStatusCodeError: + switch e.Got() { + case 404: + return fmt.Errorf("%w: %v", services.ErrObjectNotExist, err) + case 403: + return fmt.Errorf("%w: %v", services.ErrPermissionDenied, err) + } + } + + return fmt.Errorf("%w, %v", services.ErrUnexpected, err) +} + +// newStorage will create a new client. +func (s *Service) newStorage(pairs ...typ.Pair) (st *Storage, err error) { + opt, err := parsePairStorageNew(pairs) + if err != nil { + return nil, err + } + + bucket, err := s.service.Bucket(opt.Name) + if err != nil { + return nil, err + } + + store := &Storage{ + bucket: bucket, + + workDir: "/", + } + + if opt.HasDefaultStoragePairs { + store.defaultPairs = opt.DefaultStoragePairs + } + if opt.HasStorageFeatures { + store.features = opt.StorageFeatures + } + if opt.HasWorkDir { + store.workDir = opt.WorkDir + } + return store, nil +} + +func (s *Service) formatError(op string, err error, name string) error { + if err == nil { + return nil + } + + return services.ServiceError{ + Op: op, + Err: formatError(err), + Servicer: s, + Name: name, + } +} + +// getAbsPath will calculate object storage's abs path +func (s *Storage) getAbsPath(path string) string { + prefix := strings.TrimPrefix(s.workDir, "/") + return prefix + path +} + +// getRelPath will get object storage's rel path. +func (s *Storage) getRelPath(path string) string { + prefix := strings.TrimPrefix(s.workDir, "/") + return strings.TrimPrefix(path, prefix) +} + +func (s *Storage) formatError(op string, err error, path ...string) error { + if err == nil { + return nil + } + + return services.StorageError{ + Op: op, + Err: formatError(err), + Storager: s, + Path: path, + } +} + +func (s *Storage) formatFileObject(v oss.ObjectProperties) (o *typ.Object, err error) { + o = s.newObject(false) + o.ID = v.Key + o.Path = s.getRelPath(v.Key) + if v.Type == "Symlink" { + o.Mode |= typ.ModeLink + } else { + o.Mode |= typ.ModeRead + } + + o.SetContentLength(v.Size) + o.SetLastModified(v.LastModified) + + // OSS advise us don't use Etag as Content-MD5. + // + // ref: https://help.aliyun.com/document_detail/31965.html + if v.ETag != "" { + o.SetEtag(v.ETag) + } + + var sm ObjectSystemMetadata + if value := v.Type; value != "" { + sm.StorageClass = value + } + o.SetSystemMetadata(sm) + + return +} + +func (s *Storage) newObject(done bool) *typ.Object { + return typ.NewObject(s, done) +} + +// All available encryption algorithms are listed here. 
+const ( + serverSideEncryptionHeader = "x-oss-server-side-encryption" + serverSideEncryptionKeyIdHeader = "x-oss-server-side-encryption-key-id" + + ServerSideEncryptionAES256 = "AES256" + ServerSideEncryptionKMS = "KMS" + ServerSideEncryptionSM4 = "SM4" + + ServerSideDataEncryptionSM4 = "SM4" +) + +// OSS response error code. +// +// ref: https://error-center.alibabacloud.com/status/product/Oss +const ( + // responseCodeNoSuchUpload will be returned while the specified upload does not exist. + responseCodeNoSuchUpload = "NoSuchUpload" +) + +func checkError(err error, code string) bool { + e, ok := err.(oss.ServiceError) + if !ok { + return false + } + + return e.Code == code +} + +// multipartXXX are multipart upload restriction in OSS, see more details at: +// https://help.aliyun.com/document_detail/31993.html +const ( + // multipartNumberMaximum is the max part count supported. + multipartNumberMaximum = 10000 + // multipartSizeMaximum is the maximum size for each part, 5GB. + multipartSizeMaximum = 5 * 1024 * 1024 * 1024 + // multipartSizeMinimum is the minimum size for each part, 100KB. + multipartSizeMinimum = 100 * 1024 +) + +const ( + // writeSizeMaximum is the maximum size for each object with a single PUT operation, 5GB. + // ref: https://help.aliyun.com/document_detail/31978.html#title-gkg-amg-aes + writeSizeMaximum = 5 * 1024 * 1024 * 1024 + // appendSizeMaximum is the total maximum size for an append object, 5GB. + // ref: https://help.aliyun.com/document_detail/31981.html?spm=a2c4g.11186623.6.1684.479a3ea7S8dRgB#title-22f-5c3-0sv + appendTotalSizeMaximum = 5 * 1024 * 1024 * 1024 +)
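Putting the pieces together, the generated wrappers, storage.go and utils.go above expose OSS through the standard go-storage interfaces. A minimal usage sketch, assuming the same `STORAGE_OSS_*` environment variables as the tests above (the work dir and object path are placeholders, and error handling is trimmed for brevity):

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	oss "go.beyondstorage.io/services/oss/v3"
	ps "go.beyondstorage.io/v5/pairs"
)

func main() {
	// Construct a Storager for one bucket, scoped under a work dir,
	// the same way tests/utils_test.go does.
	store, err := oss.NewStorager(
		ps.WithCredential(os.Getenv("STORAGE_OSS_CREDENTIAL")), // hmac:access_key:secret_key
		ps.WithName(os.Getenv("STORAGE_OSS_NAME")),             // bucket name
		ps.WithEndpoint(os.Getenv("STORAGE_OSS_ENDPOINT")),     // e.g. https://oss-cn-hangzhou.aliyuncs.com
		ps.WithWorkDir("/example/"),
	)
	if err != nil {
		log.Fatalf("new storager: %v", err)
	}

	// Write goes through PutObject; size must stay within writeSizeMaximum (5 GB).
	data := []byte("hello oss")
	if _, err := store.Write("hello.txt", bytes.NewReader(data), int64(len(data))); err != nil {
		log.Fatalf("write: %v", err)
	}

	// Read streams the object back into any io.Writer; the offset/size pairs
	// declared in service.toml turn this into a ranged GetObject.
	var buf bytes.Buffer
	if _, err := store.Read("hello.txt", &buf); err != nil {
		log.Fatalf("read: %v", err)
	}
	fmt.Println(buf.String())
}
```

Append, multipart, directory and link operations are reached the same way: the returned Storager can be type-asserted to the go-storage Appender, Multiparter, Direr and Linker interfaces that this service declares in service.toml.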