diff --git a/.github/workflows/go-releaser.yaml b/.github/workflows/go-releaser.yaml new file mode 100644 index 0000000..a93b872 --- /dev/null +++ b/.github/workflows/go-releaser.yaml @@ -0,0 +1,32 @@ +name: goreleaser + +on: + release: + types: [created] + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Fetch Git tags + run: git fetch --force --tags + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '>=1.22.4' + cache: true + + - name: Goreleaser + uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: latest + args: release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..846a7f8 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,17 @@ +name: release +on: + push: + branches: + - "release" +jobs: + release: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: 20 + - name: release + env: + GITHUB_TOKEN: ${{ secrets.GH_BOT_ACCESS_TOKEN }} + run: npx semantic-release --debug diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000..d7d1df9 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,22 @@ +version: 2 +builds: + - env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm64 + ldflags: + - -w -extldflags '-static' + +archives: + - format_overrides: + - goos: windows + format: zip + name_template: "terraform-provider-tigris_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + +checksum: + name_template: 'checksums.txt' diff --git a/.releaserc.yaml b/.releaserc.yaml new file mode 100644 index 0000000..6cc0aeb --- /dev/null +++ b/.releaserc.yaml @@ -0,0 +1,6 @@ +branches: + - release +plugins: + - "@semantic-release/commit-analyzer" + - "@semantic-release/release-notes-generator" + - 
"@semantic-release/github" diff --git a/Makefile b/Makefile index 21ffad6..e0f4fd0 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,9 @@ INCLUDE_VERSION_IN_FILENAME?=false default: build +install: vet fmtcheck + go install -ldflags="-X github.com/tigrisdata/terraform-provider-tigris/main.version=$(VERSION)" + build: vet @if $(INCLUDE_VERSION_IN_FILENAME); then \ go build -ldflags="-X github.com/tigrisdata/terraform-provider-tigris/main.version=$(VERSION)" -o terraform-provider-tigris_$(VERSION); \ @@ -21,6 +24,7 @@ terraform-provider-lint: tools -R001=false \ -R003=false \ -R012=false \ + -R018=false \ -S006=false \ -S014=false \ -S020=false \ @@ -59,4 +63,4 @@ tools: @echo "==> Installing development tooling..." go generate -tags tools tools/tools.go -.PHONY: build lint terraform-provider-lint vet fmt golangci-lint tools \ No newline at end of file +.PHONY: build install lint terraform-provider-lint vet fmt golangci-lint tools \ No newline at end of file diff --git a/README.md b/README.md index 0d480c8..35d4ebf 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,181 @@ # Terraform Provider for Tigris Buckets This repository contains a Terraform provider that allows you to create and manage Tigris buckets. This provider supports creating, reading, updating, deleting, and importing Tigris buckets with additional customization options like specifying access keys. 
+ +## Usage + +Below is an example of how to use the Tigris provider in your Terraform configuration: + +```hcl +terraform { + required_providers { + tigris = { + source = "https://github.com/tigrisdata/terraform-provider-tigris" + } + } +} + +provider "tigris" { + access_key = "your-access-key" + secret_key = "your-secret-key" +} + +resource "tigris_bucket" "example_bucket" { + bucket = "my-custom-bucket" +} + +resource "tigris_bucket_public_access" "example_bucket_public_access" { + bucket = tigris_bucket.example_bucket.bucket + acl = "private" + public_list_objects = false +} + +resource "tigris_bucket_website_config" "example_website_config" { + bucket = tigris_bucket.example_bucket.bucket + domain_name = tigris_bucket.example_bucket.bucket +} + +resource "tigris_bucket_shadow_config" "example_shadow_config" { + bucket = tigris_bucket.example_bucket.bucket + shadow_bucket = "my-custom-bucket-shadow" + shadow_access_key = "your-shadow-bucket-access-key" + shadow_secret_key = "your-shadow-bucket-secret-key" + shadow_region = "us-west-2" + shadow_endpoint = "https://s3.us-west-2.amazonaws.com" + shadow_write_through = true +} +``` + +### Applying the Configuration + +1. Initialize Terraform: + +```shell +terraform init +``` + +2. Apply the configuration: + +```shell +terraform apply +``` + +## Provider + +The Tigris provider allows you to manage Tigris buckets. + +### Configuration + +The provider can be configured with the following parameters: + +- access_key: (Optional) The access key. Can also be sourced from the AWS_ACCESS_KEY_ID environment variable. +- secret_key: (Optional) The secret key. Can also be sourced from the AWS_SECRET_ACCESS_KEY environment variable. + +## Resources + +### tigris_bucket + +The tigris_bucket resource creates and manages a Tigris bucket. This resource supports the following actions: + +- Create: Creates a new Tigris bucket. +- Read: Retrieves information about the existing Tigris bucket. 
+- Update: Updates the bucket configuration. +- Delete: Deletes the Tigris bucket. +- Import: Imports an existing Tigris bucket into Terraform’s state. + +#### Configuration + +- bucket: (Required) The name of the Tigris bucket. + +```hcl +resource "tigris_bucket" "example_bucket" { + bucket = "my-custom-bucket" +} +``` + +### tigris_bucket_public_access + +The tigris_bucket_public_access resource creates and manages a Tigris bucket public access configuration. This resource supports the following actions: + +- Create: Creates a new Tigris bucket public access configuration. +- Read: Retrieves information about the existing Tigris bucket public access configuration. +- Update: Updates the bucket public access configuration. +- Delete: Deletes the Tigris bucket public access configuration. +- Import: Imports an existing Tigris bucket public access configuration into Terraform’s state. + +#### Configuration + +- bucket: (Required) The name of the Tigris bucket. +- acl: (Optional) The access control list for the bucket. Defaults to "private". Possible values are "private", and "public-read". + +```hcl +resource "tigris_bucket_public_access" "example_bucket_public_access" { + bucket = "my-custom-bucket" + acl = "private" + public_list_objects = false +} +``` + +### tigris_bucket_website_config + +The tigris_bucket_website_config resource creates and manages a Tigris bucket website configuration. This is used to configure custom domain name for the bucket. + +This resource supports the following actions: + +- Create: Creates a new Tigris bucket website configuration. +- Read: Retrieves information about the existing Tigris bucket website configuration. +- Update: Updates the bucket website configuration. +- Delete: Deletes the Tigris bucket website configuration. +- Import: Imports an existing Tigris bucket website configuration into Terraform’s state. + +#### Configuration + +- bucket: (Required) The name of the Tigris bucket. 
+- domain_name: (Required) The domain name for the bucket website. + +```hcl +resource "tigris_bucket_website_config" "example_website_config" { + bucket = "images.example.com" + domain_name = "images.example.com" +} +``` + +Before using this resource, you must have a bucket created using the tigris_bucket resource. The domain_name must match the bucket name and there must be a CNAME DNS record setup. The CNAME record should point to the Tigris bucket endpoint (e.g., images.example.com CNAME images.example.com.fly.storage.tigris.dev). + +### tigris_bucket_shadow_config + +The tigris_bucket_shadow_config resource creates and manages a Tigris bucket shadow configuration. The shadow configuration is used to setup a source bucket (shadow bucket) that will be used to migrate data to the Tigris bucket. You can read more about how this migration works [here](https://www.tigrisdata.com/docs/migration/). + +This resource supports the following actions: + +- Create: Creates a new Tigris bucket shadow configuration. +- Read: Retrieves information about the existing Tigris bucket shadow configuration. +- Update: Updates the bucket shadow configuration. +- Delete: Deletes the Tigris bucket shadow configuration. +- Import: Imports an existing Tigris bucket shadow configuration into Terraform’s state. + +#### Configuration + +- bucket: (Required) The name of the Tigris bucket. +- shadow_bucket: (Required) The name of the shadow bucket. +- shadow_access_key: (Required) The access key for the shadow bucket. +- shadow_secret_key: (Required) The secret key for the shadow bucket. +- shadow_region: (Optional) The region for the shadow bucket. Defaults to "us-east-1". +- shadow_endpoint: (Optional) The endpoint for the shadow bucket. Defaults to "https://s3.us-east-1.amazonaws.com". +- shadow_write_through: (Optional) Whether to write through to the shadow bucket. Defaults to true. 
+ +```hcl +resource "tigris_bucket_shadow_config" "example_shadow_config" { + bucket = "my-custom-bucket" + shadow_bucket = "my-custom-bucket-shadow" + shadow_access_key = "your-shadow-bucket-access-key" + shadow_secret_key = "your-shadow-bucket-secret-key" + shadow_region = "us-west-2" + shadow_endpoint = "https://s3.us-west-2.amazonaws.com" + shadow_write_through = true +} +``` + +## Contributing + +Contributions are welcome! Please feel free to submit a Pull Request. diff --git a/go.mod b/go.mod index c96cf85..7abf2ae 100644 --- a/go.mod +++ b/go.mod @@ -20,9 +20,11 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect github.com/aws/smithy-go v1.20.4 // indirect + github.com/google/uuid v1.6.0 // indirect ) require ( + github.com/YakDriver/regexache v0.24.0 github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/aws/aws-sdk-go-v2 v1.30.4 diff --git a/go.sum b/go.sum index 672da7b..b3f4b97 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +github.com/YakDriver/regexache v0.24.0 h1:zUKaixelkswzdqsqPc2sveiV//Mi/msJn0teG8zBDiA= +github.com/YakDriver/regexache v0.24.0/go.mod h1:awcd8uBj614F3ScW06JqlfSGqq2/7vdJHy+RiKzVC+g= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -53,6 +55,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= diff --git a/internal/client.go b/internal/client.go new file mode 100644 index 0000000..fc13d17 --- /dev/null +++ b/internal/client.go @@ -0,0 +1,378 @@ +package internal + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + shttp "github.com/aws/smithy-go/transport/http" + "github.com/tigrisdata/terraform-provider-tigris/internal/types" +) + +const ( + // DefaultEndpoint is the default endpoint for Tigris object storage service. + DefaultEndpoint = "https://fly.storage.tigris.dev" + + // DefaultRegion is the default region for Tigris object storage service. + DefaultRegion = "auto" + + // Headers for the requests to Tigris. 
+ HeaderContentType = "Content-Type" + HeaderAccept = "Accept" + HeaderAmzContentSha = "X-Amz-Content-Sha256" + HeaderAmzIdentityId = "S3-Identity-Id" + HeaderAmzAcl = "X-Amz-Acl" + HeaderAmzPublicListObjects = "X-Amz-Acl-Public-List-Objects-Enabled" +) + +type Client struct { + cfg aws.Config + signer *v4.Signer + credentials aws.Credentials + endpoint string + httpClient *http.Client + s3Client *s3.Client +} + +func NewClient(endpoint, accessKeyID, secretAccessKey string) (*Client, error) { + // Load AWS configuration + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithRegion(DefaultRegion), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKeyID, secretAccessKey, "")), + ) + if err != nil { + return nil, err + } + + // Create a signer + signer := v4.NewSigner() + + // Create S3 service client + svc := s3.NewFromConfig(cfg, func(o *s3.Options) { + o.BaseEndpoint = aws.String(endpoint) + o.Region = DefaultRegion + }) + + return &Client{ + cfg: cfg, + signer: signer, + credentials: aws.Credentials{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretAccessKey, + }, + endpoint: endpoint, + httpClient: &http.Client{}, + s3Client: svc, + }, nil +} + +func (c *Client) CreateBucket(ctx context.Context, input *types.BucketUpdateInput) error { + if err := validateBucketRequest(input); err != nil { + return err + } + + _, err := c.s3Client.CreateBucket(ctx, &s3.CreateBucketInput{ + Bucket: aws.String(input.Bucket), + }) + + return err +} + +func (c *Client) UpdateBucket(ctx context.Context, input *types.BucketUpdateInput) error { + if err := validateBucketRequest(input); err != nil { + return err + } + + // Set all the bucket attributes that need to be updated + upReq := &types.BucketUpdateRequest{} + + // Set the website configuration if it's provided + if input.Website != nil { + upReq.Website = input.Website + } + + // Set the shadow bucket configuration if it's provided + if input.Shadow != nil { + upReq.Shadow = 
input.Shadow + } + + body, err := json.Marshal(upReq) + if err != nil { + return fmt.Errorf("failed to marshal update request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, c.bucketURL(input.Bucket, nil), bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("failed to create update request: %w", err) + } + + // Update bucket attributes that need to be updated via headers + // Update the ACL if it's provided + if input.ACL != nil { + req.Header.Set(HeaderAmzAcl, string(*input.ACL)) + } + if input.PublicObjectsListEnabled != nil { + req.Header.Set(HeaderAmzPublicListObjects, fmt.Sprintf("%t", *input.PublicObjectsListEnabled)) + } + + //nolint:contextcheck + resp, err := c.doRequestWithRetry(req) + if err != nil { + return fmt.Errorf("failed to send update request: %w", err) + } + defer resp.Body.Close() + + var upResp types.BucketUpdateResponse + err = json.NewDecoder(resp.Body).Decode(&upResp) + if err != nil { + return fmt.Errorf("request failed with code: %d", resp.StatusCode) + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("update failed with error: %s", upResp.ErrorMessage) + } + + return nil +} + +func (c *Client) HeadBucket(ctx context.Context, bucketName string) (bool, error) { + _, err := c.s3Client.HeadBucket(ctx, &s3.HeadBucketInput{ + Bucket: aws.String(bucketName), + }, withHeader(HeaderAmzIdentityId, c.credentials.AccessKeyID)) + + exists := true + if err != nil { + var notFoundErr *s3types.NotFound + if ok := errors.As(err, ¬FoundErr); ok { + exists = false + return exists, nil + } + } + + return exists, err +} + +func (c *Client) DeleteBucket(ctx context.Context, bucketName string) error { + _, err := c.s3Client.DeleteBucket(ctx, &s3.DeleteBucketInput{ + Bucket: aws.String(bucketName), + }) + + return err +} + +func (c *Client) GetBucketMetadata(ctx context.Context, bucketName string) (*types.BucketMetadata, error) { + params := map[string]string{ + "metadata": "", + } + req, err := 
http.NewRequestWithContext(ctx, http.MethodGet, c.bucketURL(bucketName, params), nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + //nolint:contextcheck + resp, err := c.doRequestWithRetry(req) + if err != nil { + return nil, fmt.Errorf("failed to send request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("request failed with code: %d", resp.StatusCode) + } + + // Parse the response body into a BucketMetadata struct + var metadata types.BucketMetadata + err = json.NewDecoder(resp.Body).Decode(&metadata) + if err != nil { + return nil, fmt.Errorf("failed to read bucket metadata: %w", err) + } + + return &metadata, nil +} + +func (c *Client) FindBucketWithRetry(ctx context.Context, bucketName string) (bool, error) { + maxRetries := 5 + backoffDelay := 3 * time.Second + maxBackoffDelay := 60 * time.Second + + var exists bool + + for i := 0; i < maxRetries; i++ { + exists, err := c.HeadBucket(ctx, bucketName) + if err != nil { + return false, err + } + + // Retry the request if the bucket does not exist + if !exists { + // Exponential backoff before retrying + time.Sleep(backoffDelay) + backoffDelay *= 2 // Double the delay for each retry + if backoffDelay > maxBackoffDelay { + backoffDelay = maxBackoffDelay + } + + continue + } + + // Break out of the loop if the request was successful + break + } + + return exists, nil +} + +func (c *Client) doRequestWithRetry(req *http.Request) (*http.Response, error) { + maxRetries := 5 + backoffDelay := 3 * time.Second + maxBackoffDelay := 60 * time.Second + + var resp *http.Response + var err error + + for i := 0; i < maxRetries; i++ { + // Clone the request to avoid issues with mutated request objects + clonedReq, err := cloneRequest(req) + if err != nil { + return nil, fmt.Errorf("failed to clone request: %w", err) + } + + resp, err = c.doSignedRequest(clonedReq) + if err != nil { + return nil, fmt.Errorf("failed to send 
request: %w", err) + } + + // Check if the response status code indicates a server-side error (5xx) + if resp.StatusCode >= 500 { + resp.Body.Close() + + // Exponential backoff before retrying + time.Sleep(backoffDelay) + backoffDelay *= 2 // Double the delay for each retry + if backoffDelay > maxBackoffDelay { + backoffDelay = maxBackoffDelay + } + + continue + } + + // Break out of the loop if the request was successful + break + } + + return resp, err +} + +func (c *Client) doSignedRequest(req *http.Request) (*http.Response, error) { + // Sign the request + err := c.signRequest(req) + if err != nil { + return nil, fmt.Errorf("failed to sign request: %w", err) + } + + // Send the signed request using the wrapped http.Client + return c.httpClient.Do(req) +} + +func (c *Client) bucketURL(bucketName string, queryParams map[string]string) string { + baseURL := fmt.Sprintf("%s/%s", c.endpoint, bucketName) + if len(queryParams) == 0 { + return baseURL + } + + // Add query parameters to the URL + query := url.Values{} + for key, value := range queryParams { + query.Add(key, value) + } + + return fmt.Sprintf("%s?%s", baseURL, query.Encode()) +} + +func (c *Client) signRequest(req *http.Request) error { + // Get the current time for the request + now := time.Now() + + // Set default headers + req.Header.Set(HeaderContentType, "application/json") + req.Header.Set(HeaderAccept, "application/json") + + // Buffer the request body if it exists + var bodyBytes []byte + var payloadHash string + if req.Body != nil { + var err error + bodyBytes, err = io.ReadAll(req.Body) + if err != nil { + return fmt.Errorf("failed to read request body: %w", err) + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + + // Calculate the payload hash + hash := sha256.New() + hash.Write(bodyBytes) + payloadHash = hex.EncodeToString(hash.Sum(nil)) + } else { + // If there's no body, the hash should be the SHA-256 of an empty string + payloadHash = hex.EncodeToString(sha256.New().Sum(nil)) + } + 
+ // set the content sha256 header + req.Header.Set(HeaderAmzContentSha, payloadHash) + + // Sign the request using the signer + err := c.signer.SignHTTP(context.TODO(), c.credentials, req, payloadHash, "s3", DefaultRegion, now) + if err != nil { + return fmt.Errorf("failed to sign request: %w", err) + } + + return nil +} + +func cloneRequest(req *http.Request) (*http.Request, error) { + // Create a shallow copy of the request + clonedReq := req.Clone(req.Context()) + + // Clone the body if it exists and is seekable + if req.Body != nil { + var buf bytes.Buffer + _, err := buf.ReadFrom(req.Body) + if err != nil { + return nil, fmt.Errorf("failed to read request body: %w", err) + } + + // Restore the original body to be read again + req.Body = io.NopCloser(bytes.NewReader(buf.Bytes())) + // Set the cloned request's body + clonedReq.Body = io.NopCloser(bytes.NewReader(buf.Bytes())) + } + + return clonedReq, nil +} + +func validateBucketRequest(input *types.BucketUpdateInput) error { + if input.Bucket == "" { + return errors.New("bucket name is required") + } + + return nil +} + +func withHeader(key, value string) func(*s3.Options) { + return func(options *s3.Options) { + options.APIOptions = append(options.APIOptions, shttp.AddHeaderValue(key, value)) + } +} diff --git a/internal/names/names.go b/internal/names/names.go new file mode 100644 index 0000000..5314355 --- /dev/null +++ b/internal/names/names.go @@ -0,0 +1,15 @@ +package names + +const ( + // Attributes for the terraform resources. 
+ AttrBucket = "bucket" + AttrAcl = "acl" + AttrPublicListObjects = "public_list_objects" + AttrDomainName = "domain_name" + AttrShadowAccessKey = "shadow_access_key" + AttrShadowSecretKey = "shadow_secret_key" + AttrShadowRegion = "shadow_region" + AttrShadowBucket = "shadow_bucket" + AttrShadowEndpoint = "shadow_endpoint" + AttrShadowWriteThrough = "shadow_write_through" +) diff --git a/internal/provider.go b/internal/provider.go index be9cd30..e21bfb7 100644 --- a/internal/provider.go +++ b/internal/provider.go @@ -2,21 +2,11 @@ package internal import ( - "context" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -const ( - // DefaultEndpoint is the default endpoint for Tigris object storage service. - DefaultEndpoint = "https://fly.storage.tigris.dev" -) - func Provider() *schema.Provider { return &schema.Provider{ Schema: map[string]*schema.Schema{ @@ -41,7 +31,10 @@ func Provider() *schema.Provider { }, }, ResourcesMap: map[string]*schema.Resource{ - "tigris_bucket": resourceTigrisBucket(), + "tigris_bucket": resourceTigrisBucket(), + "tigris_bucket_public_access": resourceTigrisBucketPublicAccess(), + "tigris_bucket_website_config": resourceTigrisBucketWebsiteConfig(), + "tigris_bucket_shadow_config": resourceTigrisBucketShadowConfig(), }, ConfigureFunc: providerConfigure, } @@ -52,18 +45,10 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { secretKey := d.Get("secret_key").(string) endpoint := d.Get("endpoint").(string) - cfg, err := config.LoadDefaultConfig(context.TODO(), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")), - ) + svc, err := NewClient(endpoint, accessKey, secretKey) if err != nil { return nil, fmt.Errorf("unable to load SDK config, %w", err) } - // Create S3 service client - svc := 
s3.NewFromConfig(cfg, func(o *s3.Options) { - o.BaseEndpoint = aws.String(endpoint) - o.Region = "auto" - }) - return svc, nil } diff --git a/internal/resource_bucket.go b/internal/resource_bucket.go index c4abd05..fc82a09 100644 --- a/internal/resource_bucket.go +++ b/internal/resource_bucket.go @@ -2,27 +2,39 @@ package internal import ( "context" - "errors" "fmt" + "strings" + "time" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/YakDriver/regexache" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/tigrisdata/terraform-provider-tigris/internal/names" + "github.com/tigrisdata/terraform-provider-tigris/internal/types" ) func resourceTigrisBucket() *schema.Resource { return &schema.Resource{ - Description: "Provides a Tigris bucket resource. This can be used to create and manage Tigris buckets.", - Create: resourceS3BucketCreate, - Read: resourceS3BucketRead, - Update: resourceS3BucketUpdate, - Delete: resourceS3BucketDelete, + Description: "Provides a Tigris bucket resource. 
This can be used to create and manage Tigris buckets.", + CreateWithoutTimeout: resourceBucketCreate, + ReadWithoutTimeout: resourceBucketRead, + UpdateWithoutTimeout: resourceBucketUpdate, + DeleteWithoutTimeout: resourceBucketDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Schema: map[string]*schema.Schema{ - "bucket_name": { + names.AttrBucket: { Type: schema.TypeString, Required: true, Description: "The name of the Tigris bucket.", @@ -31,59 +43,102 @@ func resourceTigrisBucket() *schema.Resource { } } -func resourceS3BucketCreate(d *schema.ResourceData, meta interface{}) error { - svc := meta.(*s3.Client) +func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) - bucketName := d.Get("bucket_name").(string) + bucketName := d.Get(names.AttrBucket).(string) + if err := validBucketName(bucketName); err != nil { + return diag.FromErr(fmt.Errorf("invalid bucket name, %w", err)) + } - _, err := svc.CreateBucket(context.TODO(), &s3.CreateBucketInput{ - Bucket: aws.String(bucketName), + input := &types.BucketUpdateInput{ + Bucket: bucketName, + } + + tflog.Info(ctx, "Creating bucket", map[string]interface{}{ + "bucket_name": bucketName, }) + + err := svc.CreateBucket(ctx, input) if err != nil { - return fmt.Errorf("unable to create bucket, %w", err) + return diag.FromErr(fmt.Errorf("unable to create bucket, %w", err)) } + tflog.Info(ctx, "Bucket created successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + d.SetId(bucketName) - return resourceS3BucketRead(d, meta) + + return resourceBucketRead(ctx, d, meta) } -func resourceS3BucketRead(d *schema.ResourceData, meta interface{}) 
error { - svc := meta.(*s3.Client) +func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) bucketName := d.Id() - _, err := svc.HeadBucket(context.TODO(), &s3.HeadBucketInput{ - Bucket: aws.String(bucketName), + tflog.Info(ctx, "Checking bucket existence", map[string]interface{}{ + "bucket_name": bucketName, }) + + exists, err := svc.HeadBucket(ctx, bucketName) + if !exists { + tflog.Warn(ctx, "Bucket not found, removing from state", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil + } if err != nil { - var notFoundErr *types.NotFound - if ok := errors.As(err, ¬FoundErr); ok { - d.SetId("") - return nil - } - return fmt.Errorf("unable to read bucket, %w", err) + return diag.FromErr(fmt.Errorf("unable to read bucket, %w", err)) } + d.Set(names.AttrBucket, bucketName) + return nil } -func resourceS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { - // Since S3 buckets have limited update capabilities, this might be a no-op - return resourceS3BucketRead(d, meta) +func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + // This resource cannot be updated + return resourceBucketRead(ctx, d, meta) } -func resourceS3BucketDelete(d *schema.ResourceData, meta interface{}) error { - svc := meta.(*s3.Client) +func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) bucketName := d.Id() - _, err := svc.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{ - Bucket: aws.String(bucketName), - }) + err := svc.DeleteBucket(ctx, bucketName) if err != nil { - return fmt.Errorf("unable to delete bucket, %w", err) + return diag.FromErr(fmt.Errorf("unable to delete bucket, %w", err)) } d.SetId("") return nil } + +// validBucketName validates bucket name. Buckets names have to be DNS-compliant. 
+func validBucketName(value string) error { + if (len(value) < 3) || (len(value) > 63) { + return fmt.Errorf("%q must contain from 3 to 63 characters", value) + } + if !regexache.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { + return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) + } + if regexache.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { + return fmt.Errorf("%q must not be formatted as an IP address", value) + } + if strings.HasPrefix(value, `.`) { + return fmt.Errorf("%q cannot start with a period", value) + } + if strings.HasSuffix(value, `.`) { + return fmt.Errorf("%q cannot end with a period", value) + } + if strings.Contains(value, `..`) { + return fmt.Errorf("%q can be only one period between labels", value) + } + + return nil +} diff --git a/internal/resource_bucket_public_access.go b/internal/resource_bucket_public_access.go new file mode 100644 index 0000000..1c688ea --- /dev/null +++ b/internal/resource_bucket_public_access.go @@ -0,0 +1,231 @@ +package internal + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/tigrisdata/terraform-provider-tigris/internal/names" + "github.com/tigrisdata/terraform-provider-tigris/internal/types" +) + +func resourceTigrisBucketPublicAccess() *schema.Resource { + return &schema.Resource{ + Description: "Provides a Tigris bucket resource. 
This can be used to create and manage Tigris buckets.", + CreateWithoutTimeout: resourceBucketPublicAccessCreate, + ReadWithoutTimeout: resourceBucketPublicAccessRead, + UpdateWithoutTimeout: resourceBucketPublicAccessUpdate, + DeleteWithoutTimeout: resourceBucketPublicAccessDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + names.AttrBucket: { + Type: schema.TypeString, + Required: true, + Description: "The name of the Tigris bucket.", + }, + names.AttrAcl: { + Type: schema.TypeString, + Optional: true, + Default: string(types.BucketCannedACLPrivate), + Description: "The canned ACL to apply to the bucket.", + ValidateFunc: validation.StringInSlice(bucketCannedACL_Values(), false), + }, + names.AttrPublicListObjects: { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to allow public listing of objects in the bucket.", + }, + }, + } +} + +func resourceBucketPublicAccessCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Get(names.AttrBucket).(string) + publicListObjects := d.Get(names.AttrPublicListObjects).(bool) + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + PublicObjectsListEnabled: &publicListObjects, + } + + acl := types.BucketCannedACLPrivate + if v, ok := d.GetOk(names.AttrAcl); ok { + acl = types.BucketCannedACL(v.(string)) + } + input.ACL = &acl + + tflog.Info(ctx, "Creating bucket public access config", map[string]interface{}{ + "bucket_name": bucketName, + }) + + if err := svc.UpdateBucket(ctx, input); err != nil { + return diag.FromErr(fmt.Errorf("unable to create bucket public access 
config, %w", err)) + } + + tflog.Info(ctx, "Bucket public access config created successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId(bucketName) + + return resourceBucketPublicAccessRead(ctx, d, meta) +} + +func resourceBucketPublicAccessRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + tflog.Info(ctx, "Checking bucket existence", map[string]interface{}{ + "bucket_name": bucketName, + }) + + exists, err := svc.HeadBucket(ctx, bucketName) + if !exists { + tflog.Warn(ctx, "Bucket not found, removing from state", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil + } + if err != nil { + return diag.FromErr(fmt.Errorf("unable to read bucket, %w", err)) + } + + d.Set(names.AttrBucket, bucketName) + + tflog.Info(ctx, "Fetching bucket metadata", map[string]interface{}{ + "bucket_name": bucketName, + }) + + metadata, err := svc.GetBucketMetadata(ctx, bucketName) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to read bucket metadata, %w", err)) + } + + tflog.Info(ctx, "Fetched bucket metadata", map[string]interface{}{ + "bucket_name": bucketName, + }) + + acl := metadata.GetBucketCannedACL() + d.Set(names.AttrAcl, string(acl)) + + d.Set(names.AttrPublicListObjects, metadata.GetPublicObjectsListEnabled()) + + return nil +} + +func resourceBucketPublicAccessUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + } + needsUpdate := false + + tflog.Info(ctx, "Updating bucket public access configuration", map[string]interface{}{ + "bucket_name": bucketName, + }) + + // + // Bucket ACL. 
+ // + if d.HasChange(names.AttrAcl) { + acl := types.BucketCannedACL(d.Get(names.AttrAcl).(string)) + if acl == "" { + acl = types.BucketCannedACLPrivate + } + input.ACL = &acl + + tflog.Info(ctx, "Will update bucket ACL", map[string]interface{}{ + "bucket_name": bucketName, + }) + + needsUpdate = true + } + + // + // Bucket Public Objects List. + // + if d.HasChange(names.AttrPublicListObjects) { + publicListObjects := d.Get(names.AttrPublicListObjects).(bool) + input.PublicObjectsListEnabled = &publicListObjects + + tflog.Info(ctx, "Will update bucket public list objects", map[string]interface{}{ + "bucket_name": bucketName, + }) + + needsUpdate = true + } + + if needsUpdate { + err := svc.UpdateBucket(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to update bucket, %w", err)) + } + } + + return resourceBucketPublicAccessRead(ctx, d, meta) +} + +func resourceBucketPublicAccessDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + acl := types.BucketCannedACLPrivate + publicObjectsListEnabled := true + input := &types.BucketUpdateInput{ + Bucket: bucketName, + ACL: &acl, + PublicObjectsListEnabled: &publicObjectsListEnabled, + } + + tflog.Info(ctx, "Deleting bucket public access configuration", map[string]interface{}{ + "bucket_name": bucketName, + }) + + err := svc.UpdateBucket(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to delete bucket public access configuration, %w", err)) + } + + tflog.Info(ctx, "Bucket public access configuration deleted successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil +} + +func bucketCannedACL_Values() []string { + var acl types.BucketCannedACL + + values := []string{} + for _, value := range acl.Values() { + values = append(values, string(value)) + } + + return values +} diff --git a/internal/resource_bucket_shadow.go b/internal/resource_bucket_shadow.go new 
file mode 100644 index 0000000..3f679e6 --- /dev/null +++ b/internal/resource_bucket_shadow.go @@ -0,0 +1,242 @@ +package internal + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/tigrisdata/terraform-provider-tigris/internal/names" + "github.com/tigrisdata/terraform-provider-tigris/internal/types" +) + +func resourceTigrisBucketShadowConfig() *schema.Resource { + return &schema.Resource{ + Description: "Provides a Tigris bucket shadow configuration resource.", + CreateWithoutTimeout: resourceBucketShadowCreate, + ReadWithoutTimeout: resourceBucketShadowRead, + UpdateWithoutTimeout: resourceBucketShadowUpdate, + DeleteWithoutTimeout: resourceBucketShadowDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + names.AttrBucket: { + Type: schema.TypeString, + Required: true, + Description: "The name of the Tigris bucket.", + }, + names.AttrShadowBucket: { + Type: schema.TypeString, + Required: true, + Description: "The name of the shadow bucket.", + }, + names.AttrShadowAccessKey: { + Type: schema.TypeString, + Required: true, + Sensitive: true, + Description: "The access key for the shadow bucket.", + }, + names.AttrShadowSecretKey: { + Type: schema.TypeString, + Required: true, + Sensitive: true, + Description: "The secret key for the shadow bucket.", + }, + names.AttrShadowRegion: { + Type: schema.TypeString, + Optional: true, + Default: "us-east-1", + Description: "The region for the shadow bucket.", + }, + names.AttrShadowEndpoint: { + Type: 
schema.TypeString, + Optional: true, + Default: "https://s3.us-east-1.amazonaws.com", + Description: "The endpoint for the shadow bucket.", + }, + names.AttrShadowWriteThrough: { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Whether to write through to the shadow bucket.", + }, + }, + } +} + +func resourceBucketShadowCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Get(names.AttrBucket).(string) + shadowConfig := &types.BucketShadowConfig{ + Name: d.Get(names.AttrShadowBucket).(string), + AccessKey: d.Get(names.AttrShadowAccessKey).(string), + SecretKey: d.Get(names.AttrShadowSecretKey).(string), + Region: d.Get(names.AttrShadowRegion).(string), + Endpoint: d.Get(names.AttrShadowEndpoint).(string), + WriteThrough: d.Get(names.AttrShadowWriteThrough).(bool), + } + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + Shadow: shadowConfig, + } + + tflog.Info(ctx, "Creating bucket shadow config", map[string]interface{}{ + "bucket_name": bucketName, + }) + + if err := svc.UpdateBucket(ctx, input); err != nil { + return diag.FromErr(fmt.Errorf("unable to create bucket shadow config, %w", err)) + } + + tflog.Info(ctx, "Bucket shadow config created successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId(bucketName) + + return resourceBucketShadowRead(ctx, d, meta) +} + +func resourceBucketShadowRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + tflog.Info(ctx, "Checking bucket existence", map[string]interface{}{ + "bucket_name": bucketName, + }) + + exists, err := svc.HeadBucket(ctx, bucketName) + if !exists { + tflog.Warn(ctx, "Bucket not found, removing from state", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil + } + if err != nil { + return diag.FromErr(fmt.Errorf("unable to read bucket, %w", 
err)) + } + + d.Set(names.AttrBucket, bucketName) + + tflog.Info(ctx, "Fetching bucket metadata", map[string]interface{}{ + "bucket_name": bucketName, + }) + + metadata, err := svc.GetBucketMetadata(ctx, bucketName) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to read bucket metadata, %w", err)) + } + + tflog.Info(ctx, "Fetched bucket metadata", map[string]interface{}{ + "bucket_name": bucketName, + }) + + if metadata.Shadow != nil && metadata.Shadow.Name != "" { + d.Set(names.AttrShadowBucket, metadata.Shadow.Name) + d.Set(names.AttrShadowAccessKey, metadata.Shadow.AccessKey) + d.Set(names.AttrShadowSecretKey, metadata.Shadow.SecretKey) + d.Set(names.AttrShadowRegion, metadata.Shadow.Region) + d.Set(names.AttrShadowEndpoint, metadata.Shadow.Endpoint) + d.Set(names.AttrShadowWriteThrough, metadata.Shadow.WriteThrough) + } else { + tflog.Warn(ctx, "Bucket shadow configuration not found, removing from state", map[string]interface{}{ + "id": d.Id(), + }) + + d.SetId("") + } + + return nil +} + +func resourceBucketShadowUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + } + needsUpdate := false + + tflog.Info(ctx, "Updating bucket shadow configuration", map[string]interface{}{ + "bucket_name": bucketName, + }) + + // + // Bucket Shadow Config. 
+ // + if d.HasChangesExcept(names.AttrBucket) { + input.Shadow = &types.BucketShadowConfig{ + Name: d.Get(names.AttrShadowBucket).(string), + AccessKey: d.Get(names.AttrShadowAccessKey).(string), + SecretKey: d.Get(names.AttrShadowSecretKey).(string), + Region: d.Get(names.AttrShadowRegion).(string), + Endpoint: d.Get(names.AttrShadowEndpoint).(string), + WriteThrough: d.Get(names.AttrShadowWriteThrough).(bool), + } + + tflog.Info(ctx, "Will update bucket shadow config", map[string]interface{}{ + "bucket_name": bucketName, + }) + + needsUpdate = true + } + + if needsUpdate { + err := svc.UpdateBucket(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to update bucket shadow configuration, %w", err)) + } + } + + tflog.Info(ctx, "Bucket shadow configuration updated successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + return resourceBucketShadowRead(ctx, d, meta) +} + +func resourceBucketShadowDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + Shadow: &types.BucketShadowConfig{}, + } + + tflog.Info(ctx, "Deleting bucket shadow configuration", map[string]interface{}{ + "bucket_name": bucketName, + }) + + err := svc.UpdateBucket(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to delete bucket shadow configuration, %w", err)) + } + + tflog.Info(ctx, "Bucket shadow configuration deleted successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil +} diff --git a/internal/resource_bucket_website.go b/internal/resource_bucket_website.go new file mode 100644 index 0000000..c097b8a --- /dev/null +++ b/internal/resource_bucket_website.go @@ -0,0 +1,199 @@ +package internal + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/tigrisdata/terraform-provider-tigris/internal/names" + "github.com/tigrisdata/terraform-provider-tigris/internal/types" +) + +func resourceTigrisBucketWebsiteConfig() *schema.Resource { + return &schema.Resource{ + Description: "Provides a Tigris bucket website configuration resource.", + CreateWithoutTimeout: resourceBucketWebsiteCreate, + ReadWithoutTimeout: resourceBucketWebsiteRead, + UpdateWithoutTimeout: resourceBucketWebsiteUpdate, + DeleteWithoutTimeout: resourceBucketWebsiteDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + names.AttrBucket: { + Type: schema.TypeString, + Required: true, + Description: "The name of the Tigris bucket.", + }, + names.AttrDomainName: { + Type: schema.TypeString, + Optional: true, + Description: "The custom domain name to apply to the bucket.", + }, + }, + } +} + +func resourceBucketWebsiteCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Get(names.AttrBucket).(string) + website_domain := d.Get(names.AttrDomainName).(string) + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + Website: &types.BucketWebsiteConfig{ + DomainName: website_domain, + }, + } + + tflog.Info(ctx, "Creating bucket website config", map[string]interface{}{ + "bucket_name": bucketName, + }) + + if err := svc.UpdateBucket(ctx, input); err != nil { + return diag.FromErr(fmt.Errorf("unable to create bucket website config, %w", err)) + } + + tflog.Info(ctx, "Bucket website config created successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) 
+ + d.SetId(bucketName) + + return resourceBucketWebsiteRead(ctx, d, meta) +} + +func resourceBucketWebsiteRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + tflog.Info(ctx, "Checking bucket existence", map[string]interface{}{ + "bucket_name": bucketName, + }) + + exists, err := svc.HeadBucket(ctx, bucketName) + if !exists { + tflog.Warn(ctx, "Bucket not found, removing from state", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil + } + if err != nil { + return diag.FromErr(fmt.Errorf("unable to read bucket, %w", err)) + } + + d.Set(names.AttrBucket, bucketName) + + tflog.Info(ctx, "Fetching bucket metadata", map[string]interface{}{ + "bucket_name": bucketName, + }) + + metadata, err := svc.GetBucketMetadata(ctx, bucketName) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to read bucket metadata, %w", err)) + } + + tflog.Info(ctx, "Fetched bucket metadata", map[string]interface{}{ + "bucket_name": bucketName, + }) + + if metadata.Website != nil && metadata.Website.DomainName != "" { + d.Set(names.AttrDomainName, metadata.Website.DomainName) + } else { + tflog.Warn(ctx, "Bucket website configuration not found, removing from state", map[string]interface{}{ + "id": d.Id(), + }) + + d.SetId("") + } + + return nil +} + +func resourceBucketWebsiteUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + } + needsUpdate := false + + tflog.Info(ctx, "Updating bucket website configuration", map[string]interface{}{ + "bucket_name": bucketName, + }) + + // + // Bucket Domain Name. 
+ // + if d.HasChange(names.AttrDomainName) { + input.Website = &types.BucketWebsiteConfig{ + DomainName: d.Get(names.AttrDomainName).(string), + } + + tflog.Info(ctx, "Will update bucket domain name", map[string]interface{}{ + "bucket_name": bucketName, + }) + + needsUpdate = true + } + + if needsUpdate { + err := svc.UpdateBucket(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to update bucket website configuration, %w", err)) + } + } + + tflog.Info(ctx, "Bucket website configuration updated successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + return resourceBucketWebsiteRead(ctx, d, meta) +} + +func resourceBucketWebsiteDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + svc := meta.(*Client) + + bucketName := d.Id() + + input := &types.BucketUpdateInput{ + Bucket: bucketName, + Website: &types.BucketWebsiteConfig{ + DomainName: "", + }, + } + + tflog.Info(ctx, "Deleting bucket website configuration", map[string]interface{}{ + "bucket_name": bucketName, + }) + + err := svc.UpdateBucket(ctx, input) + if err != nil { + return diag.FromErr(fmt.Errorf("unable to delete bucket website configuration, %w", err)) + } + + tflog.Info(ctx, "Bucket website configuration deleted successfully", map[string]interface{}{ + "bucket_name": bucketName, + }) + + d.SetId("") + return nil +} diff --git a/internal/types/types.go b/internal/types/types.go new file mode 100644 index 0000000..414c983 --- /dev/null +++ b/internal/types/types.go @@ -0,0 +1,98 @@ +package types + +type BucketCannedACL string + +// Enum values for BucketCannedACL. 
+const ( + BucketCannedACLPrivate BucketCannedACL = "private" + BucketCannedACLPublicRead BucketCannedACL = "public-read" +) + +func (BucketCannedACL) Values() []BucketCannedACL { + return []BucketCannedACL{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + } +} + +type BucketMetadata struct { + Name string `json:"name"` + CacheControl string `json:"cache_control"` + ObjectRegions string `json:"object_regions"` + MD *BucketMD `json:"md"` + Shadow *BucketShadowConfig `json:"shadow_bucket"` + Website *BucketWebsiteConfig `json:"website"` +} + +type BucketMD struct { + ACL *BucketCannedACL `json:"X-Amz-Acl"` + PublicObjectsListEnabled *string `json:"x-amz-acl-public-list-objects-enabled"` +} + +func (b *BucketMetadata) GetBucketCannedACL() BucketCannedACL { + if b.MD == nil || b.MD.ACL == nil { + return BucketCannedACLPrivate + } + + return *b.MD.ACL +} + +func (b *BucketMetadata) GetPublicObjectsListEnabled() bool { + if b.MD == nil || b.MD.PublicObjectsListEnabled == nil { + return true + } + + if *b.MD.PublicObjectsListEnabled == "true" { + return true + } + + return false +} + +type BucketWebsiteConfig struct { + DomainName string `json:"domain_name"` +} + +type BucketShadowConfig struct { + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + Region string `json:"region"` + Name string `json:"name"` + Endpoint string `json:"endpoint"` + WriteThrough bool `json:"write_through"` +} + +// BucketUpdateInput is the input for the UpdateBucket function. +type BucketUpdateInput struct { + // The name of the bucket to create. + Bucket string + + // The canned ACL to apply to the bucket. + ACL *BucketCannedACL + + // Whether to enable public object listing. + PublicObjectsListEnabled *bool + + // The website configuration for the bucket. + Website *BucketWebsiteConfig + + // The shadow bucket configuration for the bucket. + Shadow *BucketShadowConfig +} + +// BucketUpdateRequest is the request body for the UpdateBucket API. 
+type BucketUpdateRequest struct { + Website *BucketWebsiteConfig `json:"website"` + Shadow *BucketShadowConfig `json:"shadow_bucket"` +} + +type BucketUpdateResponse struct { + // The success status of the update. + Update string `json:"Update"` + + // The error message if the update failed. + ErrorMessage string `json:"Message"` + + // The error code if the update failed. + ErrorCode string `json:"Code"` +}