diff --git a/agent/Gopkg.lock b/agent/Gopkg.lock
index 5264cd83178..a7209a12a9e 100644
--- a/agent/Gopkg.lock
+++ b/agent/Gopkg.lock
@@ -10,7 +10,7 @@
version = "v0.4.7"
[[projects]]
- digest = "1:dc032a34ef4d303739c77ec93053abf28c49402e85e7256715b1a5fdb93d1b30"
+ digest = "1:7998438535fbfe9617b33556fd5ddd16972579d857cd8fe80f1abc3d1f02ce4a"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
@@ -88,12 +88,17 @@
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
- digest = "1:50e893a85575fa48dc4982a279e50e2fd8b74e4f7c587860c1e25c77083b8125"
+ digest = "1:de747471fda94a360884bd972aa5633e65a77666283ec9c0d9c0755b871f3a9b"
name = "github.com/cihub/seelog"
- packages = ["."]
+ packages = [
+ ".",
+ "archive",
+ "archive/gzip",
+ "archive/tar",
+ "archive/zip",
+ ]
pruneopts = "UT"
- revision = "d2c6e5aa9fbfdd1c624e140287063c7730654115"
- version = "v2.6"
+ revision = "f561c5e57575bb1e0a2167028b7339b3a8d16fb4"
[[projects]]
digest = "1:d9209774a52def50b9f6e60d4422383cc10b6370ab5eed7be05c54e90fb12825"
diff --git a/agent/Gopkg.toml b/agent/Gopkg.toml
index b23d056148c..8fdec544a3a 100644
--- a/agent/Gopkg.toml
+++ b/agent/Gopkg.toml
@@ -31,7 +31,7 @@ required = ["github.com/golang/mock/mockgen/model"]
[[constraint]]
name = "github.com/cihub/seelog"
- version ="2.6"
+ revision = "f561c5e57575bb1e0a2167028b7339b3a8d16fb4"
[[constraint]]
name = "github.com/containerd/cgroups"
diff --git a/agent/acs/update_handler/updater.go b/agent/acs/update_handler/updater.go
index 5bbe5e15ca2..fe964a71a96 100644
--- a/agent/acs/update_handler/updater.go
+++ b/agent/acs/update_handler/updater.go
@@ -29,7 +29,6 @@ import (
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/httpclient"
- "github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/sighandlers"
"github.com/aws/amazon-ecs-agent/agent/sighandlers/exitcodes"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
@@ -40,8 +39,6 @@ import (
"github.com/cihub/seelog"
)
-var log = logger.ForModule("updater")
-
const desiredImageFile = "desired-image"
// update describes metadata around a 2-phase update request
@@ -93,7 +90,7 @@ func (u *updater) stageUpdateHandler() func(req *ecsacs.StageUpdateMessage) {
defer u.Unlock()
if req == nil || req.MessageId == nil {
- log.Error("Nil request to stage update or missing MessageID")
+ seelog.Error("Nil request to stage update or missing MessageID")
return
}
@@ -118,11 +115,11 @@ func (u *updater) stageUpdateHandler() func(req *ecsacs.StageUpdateMessage) {
return
}
- log.Debug("Staging update", "update", req)
+ seelog.Debug("Staging update", "update", req)
if u.stage != updateNone {
if u.updateID != "" && u.updateID == *req.UpdateInfo.Signature {
- log.Debug("Update already in progress, acking duplicate message", "id", u.updateID)
+ seelog.Debug("Update already in progress, acking duplicate message", "id", u.updateID)
// Acking here is safe as any currently-downloading update will already be holding
// the update lock. A failed download will nack and clear state (while holding the
// update lock) before this code is reached, meaning that the above conditional will
@@ -215,7 +212,7 @@ func (u *updater) performUpdateHandler(saver statemanager.Saver, taskEngine engi
u.Lock()
defer u.Unlock()
- log.Debug("Got perform update request")
+ seelog.Debug("Got perform update request")
if !u.config.UpdatesEnabled {
reason := "Updates are disabled"
@@ -230,7 +227,7 @@ func (u *updater) performUpdateHandler(saver statemanager.Saver, taskEngine engi
}
if u.stage != updateDownloaded {
- log.Error("Nacking PerformUpdate; not downloaded")
+ seelog.Error("Nacking PerformUpdate; not downloaded")
reason := "Cannot perform update; update not downloaded"
u.acs.MakeRequest(&ecsacs.NackRequest{
Cluster: req.ClusterArn,
@@ -248,9 +245,9 @@ func (u *updater) performUpdateHandler(saver statemanager.Saver, taskEngine engi
err := sighandlers.FinalSave(saver, taskEngine)
if err != nil {
- log.Crit("Error saving before update exit", "err", err)
+ seelog.Critical("Error saving before update exit", "err", err)
} else {
- log.Debug("Saved state!")
+ seelog.Debug("Saved state!")
}
u.fs.Exit(exitcodes.ExitUpdate)
}
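
Note: the pattern in this file repeats across the rest of the change — the deprecated `logger.ForModule` shim and its package-level `log` variable are removed, and call sites use seelog's package-level functions directly (`log.Crit` becomes `seelog.Critical`, and so on). A minimal sketch of the resulting call style, assuming nothing beyond the seelog package itself (the message and trailing value are illustrative):

```go
package main

import "github.com/cihub/seelog"

func main() {
	defer seelog.Flush()

	// Before this change (shim, now deleted):
	//   log := logger.ForModule("updater")
	//   log.Debug("Staging update", "update", req)
	//
	// After: call seelog directly. Its logging functions are variadic,
	// so any trailing values are simply concatenated into the message.
	seelog.Debug("Staging update, signature: ", "abc123")
}
```
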
diff --git a/agent/engine/dockerstate/docker_task_engine_state.go b/agent/engine/dockerstate/docker_task_engine_state.go
index af1033af0a7..5035eb36dff 100644
--- a/agent/engine/dockerstate/docker_task_engine_state.go
+++ b/agent/engine/dockerstate/docker_task_engine_state.go
@@ -22,12 +22,9 @@ import (
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
"github.com/aws/amazon-ecs-agent/agent/engine/image"
- "github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/cihub/seelog"
)
-var log = logger.ForModule("dockerstate")
-
// TaskEngineState keeps track of all mappings between tasks we know about
// and containers docker runs
type TaskEngineState interface {
@@ -201,7 +198,7 @@ func (state *DockerTaskEngineState) ENIByMac(mac string) (*apieni.ENIAttachment,
// AddENIAttachment adds the eni into the state
func (state *DockerTaskEngineState) AddENIAttachment(eniAttachment *apieni.ENIAttachment) {
if eniAttachment == nil {
- log.Debug("Cannot add empty eni attachment information")
+ seelog.Debug("Cannot add empty eni attachment information")
return
}
@@ -219,7 +216,7 @@ func (state *DockerTaskEngineState) AddENIAttachment(eniAttachment *apieni.ENIAt
// RemoveENIAttachment removes the eni from state and stop managing
func (state *DockerTaskEngineState) RemoveENIAttachment(mac string) {
if mac == "" {
- log.Debug("Cannot remove empty eni attachment information")
+ seelog.Debug("Cannot remove empty eni attachment information")
return
}
state.lock.Lock()
@@ -326,13 +323,13 @@ func (state *DockerTaskEngineState) AddContainer(container *apicontainer.DockerC
state.lock.Lock()
defer state.lock.Unlock()
if task == nil || container == nil {
- log.Crit("Addcontainer called with nil task/container")
+ seelog.Critical("Addcontainer called with nil task/container")
return
}
_, exists := state.tasks[task.Arn]
if !exists {
- log.Debug("AddContainer called with unknown task; adding", "arn", task.Arn)
+ seelog.Debug("AddContainer called with unknown task; adding", "arn", task.Arn)
state.tasks[task.Arn] = task
}
@@ -357,11 +354,11 @@ func (state *DockerTaskEngineState) AddContainer(container *apicontainer.DockerC
// AddImageState adds an image.ImageState to be stored
func (state *DockerTaskEngineState) AddImageState(imageState *image.ImageState) {
if imageState == nil {
- log.Debug("Cannot add empty image state")
+ seelog.Debug("Cannot add empty image state")
return
}
if imageState.Image.ImageID == "" {
- log.Debug("Cannot add image state with empty image id")
+ seelog.Debug("Cannot add image state with empty image id")
return
}
state.lock.Lock()
@@ -455,7 +452,7 @@ func (state *DockerTaskEngineState) removeV3EndpointIDToTaskContainerUnsafe(v3En
// RemoveImageState removes an image.ImageState
func (state *DockerTaskEngineState) RemoveImageState(imageState *image.ImageState) {
if imageState == nil {
- log.Debug("Cannot remove empty image state")
+ seelog.Debug("Cannot remove empty image state")
return
}
state.lock.Lock()
@@ -463,7 +460,7 @@ func (state *DockerTaskEngineState) RemoveImageState(imageState *image.ImageStat
imageState, ok := state.imageStates[imageState.Image.ImageID]
if !ok {
- log.Debug("Image State is not found. Cannot be removed")
+ seelog.Debug("Image State is not found. Cannot be removed")
return
}
delete(state.imageStates, imageState.Image.ImageID)
diff --git a/agent/functional_tests/tests/functionaltests_unix_test.go b/agent/functional_tests/tests/functionaltests_unix_test.go
index 860482b5567..c33203d9410 100644
--- a/agent/functional_tests/tests/functionaltests_unix_test.go
+++ b/agent/functional_tests/tests/functionaltests_unix_test.go
@@ -459,7 +459,7 @@ func taskIAMRoles(networkMode string, agent *TestAgent, t *testing.T) {
require.Equal(t, 0, containerMetaData.State.ExitCode, fmt.Sprintf("Container exit code non-zero: %v", containerMetaData.State.ExitCode))
// Search the audit log to verify the credential request
- err = utils.SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log.", *task.TaskArn)
+ err = utils.SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log", *task.TaskArn)
require.NoError(t, err, "Verify credential request failed")
}
@@ -807,8 +807,8 @@ func TestExecutionRole(t *testing.T) {
assert.Len(t, resp.Events, 1, fmt.Sprintf("Get unexpected number of log events: %d", len(resp.Events)))
assert.Equal(t, *resp.Events[0].Message, "hello world", fmt.Sprintf("Got log events message unexpected: %s", *resp.Events[0].Message))
// Search the audit log to verify the credential request from awslogs driver
- err = utils.SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log.", "GetCredentialsExecutionRole")
- err = utils.SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log.", *testTask.TaskArn)
+ err = utils.SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log", "GetCredentialsExecutionRole")
+ err = utils.SearchStrInDir(filepath.Join(agent.TestDir, "log"), "audit.log", *testTask.TaskArn)
require.NoError(t, err, "Verify credential request failed")
}
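
The prefix change above matters because the test helper matches files by name prefix: the old prefix `audit.log.` only matched rotated files (e.g. `audit.log.2018-10-01-01`), while the bare `audit.log` prefix also matches the active log file produced by the new rolling-file configuration. A small standalone sketch of that matching rule (`matches` is a hypothetical stand-in, not the repo's `utils.SearchStrInDir` implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// matches mimics the name filter a prefix-based directory search would use.
func matches(filename, prefix string) bool {
	return strings.HasPrefix(filename, prefix)
}

func main() {
	for _, f := range []string{"audit.log", "audit.log.2018-10-01-01"} {
		// With the old prefix "audit.log." the active file is skipped;
		// with "audit.log" both active and rotated files are searched.
		fmt.Printf("%-24s old(audit.log.)=%v new(audit.log)=%v\n",
			f, matches(f, "audit.log."), matches(f, "audit.log"))
	}
}
```
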
diff --git a/agent/logger/audit/audit_log.go b/agent/logger/audit/audit_log.go
index aaf8200d6cd..0a151f2fc82 100644
--- a/agent/logger/audit/audit_log.go
+++ b/agent/logger/audit/audit_log.go
@@ -15,11 +15,23 @@ package audit
import (
"fmt"
+ "strconv"
"github.com/aws/amazon-ecs-agent/agent/config"
+ "github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/logger/audit/request"
)
+type AuditLogger interface {
+ Log(r request.LogRequest, httpResponseCode int, eventType string)
+ GetContainerInstanceArn() string
+ GetCluster() string
+}
+
+type InfoLogger interface {
+ Info(i ...interface{})
+}
+
type auditLog struct {
containerInstanceArn string
cluster string
@@ -62,3 +74,29 @@ func (a *auditLog) GetCluster() string {
func (a *auditLog) GetContainerInstanceArn() string {
return a.containerInstanceArn
}
+
+func AuditLoggerConfig(cfg *config.Config) string {
+    config := `
+<seelog type="asyncloop">
+    <outputs formatid="main">
+        <console />`
+    if cfg.CredentialsAuditLogFile != "" {
+        if logger.Config.RolloverType == "size" {
+            config += `
+        <rollingfile filename="` + cfg.CredentialsAuditLogFile + `" type="size"
+         maxsize="` + strconv.Itoa(int(logger.Config.MaxFileSizeMB*1000000)) + `" archivetype="none" maxrolls="` + strconv.Itoa(logger.Config.MaxRollCount) + `" />`
+        } else {
+            config += `
+        <rollingfile filename="` + cfg.CredentialsAuditLogFile + `" type="date"
+         datepattern="2006-01-02-15" archivetype="none" maxrolls="` + strconv.Itoa(logger.Config.MaxRollCount) + `" />`
+        }
+    }
+    config += `
+    </outputs>
+    <formats>
+        <format id="main" format="%Msg%n" />
+    </formats>
+</seelog>`
+ return config
+}
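
For context on how the string built by `AuditLoggerConfig` is consumed: elsewhere in the agent the same pattern feeds a generated XML string to `seelog.LoggerFromConfigAsString` (see `reloadConfig` in `agent/logger/log.go` below). A hedged sketch of that wiring — `newAuditSeelogger` is a hypothetical helper, not part of this diff:

```go
package audit

import (
	"github.com/aws/amazon-ecs-agent/agent/config"
	"github.com/cihub/seelog"
)

// newAuditSeelogger turns the generated seelog XML into a live logger,
// mirroring what reloadConfig does in agent/logger/log.go.
func newAuditSeelogger(cfg *config.Config) (seelog.LoggerInterface, error) {
	return seelog.LoggerFromConfigAsString(AuditLoggerConfig(cfg))
}
```
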
diff --git a/agent/logger/audit/interface.go b/agent/logger/audit/interface.go
deleted file mode 100644
index 39bd372e9e1..00000000000
--- a/agent/logger/audit/interface.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"). You may
-// not use this file except in compliance with the License. A copy of the
-// License is located at
-//
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package audit
-
-import "github.com/aws/amazon-ecs-agent/agent/logger/audit/request"
-
-type AuditLogger interface {
- Log(r request.LogRequest, httpResponseCode int, eventType string)
- GetContainerInstanceArn() string
- GetCluster() string
-}
-
-type InfoLogger interface {
- Info(i ...interface{})
-}
diff --git a/agent/logger/audit/seelog_config.go b/agent/logger/audit/seelog_config.go
deleted file mode 100644
index 897487c885c..00000000000
--- a/agent/logger/audit/seelog_config.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"). You may
-// not use this file except in compliance with the License. A copy of the
-// License is located at
-//
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package audit
-
-import "github.com/aws/amazon-ecs-agent/agent/config"
-
-func AuditLoggerConfig(cfg *config.Config) string {
-    config := `
-<seelog type="asyncloop">
-    <outputs formatid="main">
-        <console />`
-    if cfg.CredentialsAuditLogFile != "" {
-        config += `
-        <rollingfile filename="` + cfg.CredentialsAuditLogFile + `" type="date"
-         datepattern="2006-01-02-15" archivetype="none" maxrolls="24" />`
-    }
-    config += `
-    </outputs>
-    <formats>
-        <format id="main" format="%Msg%n" />
-    </formats>
-</seelog>`
- return config
-}
diff --git a/agent/logger/log.go b/agent/logger/log.go
index 0a1867c5cf8..c74a570f53a 100644
--- a/agent/logger/log.go
+++ b/agent/logger/log.go
@@ -14,87 +14,162 @@
package logger
import (
+ "fmt"
"os"
+ "strconv"
"strings"
"sync"
+ "time"
- log "github.com/cihub/seelog"
+ "github.com/cihub/seelog"
)
const (
- LOGLEVEL_ENV_VAR = "ECS_LOGLEVEL"
- LOGFILE_ENV_VAR = "ECS_LOGFILE"
-
- DEFAULT_LOGLEVEL = "info"
+ LOGLEVEL_ENV_VAR = "ECS_LOGLEVEL"
+ LOGFILE_ENV_VAR = "ECS_LOGFILE"
+ LOG_ROLLOVER_TYPE_ENV_VAR = "ECS_LOG_ROLLOVER_TYPE"
+ LOG_OUTPUT_FORMAT_ENV_VAR = "ECS_LOG_OUTPUT_FORMAT"
+ LOG_MAX_FILE_SIZE_ENV_VAR = "ECS_LOG_MAX_FILE_SIZE_MB"
+ LOG_MAX_ROLL_COUNT_ENV_VAR = "ECS_LOG_MAX_ROLL_COUNT"
+
+ DEFAULT_LOGLEVEL = "info"
+ DEFAULT_ROLLOVER_TYPE = "date"
+ DEFAULT_OUTPUT_FORMAT = "logfmt"
+ DEFAULT_MAX_FILE_SIZE float64 = 10
+ DEFAULT_MAX_ROLL_COUNT int = 24
)
-var logfile string
-var level string
-var levelLock sync.RWMutex
-var levels map[string]string
-var logger OldLogger
+type logConfig struct {
+ RolloverType string
+ MaxRollCount int
+ MaxFileSizeMB float64
+ logfile string
+ level string
+ outputFormat string
+ lock sync.Mutex
+}
-// Initialize this logger once
-var once sync.Once
+var Config *logConfig
-func init() {
- once.Do(initLogger)
+func logfmtFormatter(params string) seelog.FormatterFunc {
+ return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} {
+ return fmt.Sprintf(`level=%s time=%s msg=%q module=%s
+`, level.String(), context.CallTime().UTC().Format(time.RFC3339), message, context.FileName())
+ }
}
-func initLogger() {
- levels = map[string]string{
- "debug": "debug",
- "info": "info",
- "warn": "warn",
- "error": "error",
- "crit": "critical",
- "none": "off",
+func jsonFormatter(params string) seelog.FormatterFunc {
+ return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} {
+ return fmt.Sprintf(`{"level": %q, "time": %q, "msg": %q, "module": %q}
+`, level.String(), context.CallTime().UTC().Format(time.RFC3339), message, context.FileName())
}
-
- level = DEFAULT_LOGLEVEL
-
- logger = &Shim{}
-
- envLevel := os.Getenv(LOGLEVEL_ENV_VAR)
-
- logfile = os.Getenv(LOGFILE_ENV_VAR)
- SetLevel(envLevel)
- registerPlatformLogger()
- reloadConfig()
}
func reloadConfig() {
- logger, err := log.LoggerFromConfigAsString(loggerConfig())
+ logger, err := seelog.LoggerFromConfigAsString(seelogConfig())
if err == nil {
- log.ReplaceLogger(logger)
+ seelog.ReplaceLogger(logger)
} else {
- log.Error(err)
+ seelog.Error(err)
}
}
+func seelogConfig() string {
+    c := `
+<seelog type="asyncloop" minlevel="` + Config.level + `">
+    <outputs formatid="` + Config.outputFormat + `">
+        <console />`
+    c += platformLogConfig()
+    if Config.logfile != "" {
+        if Config.RolloverType == "size" {
+            c += `
+        <rollingfile filename="` + Config.logfile + `" type="size"
+         maxsize="` + strconv.Itoa(int(Config.MaxFileSizeMB*1000000)) + `" archivetype="none" maxrolls="` + strconv.Itoa(Config.MaxRollCount) + `" />`
+        } else {
+            c += `
+        <rollingfile filename="` + Config.logfile + `" type="date"
+         datepattern="2006-01-02-15" archivetype="none" maxrolls="` + strconv.Itoa(Config.MaxRollCount) + `" />`
+        }
+    }
+    c += `
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`
+ return c
+}
+
// SetLevel sets the log level for logging
func SetLevel(logLevel string) {
+ levels := map[string]string{
+ "debug": "debug",
+ "info": "info",
+ "warn": "warn",
+ "error": "error",
+ "crit": "critical",
+ "none": "off",
+ }
parsedLevel, ok := levels[strings.ToLower(logLevel)]
if ok {
- levelLock.Lock()
- defer levelLock.Unlock()
- level = parsedLevel
+ Config.lock.Lock()
+ defer Config.lock.Unlock()
+ Config.level = parsedLevel
reloadConfig()
}
}
// GetLevel gets the log level
func GetLevel() string {
- levelLock.RLock()
- defer levelLock.RUnlock()
+ Config.lock.Lock()
+ defer Config.lock.Unlock()
- return level
+ return Config.level
}
-// ForModule returns an OldLogger instance. OldLogger is deprecated and kept
-// for compatibility reasons. Prefer using Seelog directly.
-func ForModule(module string) OldLogger {
- once.Do(initLogger)
- return logger.New("module", module)
+func init() {
+ Config = &logConfig{
+ logfile: os.Getenv(LOGFILE_ENV_VAR),
+ level: DEFAULT_LOGLEVEL,
+ RolloverType: DEFAULT_ROLLOVER_TYPE,
+ outputFormat: DEFAULT_OUTPUT_FORMAT,
+ MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE,
+ MaxRollCount: DEFAULT_MAX_ROLL_COUNT,
+ }
+
+ SetLevel(os.Getenv(LOGLEVEL_ENV_VAR))
+ if RolloverType := os.Getenv(LOG_ROLLOVER_TYPE_ENV_VAR); RolloverType != "" {
+ Config.RolloverType = RolloverType
+ }
+ if outputFormat := os.Getenv(LOG_OUTPUT_FORMAT_ENV_VAR); outputFormat != "" {
+ Config.outputFormat = outputFormat
+ }
+ if MaxRollCount := os.Getenv(LOG_MAX_ROLL_COUNT_ENV_VAR); MaxRollCount != "" {
+ i, err := strconv.Atoi(MaxRollCount)
+ if err == nil {
+ Config.MaxRollCount = i
+ } else {
+ seelog.Error("Invalid value for "+LOG_MAX_ROLL_COUNT_ENV_VAR, err)
+ }
+ }
+ if MaxFileSizeMB := os.Getenv(LOG_MAX_FILE_SIZE_ENV_VAR); MaxFileSizeMB != "" {
+ f, err := strconv.ParseFloat(MaxFileSizeMB, 64)
+ if err == nil {
+ Config.MaxFileSizeMB = f
+ } else {
+ seelog.Error("Invalid value for "+LOG_MAX_FILE_SIZE_ENV_VAR, err)
+ }
+ }
+
+ if err := seelog.RegisterCustomFormatter("EcsAgentLogfmt", logfmtFormatter); err != nil {
+ seelog.Error(err)
+ }
+ if err := seelog.RegisterCustomFormatter("EcsAgentJson", jsonFormatter); err != nil {
+ seelog.Error(err)
+ }
+
+ registerPlatformLogger()
+ reloadConfig()
}
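
Taken together, `agent/logger/log.go` now reads four new environment variables at init (`ECS_LOG_ROLLOVER_TYPE`, `ECS_LOG_OUTPUT_FORMAT`, `ECS_LOG_MAX_FILE_SIZE_MB`, `ECS_LOG_MAX_ROLL_COUNT`), registers two custom seelog formatters, and rebuilds the logger from a generated XML config. A self-contained sketch of that formatter-plus-config flow, using illustrative names (`MyLogfmt` and the inline XML are not the agent's exact strings):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cihub/seelog"
)

// logfmtFormatter matches seelog's FormatterFuncCreator shape,
// just as the agent's formatter above does.
func logfmtFormatter(params string) seelog.FormatterFunc {
	return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} {
		return fmt.Sprintf("level=%s time=%s msg=%q\n",
			level.String(), context.CallTime().UTC().Format(time.RFC3339), message)
	}
}

func main() {
	// Custom formatters must be registered before the config that
	// references them (as %MyLogfmt) is parsed.
	if err := seelog.RegisterCustomFormatter("MyLogfmt", logfmtFormatter); err != nil {
		panic(err)
	}
	logger, err := seelog.LoggerFromConfigAsString(`
<seelog minlevel="info">
	<outputs formatid="main"><console /></outputs>
	<formats><format id="main" format="%MyLogfmt" /></formats>
</seelog>`)
	if err != nil {
		panic(err)
	}
	seelog.ReplaceLogger(logger)
	defer seelog.Flush()
	seelog.Info("hello from the sketch")
	// Prints something like:
	// level=info time=2018-10-01T01:02:03Z msg="hello from the sketch"
}
```
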
diff --git a/agent/logger/log_test.go b/agent/logger/log_test.go
new file mode 100644
index 00000000000..789b04ce0da
--- /dev/null
+++ b/agent/logger/log_test.go
@@ -0,0 +1,246 @@
+// +build !windows
+
+// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package logger
+
+import (
+ "testing"
+ "time"
+
+ "github.com/cihub/seelog"
+ "github.com/stretchr/testify/require"
+)
+
+func TestLogfmtFormat(t *testing.T) {
+ logfmt := logfmtFormatter("")
+ out := logfmt("This is my log message", seelog.InfoLvl, &LogContextMock{})
+ s, ok := out.(string)
+ require.True(t, ok)
+ require.Equal(t, `level=info time=2018-10-01T01:02:03Z msg="This is my log message" module=mytestmodule.go
+`, s)
+}
+
+func TestJSONFormat(t *testing.T) {
+ jsonF := jsonFormatter("")
+ out := jsonF("This is my log message", seelog.InfoLvl, &LogContextMock{})
+ s, ok := out.(string)
+ require.True(t, ok)
+ require.JSONEq(t, `{"level": "info", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message", "module": "mytestmodule.go"}`, s)
+}
+
+func TestLogfmtFormat_debug(t *testing.T) {
+ logfmt := logfmtFormatter("")
+ out := logfmt("This is my log message", seelog.DebugLvl, &LogContextMock{})
+ s, ok := out.(string)
+ require.True(t, ok)
+ require.Equal(t, `level=debug time=2018-10-01T01:02:03Z msg="This is my log message" module=mytestmodule.go
+`, s)
+}
+
+func TestJSONFormat_debug(t *testing.T) {
+ jsonF := jsonFormatter("")
+ out := jsonF("This is my log message", seelog.DebugLvl, &LogContextMock{})
+ s, ok := out.(string)
+ require.True(t, ok)
+ require.JSONEq(t, `{"level": "debug", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message", "module": "mytestmodule.go"}`, s)
+}
+
+func TestSeelogConfig_Default(t *testing.T) {
+ Config = &logConfig{
+ logfile: "foo.log",
+ level: DEFAULT_LOGLEVEL,
+ RolloverType: DEFAULT_ROLLOVER_TYPE,
+ outputFormat: DEFAULT_OUTPUT_FORMAT,
+ MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE,
+ MaxRollCount: DEFAULT_MAX_ROLL_COUNT,
+ }
+ c := seelogConfig()
+    require.Equal(t, `
+<seelog type="asyncloop" minlevel="info">
+    <outputs formatid="logfmt">
+        <console />
+        <rollingfile filename="foo.log" type="date"
+         datepattern="2006-01-02-15" archivetype="none" maxrolls="24" />
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`, c)
+}
+
+func TestSeelogConfig_DebugLevel(t *testing.T) {
+ Config = &logConfig{
+ logfile: "foo.log",
+ level: "debug",
+ RolloverType: DEFAULT_ROLLOVER_TYPE,
+ outputFormat: DEFAULT_OUTPUT_FORMAT,
+ MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE,
+ MaxRollCount: DEFAULT_MAX_ROLL_COUNT,
+ }
+ c := seelogConfig()
+    require.Equal(t, `
+<seelog type="asyncloop" minlevel="debug">
+    <outputs formatid="logfmt">
+        <console />
+        <rollingfile filename="foo.log" type="date"
+         datepattern="2006-01-02-15" archivetype="none" maxrolls="24" />
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`, c)
+}
+
+func TestSeelogConfig_SizeRollover(t *testing.T) {
+ Config = &logConfig{
+ logfile: "foo.log",
+ level: DEFAULT_LOGLEVEL,
+ RolloverType: "size",
+ outputFormat: DEFAULT_OUTPUT_FORMAT,
+ MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE,
+ MaxRollCount: DEFAULT_MAX_ROLL_COUNT,
+ }
+ c := seelogConfig()
+    require.Equal(t, `
+<seelog type="asyncloop" minlevel="info">
+    <outputs formatid="logfmt">
+        <console />
+        <rollingfile filename="foo.log" type="size"
+         maxsize="10000000" archivetype="none" maxrolls="24" />
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`, c)
+}
+
+func TestSeelogConfig_SizeRolloverFileSizeChange(t *testing.T) {
+ Config = &logConfig{
+ logfile: "foo.log",
+ level: DEFAULT_LOGLEVEL,
+ RolloverType: "size",
+ outputFormat: DEFAULT_OUTPUT_FORMAT,
+ MaxFileSizeMB: 15,
+ MaxRollCount: DEFAULT_MAX_ROLL_COUNT,
+ }
+ c := seelogConfig()
+    require.Equal(t, `
+<seelog type="asyncloop" minlevel="info">
+    <outputs formatid="logfmt">
+        <console />
+        <rollingfile filename="foo.log" type="size"
+         maxsize="15000000" archivetype="none" maxrolls="24" />
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`, c)
+}
+
+func TestSeelogConfig_SizeRolloverRollCountChange(t *testing.T) {
+ Config = &logConfig{
+ logfile: "foo.log",
+ level: DEFAULT_LOGLEVEL,
+ RolloverType: "size",
+ outputFormat: DEFAULT_OUTPUT_FORMAT,
+ MaxFileSizeMB: 15,
+ MaxRollCount: 10,
+ }
+ c := seelogConfig()
+    require.Equal(t, `
+<seelog type="asyncloop" minlevel="info">
+    <outputs formatid="logfmt">
+        <console />
+        <rollingfile filename="foo.log" type="size"
+         maxsize="15000000" archivetype="none" maxrolls="10" />
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`, c)
+}
+
+func TestSeelogConfig_JSONOutput(t *testing.T) {
+ Config = &logConfig{
+ logfile: "foo.log",
+ level: DEFAULT_LOGLEVEL,
+ RolloverType: DEFAULT_ROLLOVER_TYPE,
+ outputFormat: "json",
+ MaxFileSizeMB: DEFAULT_MAX_FILE_SIZE,
+ MaxRollCount: 10,
+ }
+ c := seelogConfig()
+    require.Equal(t, `
+<seelog type="asyncloop" minlevel="info">
+    <outputs formatid="json">
+        <console />
+        <rollingfile filename="foo.log" type="date"
+         datepattern="2006-01-02-15" archivetype="none" maxrolls="10" />
+    </outputs>
+    <formats>
+        <format id="logfmt" format="%EcsAgentLogfmt" />
+        <format id="json" format="%EcsAgentJson" />
+        <format id="windows" format="%Msg" />
+    </formats>
+</seelog>`, c)
+}
+
+type LogContextMock struct{}
+
+// Caller's function name.
+func (l *LogContextMock) Func() string {
+ return ""
+}
+
+// Caller's line number.
+func (l *LogContextMock) Line() int {
+ return 0
+}
+
+// Caller's file short path (in slashed form).
+func (l *LogContextMock) ShortPath() string {
+ return ""
+}
+
+// Caller's file full path (in slashed form).
+func (l *LogContextMock) FullPath() string {
+ return ""
+}
+
+// Caller's file name (without path).
+func (l *LogContextMock) FileName() string {
+ return "mytestmodule.go"
+}
+
+// True if the context is correct and may be used.
+// If false, then an error in context evaluation occurred and
+// all its other data may be corrupted.
+func (l *LogContextMock) IsValid() bool {
+ return true
+}
+
+// Time when log function was called.
+func (l *LogContextMock) CallTime() time.Time {
+ return time.Date(2018, time.October, 1, 1, 2, 3, 0, time.UTC)
+}
+
+// Custom context that can be set by calling logger.SetContext
+func (l *LogContextMock) CustomContext() interface{} {
+ return map[string]string{}
+}
diff --git a/agent/logger/seelog_config.go b/agent/logger/seelog_config.go
deleted file mode 100644
index 3211a115cc1..00000000000
--- a/agent/logger/seelog_config.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"). You may
-// not use this file except in compliance with the License. A copy of the
-// License is located at
-//
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package logger
-
-func loggerConfig() string {
-    config := `
-<seelog type="asyncloop" minlevel="` + level + `">
-    <outputs formatid="main">
-        <console />`
-    config += platformLogConfig()
-    if logfile != "" {
-        config += `
-        <rollingfile filename="` + logfile + `" type="date"
-         datepattern="2006-01-02-15" archivetype="none" maxrolls="24" />`
-    }
-    config += `
-    </outputs>
-    <formats>
-        <format id="main" format="%UTCDate(2006-01-02T15:04:05Z07:00) [%LEVEL] %Msg%n" />
-        <format id="windows" format="%Msg" />
-    </formats>
-</seelog>`
- return config
-}
diff --git a/agent/logger/shim.go b/agent/logger/shim.go
deleted file mode 100644
index 74d6696878d..00000000000
--- a/agent/logger/shim.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"). You may
-// not use this file except in compliance with the License. A copy of the
-// License is located at
-//
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package logger
-
-import (
- "fmt"
-
- log "github.com/cihub/seelog"
-)
-
-type OldLogger interface {
- // New returns an OldLogger instance with the passed ctx plus existing ctx
- New(ctx ...interface{}) OldLogger
-
- // Log a message at the given level with context key/value pairs
- Debug(msg string, ctx ...interface{})
- Info(msg string, ctx ...interface{})
- Warn(msg string, ctx ...interface{})
- Error(msg string, ctx ...interface{})
- Crit(msg string, ctx ...interface{})
-}
-
-type Shim struct {
- ctx []interface{}
-}
-
-func (s *Shim) New(ctx ...interface{}) OldLogger {
- if len(ctx)%2 != 0 {
- log.Warnf("New ctx log with uneven ctx length. ctx length: %n", len(ctx))
- ctx = nil
- }
- return &Shim{
- ctx: append(s.ctx, ctx...),
- }
-}
-
-func (s *Shim) Debug(msg string, ctx ...interface{}) {
- log.Debug(s.formatMessage(msg, ctx...))
-}
-
-func (s *Shim) Info(msg string, ctx ...interface{}) {
- log.Info(s.formatMessage(msg, ctx...))
-}
-
-func (s *Shim) Warn(msg string, ctx ...interface{}) {
- log.Warn(s.formatMessage(msg, ctx...))
-}
-
-func (s *Shim) Error(msg string, ctx ...interface{}) {
- log.Error(s.formatMessage(msg, ctx...))
-}
-
-func (s *Shim) Crit(msg string, ctx ...interface{}) {
- log.Critical(s.formatMessage(msg, ctx...))
-}
-
-func (s *Shim) formatMessage(msg string, ctx ...interface{}) string {
- if len(ctx)%2 != 0 {
- log.Warnf("Log message with uneven ctx length. msg: %s, ctx length: %n", msg, len(ctx))
- return msg + " [malformed ctx omitted]"
- }
- fullCtx := append(s.ctx, ctx...)
- var retval string
- for i := 0; i < len(fullCtx); i += 2 {
- retval += fmt.Sprintf(" %v=\"%+v\"", fullCtx[i], fullCtx[i+1])
- }
- return msg + retval
-}
diff --git a/agent/logger/shim_test.go b/agent/logger/shim_test.go
deleted file mode 100644
index 6f6540d8bdd..00000000000
--- a/agent/logger/shim_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// +build unit
-
-// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"). You may
-// not use this file except in compliance with the License. A copy of the
-// License is located at
-//
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package logger
-
-import (
- "testing"
-)
-
-func TestFormatMessage_NoCtx(t *testing.T) {
- shim := &Shim{}
- simpleMessage := "simple message"
- formatted := shim.formatMessage(simpleMessage)
- expected := simpleMessage
- if formatted != expected {
- t.Errorf("Formatted message %s does not match expected %s", formatted, expected)
- }
-}
-
-func TestFormatMessage_UnevenCtx(t *testing.T) {
- shim := &Shim{}
- simpleMessage := "simple message"
- formatted := shim.formatMessage(simpleMessage, simpleMessage)
- expected := simpleMessage + " [malformed ctx omitted]"
- if formatted != expected {
- t.Errorf("Formatted message %s does not match expected %s", formatted, expected)
- }
-}
-
-func TestFormatMessage_SimpleCtx(t *testing.T) {
- shim := &Shim{}
- simpleMessage := "simple message"
- formatted := shim.formatMessage(simpleMessage, simpleMessage, simpleMessage)
- expected := simpleMessage + " " + simpleMessage + "=\"" + simpleMessage + "\""
- if formatted != expected {
- t.Errorf("Formatted message %s does not match expected %s", formatted, expected)
- }
-}
-
-func TestFormatMessage_Struct(t *testing.T) {
- shim := &Shim{}
- simpleMessage := "simple message"
- formatted := shim.formatMessage(simpleMessage, simpleMessage, simpleMessage, "struct", struct{ hello string }{
- hello: "world",
- })
- expected := simpleMessage + " " + simpleMessage + "=\"" + simpleMessage + "\" " + "struct=\"{hello:world}\""
- if formatted != expected {
- t.Errorf("Formatted message %s does not match expected %s", formatted, expected)
- }
-}
-
-func TestFormatMessage_TopCtx(t *testing.T) {
- shim := &Shim{ctx: []interface{}{"test", "value"}}
- simpleMessage := "simple message"
- formatted := shim.formatMessage(simpleMessage)
- expected := simpleMessage + " test=\"value\""
- if formatted != expected {
- t.Errorf("Formatted message %s does not match expected %s", formatted, expected)
- }
-}
-
-func TestFormatMessage_NewCtx(t *testing.T) {
- shim := (&Shim{ctx: []interface{}{"test", "value"}}).New("test2", "value2").(*Shim)
- simpleMessage := "simple message"
- formatted := shim.formatMessage(simpleMessage, "test3", "value3")
- expected := simpleMessage + " test=\"value\" test2=\"value2\" test3=\"value3\""
- if formatted != expected {
- t.Errorf("Formatted message %s does not match expected %s", formatted, expected)
- }
-}
diff --git a/agent/statemanager/state_manager.go b/agent/statemanager/state_manager.go
index e693245f753..045a33073de 100644
--- a/agent/statemanager/state_manager.go
+++ b/agent/statemanager/state_manager.go
@@ -26,8 +26,9 @@ import (
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
- "github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/metrics"
+
+ "github.com/cihub/seelog"
)
const (
@@ -107,8 +108,6 @@ const (
minSaveInterval = 10 * time.Second
)
-var log = logger.ForModule("statemanager")
-
// Saveable types should be able to be json serializable and deserializable
// Properly, this should have json.Marshaler/json.Unmarshaler here, but string
// and so on can be marshaled/unmarshaled sanely but don't fit those interfaces.
@@ -210,7 +209,7 @@ func AddSaveable(name string, saveable Saveable) Option {
return (Option)(func(m StateManager) {
manager, ok := m.(*basicStateManager)
if !ok {
- log.Crit("Unable to add to state manager; unknown instantiation")
+ seelog.Critical("Unable to add to state manager; unknown instantiation")
return
}
manager.state.Data[name] = &saveable
@@ -253,13 +252,13 @@ func (manager *basicStateManager) Save() error {
func (manager *basicStateManager) ForceSave() error {
manager.savingLock.Lock()
defer manager.savingLock.Unlock()
- log.Info("Saving state!")
+ seelog.Info("Saving state!")
s := manager.state
s.Version = ECSDataVersion
data, err := json.Marshal(s)
if err != nil {
- log.Error("Error saving state; could not marshal data; this is odd", "err", err)
+ seelog.Error("Error saving state; could not marshal data; this is odd", "err", err)
return err
}
return manager.writeFile(data)
@@ -269,10 +268,10 @@ func (manager *basicStateManager) ForceSave() error {
// the passed State object.
func (manager *basicStateManager) Load() error {
s := manager.state
- log.Info("Loading state!")
+ seelog.Info("Loading state!")
data, err := manager.readFile()
if err != nil {
- log.Error("Error reading existing state file", "err", err)
+ seelog.Error("Error reading existing state file", "err", err)
return err
}
if data == nil {
@@ -293,24 +292,24 @@ func (manager *basicStateManager) Load() error {
var intermediate intermediateState
err = json.Unmarshal(data, &intermediate)
if err != nil {
- log.Debug("Could not unmarshal into intermediate")
+ seelog.Debug("Could not unmarshal into intermediate")
return err
}
for key, rawJSON := range intermediate.Data {
actualPointer, ok := manager.state.Data[key]
if !ok {
- log.Error("Loading state: potentially malformed json key of " + key)
+ seelog.Error("Loading state: potentially malformed json key of " + key)
continue
}
err = json.Unmarshal(rawJSON, actualPointer)
if err != nil {
- log.Debug("Could not unmarshal into actual")
+ seelog.Debug("Could not unmarshal into actual")
return err
}
}
- log.Debug("Loaded state!", "state", s)
+ seelog.Debug("Loaded state!", "state", s)
return nil
}
@@ -319,7 +318,7 @@ func (manager *basicStateManager) dryRun(data []byte) error {
tmps := versionOnlyState{}
err := json.Unmarshal(data, &tmps)
if err != nil {
- log.Crit("Could not unmarshal existing state; corrupted data?", "err", err, "data", data)
+ seelog.Critical("Could not unmarshal existing state; corrupted data?", "err", err, "data", data)
return err
}
if tmps.Version > ECSDataVersion {
diff --git a/agent/utils/utils.go b/agent/utils/utils.go
index 3ebc3abe547..dac740a65c5 100644
--- a/agent/utils/utils.go
+++ b/agent/utils/utils.go
@@ -27,13 +27,10 @@ import (
"strings"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
- "github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
)
-var log = logger.ForModule("util")
-
func DefaultIfBlank(str string, default_value string) string {
if len(str) == 0 {
return default_value
diff --git a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
index de8adce636c..af4f6154d70 100644
--- a/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
+++ b/agent/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
@@ -6,12 +6,11 @@ package restjson
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go
import (
- "encoding/json"
- "io"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
@@ -33,6 +32,9 @@ func Build(r *request.Request) {
rest.Build(r)
if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
+ r.HTTPRequest.Header.Set("Content-Type", "application/json")
+ }
jsonrpc.Build(r)
}
}
@@ -56,17 +58,11 @@ func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var jsonErr jsonErrorResponse
- err := json.NewDecoder(r.HTTPResponse.Body).Decode(&jsonErr)
- if err == io.EOF {
+ err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
+ if err != nil {
r.Error = awserr.NewRequestFailure(
- awserr.New("SerializationError", r.HTTPResponse.Status, nil),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
- return
- } else if err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New("SerializationError", "failed decoding REST JSON error response", err),
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal response error", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/efs/api.go b/agent/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
index f24886fa39b..bc4815e74e8 100644
--- a/agent/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
+++ b/agent/vendor/github.com/aws/aws-sdk-go/service/efs/api.go
@@ -93,13 +93,14 @@ func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *reques
// scale to higher levels of aggregate throughput and operations per second
// with a tradeoff of slightly higher latencies for most file operations. The
// performance mode can't be changed after the file system has been created.
-// For more information, see Amazon EFS: Performance Modes (http://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html).
+// For more information, see Amazon EFS: Performance Modes (https://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html).
//
// After the file system is fully created, Amazon EFS sets its lifecycle state
// to available, at which point you can create one or more mount targets for
// the file system in your VPC. For more information, see CreateMountTarget.
-// You mount your Amazon EFS file system on an EC2 instances in your VPC via
-// the mount target. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
+// You mount your Amazon EFS file system on an EC2 instances in your VPC by
+// using the mount target. For more information, see Amazon EFS: How it Works
+// (https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
//
// This operation requires permissions for the elasticfilesystem:CreateFileSystem
// action.
@@ -205,14 +206,14 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ
// CreateMountTarget API operation for Amazon Elastic File System.
//
// Creates a mount target for a file system. You can then mount the file system
-// on EC2 instances via the mount target.
+// on EC2 instances by using the mount target.
//
// You can create one mount target in each Availability Zone in your VPC. All
// EC2 instances in a VPC within a given Availability Zone share a single mount
// target for a given file system. If you have multiple subnets in an Availability
// Zone, you create a mount target in one of the subnets. EC2 instances do not
// need to be in the same subnet as the mount target in order to access their
-// file system. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
+// file system. For more information, see Amazon EFS: How it Works (https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html).
//
// In the request, you also specify a file system ID for which you are creating
// the mount target and the file system's lifecycle state must be available.
@@ -231,9 +232,9 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ
// a MountTargetId and an IpAddress. You use this IP address when mounting the
// file system in an EC2 instance. You can also use the mount target's DNS name
// when mounting the file system. The EC2 instance on which you mount the file
-// system via the mount target can resolve the mount target's DNS name to its
-// IP address. For more information, see How it Works: Implementation Overview
-// (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation).
+// system by using the mount target can resolve the mount target's DNS name
+// to its IP address. For more information, see How it Works: Implementation
+// Overview (https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation).
//
// Note that you can create mount targets for a file system in only one VPC,
// and there can be only one mount target per Availability Zone. That is, if
@@ -250,42 +251,36 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ
//
// * Creates a new mount target in the specified subnet.
//
-// * Also creates a new network interface in the subnet as follows:
-//
-// If the request provides an IpAddress, Amazon EFS assigns that IP address
-// to the network interface. Otherwise, Amazon EFS assigns a free address
-// in the subnet (in the same way that the Amazon EC2 CreateNetworkInterface
-// call does when a request does not specify a primary private IP address).
-//
-// If the request provides SecurityGroups, this network interface is associated
+// * Also creates a new network interface in the subnet as follows: If the
+// request provides an IpAddress, Amazon EFS assigns that IP address to the
+// network interface. Otherwise, Amazon EFS assigns a free address in the
+// subnet (in the same way that the Amazon EC2 CreateNetworkInterface call
+// does when a request does not specify a primary private IP address). If
+// the request provides SecurityGroups, this network interface is associated
// with those security groups. Otherwise, it belongs to the default security
-// group for the subnet's VPC.
-//
-// Assigns the description Mount target fsmt-id for file system fs-id where
-// fsmt-id is the mount target ID, and fs-id is the FileSystemId.
-//
-// Sets the requesterManaged property of the network interface to true, and
-// the requesterId value to EFS.
-//
-// Each Amazon EFS mount target has one corresponding requester-managed EC2
-// network interface. After the network interface is created, Amazon EFS
-// sets the NetworkInterfaceId field in the mount target's description to
-// the network interface ID, and the IpAddress field to its address. If network
-// interface creation fails, the entire CreateMountTarget operation fails.
+// group for the subnet's VPC. Assigns the description Mount target fsmt-id
+// for file system fs-id where fsmt-id is the mount target ID, and fs-id
+// is the FileSystemId. Sets the requesterManaged property of the network
+// interface to true, and the requesterId value to EFS. Each Amazon EFS mount
+// target has one corresponding requester-managed EC2 network interface.
+// After the network interface is created, Amazon EFS sets the NetworkInterfaceId
+// field in the mount target's description to the network interface ID, and
+// the IpAddress field to its address. If network interface creation fails,
+// the entire CreateMountTarget operation fails.
//
// The CreateMountTarget call returns only after creating the network interface,
// but while the mount target state is still creating, you can check the mount
// target creation status by calling the DescribeMountTargets operation, which
// among other things returns the mount target state.
//
-// We recommend you create a mount target in each of the Availability Zones.
+// We recommend that you create a mount target in each of the Availability Zones.
// There are cost considerations for using a file system in an Availability
// Zone through a mount target created in another Availability Zone. For more
// information, see Amazon EFS (http://aws.amazon.com/efs/). In addition, by
// always using a mount target local to the instance's Availability Zone, you
// eliminate a partial failure scenario. If the Availability Zone in which your
-// mount target is created goes down, then you won't be able to access your
-// file system through that mount target.
+// mount target is created goes down, then you can't access your file system
+// through that mount target.
//
// This operation requires permissions for the following action on the file
// system:
@@ -341,8 +336,8 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ
// The calling account has reached the limit for elastic network interfaces
// for the specific AWS Region. The client should try to delete some elastic
// network interfaces or get the account limit raised. For more information,
-// see Amazon VPC Limits (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html)
-// in the Amazon VPC User Guide (see the Network interfaces per VPC entry in
+// see Amazon VPC Limits (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html)
+// in the Amazon VPC User Guide (see the Network interfaces per VPC entry in
// the table).
//
// * ErrCodeSecurityGroupLimitExceeded "SecurityGroupLimitExceeded"
@@ -416,8 +411,7 @@ func (c *EFS) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, o
output = &CreateTagsOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
@@ -511,8 +505,7 @@ func (c *EFS) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *reques
output = &DeleteFileSystemOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
@@ -618,8 +611,7 @@ func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *requ
output = &DeleteMountTargetOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
@@ -627,14 +619,14 @@ func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *requ
//
// Deletes the specified mount target.
//
-// This operation forcibly breaks any mounts of the file system via the mount
-// target that is being deleted, which might disrupt instances or applications
+// This operation forcibly breaks any mounts of the file system by using the
+// mount target that is being deleted, which might disrupt instances or applications
// using those mounts. To avoid applications getting cut off abruptly, you might
// consider unmounting any mounts of the mount target, if feasible. The operation
-// also deletes the associated network interface. Uncommitted writes may be
+// also deletes the associated network interface. Uncommitted writes might be
// lost, but breaking a mount target using this operation does not corrupt the
// file system itself. The file system you created remains. You can mount an
-// EC2 instance in your VPC via another mount target.
+// EC2 instance in your VPC by using another mount target.
//
// This operation requires permissions for the following action on the file
// system:
@@ -735,17 +727,16 @@ func (c *EFS) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, o
output = &DeleteTagsOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteTags API operation for Amazon Elastic File System.
//
// Deletes the specified tags from a file system. If the DeleteTags request
-// includes a tag key that does not exist, Amazon EFS ignores it and doesn't
+// includes a tag key that doesn't exist, Amazon EFS ignores it and doesn't
// cause an error. For more information about tags and related restrictions,
-// see Tag Restrictions (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
+// see Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
// in the AWS Billing and Cost Management User Guide.
//
// This operation requires permissions for the elasticfilesystem:DeleteTags
@@ -843,9 +834,10 @@ func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *
//
// When retrieving all file system descriptions, you can optionally specify
// the MaxItems parameter to limit the number of descriptions in a response.
-// If more file system descriptions remain, Amazon EFS returns a NextMarker,
-// an opaque token, in the response. In this case, you should send a subsequent
-// request with the Marker request parameter set to the value of NextMarker.
+// Currently, this number is automatically set to 10. If more file system descriptions
+// remain, Amazon EFS returns a NextMarker, an opaque token, in the response.
+// In this case, you should send a subsequent request with the Marker request
+// parameter set to the value of NextMarker.
//
// To retrieve a list of your file system descriptions, this operation is used
// in an iterative process, where DescribeFileSystems is called first without
@@ -853,9 +845,6 @@ func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *
// set to the value of the NextMarker from the previous response until the response
// has no NextMarker.
//
-// The implementation may return fewer than MaxItems file system descriptions
-// while still including a NextMarker value.
-//
// The order of file systems returned in the response of one DescribeFileSystems
// call and the order of file systems returned across the responses of a multi-call
// iteration is unspecified.
@@ -904,6 +893,100 @@ func (c *EFS) DescribeFileSystemsWithContext(ctx aws.Context, input *DescribeFil
return out, req.Send()
}
+const opDescribeLifecycleConfiguration = "DescribeLifecycleConfiguration"
+
+// DescribeLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeLifecycleConfiguration for more information on using the DescribeLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeLifecycleConfigurationRequest method.
+// req, resp := client.DescribeLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/DescribeLifecycleConfiguration
+func (c *EFS) DescribeLifecycleConfigurationRequest(input *DescribeLifecycleConfigurationInput) (req *request.Request, output *DescribeLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDescribeLifecycleConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/2015-02-01/file-systems/{FileSystemId}/lifecycle-configuration",
+ }
+
+ if input == nil {
+ input = &DescribeLifecycleConfigurationInput{}
+ }
+
+ output = &DescribeLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeLifecycleConfiguration API operation for Amazon Elastic File System.
+//
+// Returns the current LifecycleConfiguration object for the specified Amazon
+// EFS file system. EFS lifecycle management uses the LifecycleConfiguration
+// object to identify which files to move to the EFS Infrequent Access (IA)
+// storage class. For a file system without a LifecycleConfiguration object,
+// the call returns an empty array in the response.
+//
+// This operation requires permissions for the elasticfilesystem:DescribeLifecycleConfiguration
+// operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic File System's
+// API operation DescribeLifecycleConfiguration for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// Returned if an error occurred on the server side.
+//
+// * ErrCodeBadRequest "BadRequest"
+// Returned if the request is malformed or contains an error such as an invalid
+// parameter value or a missing required parameter.
+//
+// * ErrCodeFileSystemNotFound "FileSystemNotFound"
+// Returned if the specified FileSystemId value doesn't exist in the requester's
+// AWS account.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/DescribeLifecycleConfiguration
+func (c *EFS) DescribeLifecycleConfiguration(input *DescribeLifecycleConfigurationInput) (*DescribeLifecycleConfigurationOutput, error) {
+ req, out := c.DescribeLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DescribeLifecycleConfigurationWithContext is the same as DescribeLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EFS) DescribeLifecycleConfigurationWithContext(ctx aws.Context, input *DescribeLifecycleConfigurationInput, opts ...request.Option) (*DescribeLifecycleConfigurationOutput, error) {
+ req, out := c.DescribeLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDescribeMountTargetSecurityGroups = "DescribeMountTargetSecurityGroups"
// DescribeMountTargetSecurityGroupsRequest generates a "aws/request.Request" representing the
@@ -1147,7 +1230,7 @@ func (c *EFS) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Reques
//
// Returns the tags associated with a file system. The order of tags returned
// in the response of one DescribeTags call and the order of tags returned across
-// the responses of a multi-call iteration (when using pagination) is unspecified.
+// the responses of a multiple-call iteration (when using pagination) is unspecified.
//
// This operation requires permissions for the elasticfilesystem:DescribeTags
// action.
@@ -1232,8 +1315,7 @@ func (c *EFS) ModifyMountTargetSecurityGroupsRequest(input *ModifyMountTargetSec
output = &ModifyMountTargetSecurityGroupsOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler)
- req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
@@ -1308,6 +1390,123 @@ func (c *EFS) ModifyMountTargetSecurityGroupsWithContext(ctx aws.Context, input
return out, req.Send()
}
+const opPutLifecycleConfiguration = "PutLifecycleConfiguration"
+
+// PutLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutLifecycleConfiguration for more information on using the PutLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutLifecycleConfigurationRequest method.
+// req, resp := client.PutLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/PutLifecycleConfiguration
+func (c *EFS) PutLifecycleConfigurationRequest(input *PutLifecycleConfigurationInput) (req *request.Request, output *PutLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/2015-02-01/file-systems/{FileSystemId}/lifecycle-configuration",
+ }
+
+ if input == nil {
+ input = &PutLifecycleConfigurationInput{}
+ }
+
+ output = &PutLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutLifecycleConfiguration API operation for Amazon Elastic File System.
+//
+// Enables lifecycle management by creating a new LifecycleConfiguration object.
+// A LifecycleConfiguration object defines when files in an Amazon EFS file
+// system are automatically transitioned to the lower-cost EFS Infrequent Access
+// (IA) storage class. A LifecycleConfiguration applies to all files in a file
+// system.
+//
+// Each Amazon EFS file system supports one lifecycle configuration, which applies
+// to all files in the file system. If a LifecycleConfiguration object already
+// exists for the specified file system, a PutLifecycleConfiguration call modifies
+// the existing configuration. A PutLifecycleConfiguration call with an empty
+// LifecyclePolicies array in the request body deletes any existing LifecycleConfiguration
+// and disables lifecycle management.
+//
+// In the request, specify the following:
+//
+// * The ID for the file system for which you are enabling, disabling, or
+// modifying lifecycle management.
+//
+// * A LifecyclePolicies array of LifecyclePolicy objects that define when
+// files are moved to the IA storage class. The array can contain only one
+// LifecyclePolicy item.
+//
+// This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration
+// operation.
+//
+// To apply a LifecycleConfiguration object to an encrypted file system, you
+// need the same AWS Key Management Service (AWS KMS) permissions as when you
+// created the encrypted file system.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic File System's
+// API operation PutLifecycleConfiguration for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBadRequest "BadRequest"
+// Returned if the request is malformed or contains an error such as an invalid
+// parameter value or a missing required parameter.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// Returned if an error occurred on the server side.
+//
+// * ErrCodeFileSystemNotFound "FileSystemNotFound"
+// Returned if the specified FileSystemId value doesn't exist in the requester's
+// AWS account.
+//
+// * ErrCodeIncorrectFileSystemLifeCycleState "IncorrectFileSystemLifeCycleState"
+// Returned if the file system's lifecycle state is not "available".
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/PutLifecycleConfiguration
+func (c *EFS) PutLifecycleConfiguration(input *PutLifecycleConfigurationInput) (*PutLifecycleConfigurationOutput, error) {
+ req, out := c.PutLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutLifecycleConfigurationWithContext is the same as PutLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EFS) PutLifecycleConfigurationWithContext(ctx aws.Context, input *PutLifecycleConfigurationInput, opts ...request.Option) (*PutLifecycleConfigurationOutput, error) {
+ req, out := c.PutLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
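For orientation, a minimal usage sketch (not part of the patch) of the new operation through the vendored client; the session setup and the file system ID are illustrative assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	svc := efs.New(session.Must(session.NewSession()))

	// Transition files to IA after 30 days of no access; the file system
	// ID "fs-12345678" is a placeholder.
	out, err := svc.PutLifecycleConfiguration(&efs.PutLifecycleConfigurationInput{
		FileSystemId: aws.String("fs-12345678"),
		LifecyclePolicies: []*efs.LifecyclePolicy{
			{TransitionToIA: aws.String(efs.TransitionToIARulesAfter30Days)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.LifecyclePolicies)
}

Per the doc comment above, sending an empty (but non-nil) LifecyclePolicies slice instead would delete any existing configuration and disable lifecycle management.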
const opUpdateFileSystem = "UpdateFileSystem"
// UpdateFileSystemRequest generates a "aws/request.Request" representing the
@@ -1417,56 +1616,65 @@ func (c *EFS) UpdateFileSystemWithContext(ctx aws.Context, input *UpdateFileSyst
type CreateFileSystemInput struct {
_ struct{} `type:"structure"`
- // String of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent
+ // A string of up to 64 ASCII characters. Amazon EFS uses this to ensure idempotent
// creation.
//
// CreationToken is a required field
CreationToken *string `min:"1" type:"string" required:"true"`
// A Boolean value that, if true, creates an encrypted file system. When creating
- // an encrypted file system, you have the option of specifying a CreateFileSystemRequest$KmsKeyId
+ // an encrypted file system, you have the option of specifying CreateFileSystemRequest$KmsKeyId
// for an existing AWS Key Management Service (AWS KMS) customer master key
// (CMK). If you don't specify a CMK, then the default CMK for Amazon EFS, /aws/elasticfilesystem,
// is used to protect the encrypted file system.
Encrypted *bool `type:"boolean"`
// The ID of the AWS KMS CMK to be used to protect the encrypted file system.
- // This parameter is only required if you want to use a non-default CMK. If
- // this parameter is not specified, the default CMK for Amazon EFS is used.
- // This ID can be in one of the following formats:
+ // This parameter is only required if you want to use a nondefault CMK. If this
+ // parameter is not specified, the default CMK for Amazon EFS is used. This
+ // ID can be in one of the following formats:
//
- // * Key ID - A unique identifier of the key, for example, 1234abcd-12ab-34cd-56ef-1234567890ab.
+ // * Key ID - A unique identifier of the key, for example 1234abcd-12ab-34cd-56ef-1234567890ab.
//
- // * ARN - An Amazon Resource Name (ARN) for the key, for example, arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
+ // * ARN - An Amazon Resource Name (ARN) for the key, for example arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.
//
- // * Key alias - A previously created display name for a key. For example,
+ // * Key alias - A previously created display name for a key, for example
// alias/projectKey1.
//
- // * Key alias ARN - An ARN for a key alias, for example, arn:aws:kms:us-west-2:444455556666:alias/projectKey1.
+ // * Key alias ARN - An ARN for a key alias, for example arn:aws:kms:us-west-2:444455556666:alias/projectKey1.
//
// If KmsKeyId is specified, the CreateFileSystemRequest$Encrypted parameter
// must be set to true.
KmsKeyId *string `min:"1" type:"string"`
- // The PerformanceMode of the file system. We recommend generalPurpose performance
+ // The performance mode of the file system. We recommend generalPurpose performance
// mode for most file systems. File systems using the maxIO performance mode
// can scale to higher levels of aggregate throughput and operations per second
- // with a tradeoff of slightly higher latencies for most file operations. This
- // can't be changed after the file system has been created.
+ // with a tradeoff of slightly higher latencies for most file operations. The
+ // performance mode can't be changed after the file system has been created.
PerformanceMode *string `type:"string" enum:"PerformanceMode"`
// The throughput, measured in MiB/s, that you want to provision for a file
- // system that you're creating. The limit on throughput is 1024 MiB/s. You can
- // get these limits increased by contacting AWS Support. For more information,
- // see Amazon EFS Limits That You Can Increase (http://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits)
+ // system that you're creating. Valid values are 1-1024. Required if ThroughputMode
+ // is set to provisioned. The upper limit for throughput is 1024 MiB/s. You
+ // can get this limit increased by contacting AWS Support. For more information,
+ // see Amazon EFS Limits That You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits)
// in the Amazon EFS User Guide.
- ProvisionedThroughputInMibps *float64 `type:"double"`
+ ProvisionedThroughputInMibps *float64 `min:"1" type:"double"`
+
+ // A value that specifies to create one or more tags associated with the file
+ // system. Each tag is a user-defined key-value pair. Name your file system
+ // on creation by including a "Key":"Name","Value":"{value}" key-value pair.
+ Tags []*Tag `type:"list"`
// The throughput mode for the file system to be created. There are two throughput
- // modes to choose from for your file system: bursting and provisioned. You
- // can decrease your file system's throughput in Provisioned Throughput mode
- // or change between the throughput modes as long as it’s been more than 24
- // hours since the last decrease or throughput mode change.
+ // modes to choose from for your file system: bursting and provisioned. If you
+ // set ThroughputMode to provisioned, you must also set a value for ProvisionedThroughputInMibps.
+ // You can decrease your file system's throughput in Provisioned Throughput
+ // mode or change between the throughput modes as long as it’s been more than
+ // 24 hours since the last decrease or throughput mode change. For more information, see
+ // Specifying Throughput with Provisioned Mode (https://docs.aws.amazon.com/efs/latest/ug/performance.html#provisioned-throughput)
+ // in the Amazon EFS User Guide.
ThroughputMode *string `type:"string" enum:"ThroughputMode"`
}
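A sketch (not part of the patch) of how the new Tags field and the tightened ProvisionedThroughputInMibps minimum fit together; the creation token and tag values are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	svc := efs.New(session.Must(session.NewSession()))

	// Provisioned mode requires ProvisionedThroughputInMibps (>= 1); the
	// Name tag names the file system at creation time.
	fs, err := svc.CreateFileSystem(&efs.CreateFileSystemInput{
		CreationToken:                aws.String("example-token"),
		ThroughputMode:               aws.String(efs.ThroughputModeProvisioned),
		ProvisionedThroughputInMibps: aws.Float64(64),
		Tags: []*efs.Tag{
			{Key: aws.String("Name"), Value: aws.String("example-fs")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(fs.FileSystemId))
}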
@@ -1492,6 +1700,19 @@ func (s *CreateFileSystemInput) Validate() error {
if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1))
}
+ if s.ProvisionedThroughputInMibps != nil && *s.ProvisionedThroughputInMibps < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("ProvisionedThroughputInMibps", 1))
+ }
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -1529,6 +1750,12 @@ func (s *CreateFileSystemInput) SetProvisionedThroughputInMibps(v float64) *Crea
return s
}
+// SetTags sets the Tags field's value.
+func (s *CreateFileSystemInput) SetTags(v []*Tag) *CreateFileSystemInput {
+ s.Tags = v
+ return s
+}
+
// SetThroughputMode sets the ThroughputMode field's value.
func (s *CreateFileSystemInput) SetThroughputMode(v string) *CreateFileSystemInput {
s.ThroughputMode = &v
@@ -1538,7 +1765,7 @@ func (s *CreateFileSystemInput) SetThroughputMode(v string) *CreateFileSystemInp
type CreateMountTargetInput struct {
_ struct{} `type:"structure"`
- // ID of the file system for which to create the mount target.
+ // The ID of the file system for which to create the mount target.
//
// FileSystemId is a required field
FileSystemId *string `type:"string" required:"true"`
@@ -1550,7 +1777,7 @@ type CreateMountTargetInput struct {
// for the same VPC as subnet specified.
SecurityGroups []*string `type:"list"`
- // ID of the subnet to add the mount target in.
+ // The ID of the subnet to add the mount target in.
//
// SubnetId is a required field
SubnetId *string `type:"string" required:"true"`
@@ -1609,13 +1836,13 @@ func (s *CreateMountTargetInput) SetSubnetId(v string) *CreateMountTargetInput {
type CreateTagsInput struct {
_ struct{} `type:"structure"`
- // ID of the file system whose tags you want to modify (String). This operation
+ // The ID of the file system whose tags you want to modify (String). This operation
// modifies the tags only, not the file system.
//
// FileSystemId is a required field
FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
- // Array of Tag objects to add. Each Tag object is a key-value pair.
+ // An array of Tag objects to add. Each Tag object is a key-value pair.
//
// Tags is a required field
Tags []*Tag `type:"list" required:"true"`
@@ -1637,6 +1864,9 @@ func (s *CreateTagsInput) Validate() error {
if s.FileSystemId == nil {
invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
}
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
if s.Tags == nil {
invalidParams.Add(request.NewErrParamRequired("Tags"))
}
@@ -1686,7 +1916,7 @@ func (s CreateTagsOutput) GoString() string {
type DeleteFileSystemInput struct {
_ struct{} `type:"structure"`
- // ID of the file system you want to delete.
+ // The ID of the file system you want to delete.
//
// FileSystemId is a required field
FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
@@ -1708,6 +1938,9 @@ func (s *DeleteFileSystemInput) Validate() error {
if s.FileSystemId == nil {
invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
}
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -1738,7 +1971,7 @@ func (s DeleteFileSystemOutput) GoString() string {
type DeleteMountTargetInput struct {
_ struct{} `type:"structure"`
- // ID of the mount target to delete (String).
+ // The ID of the mount target to delete (String).
//
// MountTargetId is a required field
MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"`
@@ -1760,6 +1993,9 @@ func (s *DeleteMountTargetInput) Validate() error {
if s.MountTargetId == nil {
invalidParams.Add(request.NewErrParamRequired("MountTargetId"))
}
+ if s.MountTargetId != nil && len(*s.MountTargetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 1))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -1790,12 +2026,12 @@ func (s DeleteMountTargetOutput) GoString() string {
type DeleteTagsInput struct {
_ struct{} `type:"structure"`
- // ID of the file system whose tags you want to delete (String).
+ // The ID of the file system whose tags you want to delete (String).
//
// FileSystemId is a required field
FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
- // List of tag keys to delete.
+ // A list of tag keys to delete.
//
// TagKeys is a required field
TagKeys []*string `type:"list" required:"true"`
@@ -1817,6 +2053,9 @@ func (s *DeleteTagsInput) Validate() error {
if s.FileSystemId == nil {
invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
}
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
if s.TagKeys == nil {
invalidParams.Add(request.NewErrParamRequired("TagKeys"))
}
@@ -1870,10 +2109,9 @@ type DescribeFileSystemsInput struct {
Marker *string `location:"querystring" locationName:"Marker" type:"string"`
// (Optional) Specifies the maximum number of file systems to return in the
- // response (integer). This parameter value must be greater than 0. The number
- // of items that Amazon EFS returns is the minimum of the MaxItems parameter
- // specified in the request and the service's internal maximum number of items
- // per page.
+ // response (integer). Currently, this number is automatically set to 10, and
+ // other values are ignored. The response is paginated at 10 per page if you
+ // have more than 10 file systems.
MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
}
@@ -1930,7 +2168,7 @@ func (s *DescribeFileSystemsInput) SetMaxItems(v int64) *DescribeFileSystemsInpu
type DescribeFileSystemsOutput struct {
_ struct{} `type:"structure"`
- // Array of file system descriptions.
+ // An array of file system descriptions.
FileSystems []*FileSystemDescription `type:"list"`
// Present if provided by caller in the request (String).
@@ -1969,10 +2207,76 @@ func (s *DescribeFileSystemsOutput) SetNextMarker(v string) *DescribeFileSystems
return s
}
+type DescribeLifecycleConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the file system whose LifecycleConfiguration object you want to
+ // retrieve (String).
+ //
+ // FileSystemId is a required field
+ FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeLifecycleConfigurationInput"}
+ if s.FileSystemId == nil {
+ invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
+ }
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFileSystemId sets the FileSystemId field's value.
+func (s *DescribeLifecycleConfigurationInput) SetFileSystemId(v string) *DescribeLifecycleConfigurationInput {
+ s.FileSystemId = &v
+ return s
+}
+
+type DescribeLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of lifecycle management policies. Currently, EFS supports a maximum
+ // of one policy per file system.
+ LifecyclePolicies []*LifecyclePolicy `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetLifecyclePolicies sets the LifecyclePolicies field's value.
+func (s *DescribeLifecycleConfigurationOutput) SetLifecyclePolicies(v []*LifecyclePolicy) *DescribeLifecycleConfigurationOutput {
+ s.LifecyclePolicies = v
+ return s
+}
+
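Reading the configuration back might look like the following sketch (not part of the patch); it assumes the DescribeLifecycleConfiguration operation exposed by this revision of the generated client and a placeholder file system ID:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	svc := efs.New(session.Must(session.NewSession()))
	out, err := svc.DescribeLifecycleConfiguration(&efs.DescribeLifecycleConfigurationInput{
		FileSystemId: aws.String("fs-12345678"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Currently at most one policy per file system.
	for _, p := range out.LifecyclePolicies {
		fmt.Println(aws.StringValue(p.TransitionToIA))
	}
}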
type DescribeMountTargetSecurityGroupsInput struct {
_ struct{} `type:"structure"`
- // ID of the mount target whose security groups you want to retrieve.
+ // The ID of the mount target whose security groups you want to retrieve.
//
// MountTargetId is a required field
MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"`
@@ -1994,6 +2298,9 @@ func (s *DescribeMountTargetSecurityGroupsInput) Validate() error {
if s.MountTargetId == nil {
invalidParams.Add(request.NewErrParamRequired("MountTargetId"))
}
+ if s.MountTargetId != nil && len(*s.MountTargetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 1))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -2010,7 +2317,7 @@ func (s *DescribeMountTargetSecurityGroupsInput) SetMountTargetId(v string) *Des
type DescribeMountTargetSecurityGroupsOutput struct {
_ struct{} `type:"structure"`
- // Array of security groups.
+ // An array of security groups.
//
// SecurityGroups is a required field
SecurityGroups []*string `type:"list" required:"true"`
@@ -2044,8 +2351,9 @@ type DescribeMountTargetsInput struct {
// the previous returning call left off.
Marker *string `location:"querystring" locationName:"Marker" type:"string"`
- // (Optional) Maximum number of mount targets to return in the response. It
- // must be an integer with a value greater than zero.
+ // (Optional) Maximum number of mount targets to return in the response. Currently,
+ // this number is automatically set to 10, and other values are ignored. The
+ // response is paginated at 10 per page if you have more than 10 mount targets.
MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
// (Optional) ID of the mount target that you want to have described (String).
@@ -2148,18 +2456,19 @@ func (s *DescribeMountTargetsOutput) SetNextMarker(v string) *DescribeMountTarge
type DescribeTagsInput struct {
_ struct{} `type:"structure"`
- // ID of the file system whose tag set you want to retrieve.
+ // The ID of the file system whose tag set you want to retrieve.
//
// FileSystemId is a required field
FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
- // (Optional) Opaque pagination token returned from a previous DescribeTags
+ // (Optional) An opaque pagination token returned from a previous DescribeTags
// operation (String). If present, it specifies to continue the list from where
// the previous call left off.
Marker *string `location:"querystring" locationName:"Marker" type:"string"`
- // (Optional) Maximum number of file system tags to return in the response.
- // It must be an integer with a value greater than zero.
+ // (Optional) The maximum number of file system tags to return in the response.
+ // Currently, this number is automatically set to 10, and other values are ignored.
+ // The response is paginated at 10 per page if you have more than 10 tags.
MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"`
}
@@ -2179,6 +2488,9 @@ func (s *DescribeTagsInput) Validate() error {
if s.FileSystemId == nil {
invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
}
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
if s.MaxItems != nil && *s.MaxItems < 1 {
invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1))
}
@@ -2253,16 +2565,16 @@ func (s *DescribeTagsOutput) SetTags(v []*Tag) *DescribeTagsOutput {
return s
}
-// Description of the file system.
+// A description of the file system.
type FileSystemDescription struct {
_ struct{} `type:"structure"`
- // Time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).
+ // The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).
//
// CreationTime is a required field
CreationTime *time.Time `type:"timestamp" required:"true"`
- // Opaque string specified in the request.
+ // The opaque string specified in the request.
//
// CreationToken is a required field
CreationToken *string `min:"1" type:"string" required:"true"`
@@ -2270,7 +2582,7 @@ type FileSystemDescription struct {
// A Boolean value that, if true, indicates that the file system is encrypted.
Encrypted *bool `type:"boolean"`
- // ID of the file system, assigned by Amazon EFS.
+ // The ID of the file system, assigned by Amazon EFS.
//
// FileSystemId is a required field
FileSystemId *string `type:"string" required:"true"`
@@ -2279,58 +2591,65 @@ type FileSystemDescription struct {
// that was used to protect the encrypted file system.
KmsKeyId *string `min:"1" type:"string"`
- // Lifecycle phase of the file system.
+ // The lifecycle phase of the file system.
//
// LifeCycleState is a required field
LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"`
// You can add tags to a file system, including a Name tag. For more information,
- // see CreateTags. If the file system has a Name tag, Amazon EFS returns the
- // value in this field.
+ // see CreateFileSystem. If the file system has a Name tag, Amazon EFS returns
+ // the value in this field.
Name *string `type:"string"`
- // Current number of mount targets that the file system has. For more information,
+ // The current number of mount targets that the file system has. For more information,
// see CreateMountTarget.
//
// NumberOfMountTargets is a required field
NumberOfMountTargets *int64 `type:"integer" required:"true"`
- // AWS account that created the file system. If the file system was created
+ // The AWS account that created the file system. If the file system was created
// by an IAM user, the parent account to which the user belongs is the owner.
//
// OwnerId is a required field
OwnerId *string `type:"string" required:"true"`
- // The PerformanceMode of the file system.
+ // The performance mode of the file system.
//
// PerformanceMode is a required field
PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"`
// The throughput, measured in MiB/s, that you want to provision for a file
- // system. The limit on throughput is 1024 MiB/s. You can get these limits increased
+ // system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned.
+ // The limit on throughput is 1024 MiB/s. You can get these limits increased
// by contacting AWS Support. For more information, see Amazon EFS Limits That
- // You Can Increase (http://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits)
+ // You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits)
// in the Amazon EFS User Guide.
- ProvisionedThroughputInMibps *float64 `type:"double"`
-
- // Latest known metered size (in bytes) of data stored in the file system, in
- // its Value field, and the time at which that size was determined in its Timestamp
- // field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z.
- // The SizeInBytes value doesn't represent the size of a consistent snapshot
- // of the file system, but it is eventually consistent when there are no writes
- // to the file system. That is, SizeInBytes represents actual size only if the
- // file system is not modified for a period longer than a couple of hours. Otherwise,
- // the value is not the exact size that the file system was at any point in
- // time.
+ ProvisionedThroughputInMibps *float64 `min:"1" type:"double"`
+
+ // The latest known metered size (in bytes) of data stored in the file system,
+ // in its Value field, and the time at which that size was determined in its
+ // Timestamp field. The Timestamp value is the integer number of seconds since
+ // 1970-01-01T00:00:00Z. The SizeInBytes value doesn't represent the size of
+ // a consistent snapshot of the file system, but it is eventually consistent
+ // when there are no writes to the file system. That is, SizeInBytes represents
+ // actual size only if the file system is not modified for a period longer than
+ // a couple of hours. Otherwise, the value is not the exact size that the file
+ // system was at any point in time.
//
// SizeInBytes is a required field
SizeInBytes *FileSystemSize `type:"structure" required:"true"`
+ // The tags associated with the file system, presented as an array of Tag objects.
+ //
+ // Tags is a required field
+ Tags []*Tag `type:"list" required:"true"`
+
// The throughput mode for a file system. There are two throughput modes to
- // choose from for your file system: bursting and provisioned. You can decrease
- // your file system's throughput in Provisioned Throughput mode or change between
- // the throughput modes as long as it’s been more than 24 hours since the last
- // decrease or throughput mode change.
+ // choose from for your file system: bursting and provisioned. If you set ThroughputMode
+ // to provisioned, you must also set a value for ProvisionedThroughputInMibps.
+ // You can decrease your file system's throughput in Provisioned Throughput
+ // mode or change between the throughput modes as long as it’s been more than
+ // 24 hours since the last decrease or throughput mode change.
ThroughputMode *string `type:"string" enum:"ThroughputMode"`
}
@@ -2416,31 +2735,45 @@ func (s *FileSystemDescription) SetSizeInBytes(v *FileSystemSize) *FileSystemDes
return s
}
+// SetTags sets the Tags field's value.
+func (s *FileSystemDescription) SetTags(v []*Tag) *FileSystemDescription {
+ s.Tags = v
+ return s
+}
+
// SetThroughputMode sets the ThroughputMode field's value.
func (s *FileSystemDescription) SetThroughputMode(v string) *FileSystemDescription {
s.ThroughputMode = &v
return s
}
-// Latest known metered size (in bytes) of data stored in the file system, in
-// its Value field, and the time at which that size was determined in its Timestamp
-// field. Note that the value does not represent the size of a consistent snapshot
+// The latest known metered size (in bytes) of data stored in the file system,
+// in its Value field, and the time at which that size was determined in its
+// Timestamp field. The value doesn't represent the size of a consistent snapshot
// of the file system, but it is eventually consistent when there are no writes
-// to the file system. That is, the value will represent the actual size only
-// if the file system is not modified for a period longer than a couple of hours.
+// to the file system. That is, the value represents the actual size only if
+// the file system is not modified for a period longer than a couple of hours.
// Otherwise, the value is not necessarily the exact size the file system was
// at any instant in time.
type FileSystemSize struct {
_ struct{} `type:"structure"`
- // Time at which the size of data, returned in the Value field, was determined.
+ // The time at which the size of data, returned in the Value field, was determined.
// The value is the integer number of seconds since 1970-01-01T00:00:00Z.
Timestamp *time.Time `type:"timestamp"`
- // Latest known metered size (in bytes) of data stored in the file system.
+ // The latest known metered size (in bytes) of data stored in the file system.
//
// Value is a required field
Value *int64 `type:"long" required:"true"`
+
+ // The latest known metered size (in bytes) of data stored in the Infrequent
+ // Access storage class.
+ ValueInIA *int64 `type:"long"`
+
+ // The latest known metered size (in bytes) of data stored in the Standard storage
+ // class.
+ ValueInStandard *int64 `type:"long"`
}
// String returns the string representation
@@ -2465,15 +2798,54 @@ func (s *FileSystemSize) SetValue(v int64) *FileSystemSize {
return s
}
+// SetValueInIA sets the ValueInIA field's value.
+func (s *FileSystemSize) SetValueInIA(v int64) *FileSystemSize {
+ s.ValueInIA = &v
+ return s
+}
+
+// SetValueInStandard sets the ValueInStandard field's value.
+func (s *FileSystemSize) SetValueInStandard(v int64) *FileSystemSize {
+ s.ValueInStandard = &v
+ return s
+}
+
+// Describes a policy used by EFS lifecycle management to transition files to
+// the Infrequent Access (IA) storage class.
+type LifecyclePolicy struct {
+ _ struct{} `type:"structure"`
+
+ // A value that describes the period of time that a file is not accessed, after
+ // which it transitions to the IA storage class. Metadata operations such as
+ // listing the contents of a directory don't count as file access events.
+ TransitionToIA *string `type:"string" enum:"TransitionToIARules"`
+}
+
+// String returns the string representation
+func (s LifecyclePolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecyclePolicy) GoString() string {
+ return s.String()
+}
+
+// SetTransitionToIA sets the TransitionToIA field's value.
+func (s *LifecyclePolicy) SetTransitionToIA(v string) *LifecyclePolicy {
+ s.TransitionToIA = &v
+ return s
+}
+
type ModifyMountTargetSecurityGroupsInput struct {
_ struct{} `type:"structure"`
- // ID of the mount target whose security groups you want to modify.
+ // The ID of the mount target whose security groups you want to modify.
//
// MountTargetId is a required field
MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"`
- // Array of up to five VPC security group IDs.
+ // An array of up to five VPC security group IDs.
SecurityGroups []*string `type:"list"`
}
@@ -2493,6 +2865,9 @@ func (s *ModifyMountTargetSecurityGroupsInput) Validate() error {
if s.MountTargetId == nil {
invalidParams.Add(request.NewErrParamRequired("MountTargetId"))
}
+ if s.MountTargetId != nil && len(*s.MountTargetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 1))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -2530,12 +2905,12 @@ func (s ModifyMountTargetSecurityGroupsOutput) GoString() string {
type MountTargetDescription struct {
_ struct{} `type:"structure"`
- // ID of the file system for which the mount target is intended.
+ // The ID of the file system for which the mount target is intended.
//
// FileSystemId is a required field
FileSystemId *string `type:"string" required:"true"`
- // Address at which the file system may be mounted via the mount target.
+ // Address at which the file system can be mounted by using the mount target.
IpAddress *string `type:"string"`
// Lifecycle state of the mount target.
@@ -2548,14 +2923,14 @@ type MountTargetDescription struct {
// MountTargetId is a required field
MountTargetId *string `type:"string" required:"true"`
- // ID of the network interface that Amazon EFS created when it created the mount
- // target.
+ // The ID of the network interface that Amazon EFS created when it created the
+ // mount target.
NetworkInterfaceId *string `type:"string"`
// AWS account ID that owns the resource.
OwnerId *string `type:"string"`
- // ID of the mount target's subnet.
+ // The ID of the mount target's subnet.
//
// SubnetId is a required field
SubnetId *string `type:"string" required:"true"`
@@ -2613,17 +2988,101 @@ func (s *MountTargetDescription) SetSubnetId(v string) *MountTargetDescription {
return s
}
-// A tag is a key-value pair. Allowed characters: letters, whitespace, and numbers,
-// representable in UTF-8, and the following characters: + - = . _ : /
+type PutLifecycleConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the file system for which you are creating the LifecycleConfiguration
+ // object (String).
+ //
+ // FileSystemId is a required field
+ FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
+
+ // An array of LifecyclePolicy objects that define the file system's LifecycleConfiguration
+ // object. A LifecycleConfiguration object tells lifecycle management when to
+ // transition files from the Standard storage class to the Infrequent Access
+ // storage class.
+ //
+ // LifecyclePolicies is a required field
+ LifecyclePolicies []*LifecyclePolicy `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s PutLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutLifecycleConfigurationInput"}
+ if s.FileSystemId == nil {
+ invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
+ }
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
+ if s.LifecyclePolicies == nil {
+ invalidParams.Add(request.NewErrParamRequired("LifecyclePolicies"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFileSystemId sets the FileSystemId field's value.
+func (s *PutLifecycleConfigurationInput) SetFileSystemId(v string) *PutLifecycleConfigurationInput {
+ s.FileSystemId = &v
+ return s
+}
+
+// SetLifecyclePolicies sets the LifecyclePolicies field's value.
+func (s *PutLifecycleConfigurationInput) SetLifecyclePolicies(v []*LifecyclePolicy) *PutLifecycleConfigurationInput {
+ s.LifecyclePolicies = v
+ return s
+}
+
+type PutLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of lifecycle management policies. Currently, EFS supports a maximum
+ // of one policy per file system.
+ LifecyclePolicies []*LifecyclePolicy `type:"list"`
+}
+
+// String returns the string representation
+func (s PutLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetLifecyclePolicies sets the LifecyclePolicies field's value.
+func (s *PutLifecycleConfigurationOutput) SetLifecyclePolicies(v []*LifecyclePolicy) *PutLifecycleConfigurationOutput {
+ s.LifecyclePolicies = v
+ return s
+}
+
+// A tag is a key-value pair. Allowed characters are letters, white space, and
+// numbers that can be represented in UTF-8, and the following characters:
+// + - = . _ : /
type Tag struct {
_ struct{} `type:"structure"`
- // Tag key (String). The key can't start with aws:.
+ // The tag key (String). The key can't start with aws:.
//
// Key is a required field
Key *string `min:"1" type:"string" required:"true"`
- // Value of the tag key.
+ // The value of the tag key.
//
// Value is a required field
Value *string `type:"string" required:"true"`
@@ -2679,13 +3138,16 @@ type UpdateFileSystemInput struct {
FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"`
// (Optional) The amount of throughput, in MiB/s, that you want to provision
- // for your file system. If you're not updating the amount of provisioned throughput
- // for your file system, you don't need to provide this value in your request.
- ProvisionedThroughputInMibps *float64 `type:"double"`
+ // for your file system. Valid values are 1-1024. Required if ThroughputMode
+ // is changed to provisioned on update. If you're not updating the amount of
+ // provisioned throughput for your file system, you don't need to provide this
+ // value in your request.
+ ProvisionedThroughputInMibps *float64 `min:"1" type:"double"`
// (Optional) The throughput mode that you want your file system to use. If
// you're not updating your throughput mode, you don't need to provide this
- // value in your request.
+ // value in your request. If you are changing the ThroughputMode to provisioned,
+ // you must also set a value for ProvisionedThroughputInMibps.
ThroughputMode *string `type:"string" enum:"ThroughputMode"`
}
@@ -2705,6 +3167,12 @@ func (s *UpdateFileSystemInput) Validate() error {
if s.FileSystemId == nil {
invalidParams.Add(request.NewErrParamRequired("FileSystemId"))
}
+ if s.FileSystemId != nil && len(*s.FileSystemId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1))
+ }
+ if s.ProvisionedThroughputInMibps != nil && *s.ProvisionedThroughputInMibps < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("ProvisionedThroughputInMibps", 1))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -2730,16 +3198,16 @@ func (s *UpdateFileSystemInput) SetThroughputMode(v string) *UpdateFileSystemInp
return s
}
-// Description of the file system.
+// A description of the file system.
type UpdateFileSystemOutput struct {
_ struct{} `type:"structure"`
- // Time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).
+ // The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z).
//
// CreationTime is a required field
CreationTime *time.Time `type:"timestamp" required:"true"`
- // Opaque string specified in the request.
+ // The opaque string specified in the request.
//
// CreationToken is a required field
CreationToken *string `min:"1" type:"string" required:"true"`
@@ -2747,7 +3215,7 @@ type UpdateFileSystemOutput struct {
// A Boolean value that, if true, indicates that the file system is encrypted.
Encrypted *bool `type:"boolean"`
- // ID of the file system, assigned by Amazon EFS.
+ // The ID of the file system, assigned by Amazon EFS.
//
// FileSystemId is a required field
FileSystemId *string `type:"string" required:"true"`
@@ -2756,58 +3224,65 @@ type UpdateFileSystemOutput struct {
// that was used to protect the encrypted file system.
KmsKeyId *string `min:"1" type:"string"`
- // Lifecycle phase of the file system.
+ // The lifecycle phase of the file system.
//
// LifeCycleState is a required field
LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"`
// You can add tags to a file system, including a Name tag. For more information,
- // see CreateTags. If the file system has a Name tag, Amazon EFS returns the
- // value in this field.
+ // see CreateFileSystem. If the file system has a Name tag, Amazon EFS returns
+ // the value in this field.
Name *string `type:"string"`
- // Current number of mount targets that the file system has. For more information,
+ // The current number of mount targets that the file system has. For more information,
// see CreateMountTarget.
//
// NumberOfMountTargets is a required field
NumberOfMountTargets *int64 `type:"integer" required:"true"`
- // AWS account that created the file system. If the file system was created
+ // The AWS account that created the file system. If the file system was created
// by an IAM user, the parent account to which the user belongs is the owner.
//
// OwnerId is a required field
OwnerId *string `type:"string" required:"true"`
- // The PerformanceMode of the file system.
+ // The performance mode of the file system.
//
// PerformanceMode is a required field
PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"`
// The throughput, measured in MiB/s, that you want to provision for a file
- // system. The limit on throughput is 1024 MiB/s. You can get these limits increased
+ // system. Valid values are 1-1024. Required if ThroughputMode is set to provisioned.
+ // The limit on throughput is 1024 MiB/s. You can get these limits increased
// by contacting AWS Support. For more information, see Amazon EFS Limits That
- // You Can Increase (http://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits)
+ // You Can Increase (https://docs.aws.amazon.com/efs/latest/ug/limits.html#soft-limits)
// in the Amazon EFS User Guide.
- ProvisionedThroughputInMibps *float64 `type:"double"`
-
- // Latest known metered size (in bytes) of data stored in the file system, in
- // its Value field, and the time at which that size was determined in its Timestamp
- // field. The Timestamp value is the integer number of seconds since 1970-01-01T00:00:00Z.
- // The SizeInBytes value doesn't represent the size of a consistent snapshot
- // of the file system, but it is eventually consistent when there are no writes
- // to the file system. That is, SizeInBytes represents actual size only if the
- // file system is not modified for a period longer than a couple of hours. Otherwise,
- // the value is not the exact size that the file system was at any point in
- // time.
+ ProvisionedThroughputInMibps *float64 `min:"1" type:"double"`
+
+ // The latest known metered size (in bytes) of data stored in the file system,
+ // in its Value field, and the time at which that size was determined in its
+ // Timestamp field. The Timestamp value is the integer number of seconds since
+ // 1970-01-01T00:00:00Z. The SizeInBytes value doesn't represent the size of
+ // a consistent snapshot of the file system, but it is eventually consistent
+ // when there are no writes to the file system. That is, SizeInBytes represents
+ // actual size only if the file system is not modified for a period longer than
+ // a couple of hours. Otherwise, the value is not the exact size that the file
+ // system was at any point in time.
//
// SizeInBytes is a required field
SizeInBytes *FileSystemSize `type:"structure" required:"true"`
+ // The tags associated with the file system, presented as an array of Tag objects.
+ //
+ // Tags is a required field
+ Tags []*Tag `type:"list" required:"true"`
+
// The throughput mode for a file system. There are two throughput modes to
- // choose from for your file system: bursting and provisioned. You can decrease
- // your file system's throughput in Provisioned Throughput mode or change between
- // the throughput modes as long as it’s been more than 24 hours since the last
- // decrease or throughput mode change.
+ // choose from for your file system: bursting and provisioned. If you set ThroughputMode
+ // to provisioned, you must also set a value for ProvisionedThroughputInMibps.
+ // You can decrease your file system's throughput in Provisioned Throughput
+ // mode or change between the throughput modes as long as it’s been more than
+ // 24 hours since the last decrease or throughput mode change.
ThroughputMode *string `type:"string" enum:"ThroughputMode"`
}
@@ -2893,6 +3368,12 @@ func (s *UpdateFileSystemOutput) SetSizeInBytes(v *FileSystemSize) *UpdateFileSy
return s
}
+// SetTags sets the Tags field's value.
+func (s *UpdateFileSystemOutput) SetTags(v []*Tag) *UpdateFileSystemOutput {
+ s.Tags = v
+ return s
+}
+
// SetThroughputMode sets the ThroughputMode field's value.
func (s *UpdateFileSystemOutput) SetThroughputMode(v string) *UpdateFileSystemOutput {
s.ThroughputMode = &v
@@ -2931,3 +3412,17 @@ const (
// ThroughputModeProvisioned is a ThroughputMode enum value
ThroughputModeProvisioned = "provisioned"
)
+
+const (
+ // TransitionToIARulesAfter14Days is a TransitionToIARules enum value
+ TransitionToIARulesAfter14Days = "AFTER_14_DAYS"
+
+ // TransitionToIARulesAfter30Days is a TransitionToIARules enum value
+ TransitionToIARulesAfter30Days = "AFTER_30_DAYS"
+
+ // TransitionToIARulesAfter60Days is a TransitionToIARules enum value
+ TransitionToIARulesAfter60Days = "AFTER_60_DAYS"
+
+ // TransitionToIARulesAfter90Days is a TransitionToIARules enum value
+ TransitionToIARulesAfter90Days = "AFTER_90_DAYS"
+)
diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/efs/doc.go b/agent/vendor/github.com/aws/aws-sdk-go/service/efs/doc.go
index 606891eab2f..ce6c5159449 100644
--- a/agent/vendor/github.com/aws/aws-sdk-go/service/efs/doc.go
+++ b/agent/vendor/github.com/aws/aws-sdk-go/service/efs/doc.go
@@ -7,7 +7,7 @@
// for use with Amazon EC2 instances in the AWS Cloud. With Amazon EFS, storage
// capacity is elastic, growing and shrinking automatically as you add and remove
// files, so your applications have the storage they need, when they need it.
-// For more information, see the User Guide (http://docs.aws.amazon.com/efs/latest/ug/api-reference.html).
+// For more information, see the User Guide (https://docs.aws.amazon.com/efs/latest/ug/api-reference.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01 for more information on this service.
//
diff --git a/agent/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go b/agent/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go
index b616a864c7d..7a7eb3b1bf7 100644
--- a/agent/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go
+++ b/agent/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go
@@ -100,8 +100,8 @@ const (
// The calling account has reached the limit for elastic network interfaces
// for the specific AWS Region. The client should try to delete some elastic
// network interfaces or get the account limit raised. For more information,
- // see Amazon VPC Limits (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html)
- // in the Amazon VPC User Guide (see the Network interfaces per VPC entry in
+ // see Amazon VPC Limits (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html)
+ // in the Amazon VPC User Guide (see the Network interfaces per VPC entry in
// the table).
ErrCodeNetworkInterfaceLimitExceeded = "NetworkInterfaceLimitExceeded"
diff --git a/agent/vendor/github.com/cihub/seelog/README.markdown b/agent/vendor/github.com/cihub/seelog/README.markdown
index b9acd2d1d44..7dd1ab3532a 100644
--- a/agent/vendor/github.com/cihub/seelog/README.markdown
+++ b/agent/vendor/github.com/cihub/seelog/README.markdown
@@ -69,7 +69,10 @@ Feel free to push issues that could make Seelog better: https://github.com/cihub
Changelog
---------------
-
+* **v2.6** : Config using code and custom formatters
+ * Configuration using code in addition to xml (All internal receiver/dispatcher/logger types are now exported).
+ * Custom formatters. Check [wiki](https://github.com/cihub/seelog/wiki/Custom-formatters)
+ * Bugfixes and internal improvements.
* **v2.5** : Interaction with other systems. Part 2: custom receivers
* Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers)
* Added 'LoggerFromCustomReceiver'
diff --git a/agent/vendor/github.com/cihub/seelog/archive/archive.go b/agent/vendor/github.com/cihub/seelog/archive/archive.go
new file mode 100644
index 00000000000..923036f259e
--- /dev/null
+++ b/agent/vendor/github.com/cihub/seelog/archive/archive.go
@@ -0,0 +1,198 @@
+package archive
+
+import (
+ "archive/tar"
+ "archive/zip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "time"
+
+ "github.com/cihub/seelog/archive/gzip"
+)
+
+// Reader is the interface for reading files from an archive.
+type Reader interface {
+ NextFile() (name string, err error)
+ io.Reader
+}
+
+// ReadCloser is the interface that groups Reader with the Close method.
+type ReadCloser interface {
+ Reader
+ io.Closer
+}
+
+// Writer is the interface for writing files to an archived format.
+type Writer interface {
+ NextFile(name string, fi os.FileInfo) error
+ io.Writer
+}
+
+// WriteCloser is the interface that groups Writer with the Close method.
+type WriteCloser interface {
+ Writer
+ io.Closer
+}
+
+type nopCloser struct{ Reader }
+
+func (nopCloser) Close() error { return nil }
+
+// NopCloser returns a ReadCloser with a no-op Close method wrapping the
+// provided Reader r.
+func NopCloser(r Reader) ReadCloser {
+ return nopCloser{r}
+}
+
+// Copy copies from src to dest until either EOF is reached on src or an error
+// occurs.
+//
+// When the archive format of src matches that of dst, Copy streams the files
+// directly into dst. Otherwise, copy buffers the contents to disk to compute
+// headers before writing to dst.
+func Copy(dst Writer, src Reader) error {
+ switch src := src.(type) {
+ case tarReader:
+ if dst, ok := dst.(tarWriter); ok {
+ return copyTar(dst, src)
+ }
+ case zipReader:
+ if dst, ok := dst.(zipWriter); ok {
+ return copyZip(dst, src)
+ }
+ // Switch on concrete type because gzip has no special methods
+ case *gzip.Reader:
+ if dst, ok := dst.(*gzip.Writer); ok {
+ _, err := io.Copy(dst, src)
+ return err
+ }
+ }
+
+ return copyBuffer(dst, src)
+}
+
+func copyBuffer(dst Writer, src Reader) (err error) {
+ const defaultFileMode = 0666
+
+ buf, err := ioutil.TempFile("", "archive_copy_buffer")
+ if err != nil {
+ return err
+ }
+ defer os.Remove(buf.Name()) // Do not care about failure removing temp
+ defer buf.Close() // Do not care about failure closing temp
+ for {
+ // Handle the next file
+ name, err := src.NextFile()
+ switch err {
+ case io.EOF: // Done copying
+ return nil
+ default: // Failed to write: bail out
+ return err
+ case nil: // Proceed below
+ }
+
+ // Buffer the file
+ if _, err := io.Copy(buf, src); err != nil {
+ return fmt.Errorf("buffer to disk: %v", err)
+ }
+
+ // Seek to the start of the file for full file copy
+ if _, err := buf.Seek(0, os.SEEK_SET); err != nil {
+ return err
+ }
+
+ // Set desired file permissions
+ if err := os.Chmod(buf.Name(), defaultFileMode); err != nil {
+ return err
+ }
+ fi, err := buf.Stat()
+ if err != nil {
+ return err
+ }
+
+ // Write the buffered file
+ if err := dst.NextFile(name, fi); err != nil {
+ return err
+ }
+ if _, err := io.Copy(dst, buf); err != nil {
+ return fmt.Errorf("copy to dst: %v", err)
+ }
+ if err := buf.Truncate(0); err != nil {
+ return err
+ }
+ if _, err := buf.Seek(0, os.SEEK_SET); err != nil {
+ return err
+ }
+ }
+}
+
+type tarReader interface {
+ Next() (*tar.Header, error)
+ io.Reader
+}
+
+type tarWriter interface {
+ WriteHeader(hdr *tar.Header) error
+ io.Writer
+}
+
+type zipReader interface {
+ Files() []*zip.File
+}
+
+type zipWriter interface {
+ CreateHeader(fh *zip.FileHeader) (io.Writer, error)
+}
+
+func copyTar(w tarWriter, r tarReader) error {
+ for {
+ hdr, err := r.Next()
+ switch err {
+ case io.EOF:
+ return nil
+ default: // Handle error
+ return err
+ case nil: // Proceed below
+ }
+
+ info := hdr.FileInfo()
+ // Skip directories
+ if info.IsDir() {
+ continue
+ }
+ if err := w.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(w, r); err != nil {
+ return err
+ }
+ }
+}
+
+func copyZip(zw zipWriter, r zipReader) error {
+ for _, f := range r.Files() {
+ if err := copyZipFile(zw, f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func copyZipFile(zw zipWriter, f *zip.File) error {
+ rc, err := f.Open()
+ if err != nil {
+ return err
+ }
+ defer rc.Close() // Read-only
+
+ hdr := f.FileHeader
+ hdr.SetModTime(time.Now())
+ w, err := zw.CreateHeader(&hdr)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(w, rc)
+ return err
+}
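A sketch (not part of the patch) of the cross-format path described in Copy's doc comment: converting a tar archive to zip forces the buffered-to-disk branch because the reader and writer types don't match. File names are placeholders.

package main

import (
	"log"
	"os"

	"github.com/cihub/seelog/archive"
	"github.com/cihub/seelog/archive/tar"
	"github.com/cihub/seelog/archive/zip"
)

func main() {
	src, err := os.Open("logs.tar") // placeholder input
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("logs.zip") // placeholder output
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	zw := zip.NewWriter(dst)
	defer zw.Close()

	// The tar reader does not match the zip writer, so Copy buffers each
	// file to a temp file to compute zip headers before writing.
	if err := archive.Copy(zw, tar.NewReader(src)); err != nil {
		log.Fatal(err)
	}
}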
diff --git a/agent/vendor/github.com/cihub/seelog/archive/gzip/gzip.go b/agent/vendor/github.com/cihub/seelog/archive/gzip/gzip.go
new file mode 100644
index 00000000000..ea121018a90
--- /dev/null
+++ b/agent/vendor/github.com/cihub/seelog/archive/gzip/gzip.go
@@ -0,0 +1,64 @@
+// Package gzip implements reading and writing of gzip format compressed files.
+// See the compress/gzip package for more details.
+package gzip
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+)
+
+// Reader is an io.Reader that can be read to retrieve uncompressed data from a
+// gzip-format compressed file.
+type Reader struct {
+ gzip.Reader
+ name string
+ isEOF bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+func NewReader(r io.Reader, name string) (*Reader, error) {
+ gr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ return &Reader{
+ Reader: *gr,
+ name: name,
+ }, nil
+}
+
+// NextFile returns the file name. Calls subsequent to the first call will
+// return EOF.
+func (r *Reader) NextFile() (name string, err error) {
+ if r.isEOF {
+ return "", io.EOF
+ }
+
+ r.isEOF = true
+ return r.name, nil
+}
+
+// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w.
+type Writer struct {
+ gzip.Writer
+ name string
+ noMoreFiles bool
+}
+
+// NextFile records the name of the single file to be written. Because gzip
+// compresses only one stream, calling NextFile more than once returns an error.
+func (w *Writer) NextFile(name string, _ os.FileInfo) error {
+ if w.noMoreFiles {
+ return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name)
+ }
+ w.noMoreFiles = true
+ w.name = name
+ return nil
+}
+
+// NewWriter returns a new Writer. Writes to the returned writer are compressed
+// and written to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{Writer: *gzip.NewWriter(w)}
+}
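The single-file constraint of this wrapper shows up in a short sketch (not part of the patch; paths are placeholders):

package main

import (
	"log"
	"os"

	"github.com/cihub/seelog/archive/gzip"
)

func main() {
	f, err := os.Create("app.log.gz") // placeholder output
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	w := gzip.NewWriter(f)
	defer w.Close()

	// The first call records the name (the FileInfo is ignored); a second
	// call would fail, since gzip holds exactly one stream.
	if err := w.NextFile("app.log", nil); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
}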
diff --git a/agent/vendor/github.com/cihub/seelog/archive/tar/tar.go b/agent/vendor/github.com/cihub/seelog/archive/tar/tar.go
new file mode 100644
index 00000000000..8dd87f57347
--- /dev/null
+++ b/agent/vendor/github.com/cihub/seelog/archive/tar/tar.go
@@ -0,0 +1,72 @@
+package tar
+
+import (
+ "archive/tar"
+ "io"
+ "os"
+)
+
+// Reader provides sequential access to the contents of a tar archive.
+type Reader struct {
+ tar.Reader
+}
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader {
+ return &Reader{Reader: *tar.NewReader(r)}
+}
+
+// NextFile advances to the next file in the tar archive.
+func (r *Reader) NextFile() (name string, err error) {
+ hdr, err := r.Next()
+ if err != nil {
+ return "", err
+ }
+ return hdr.Name, nil
+}
+
+// Writer provides sequential writing of a tar archive in POSIX.1 format.
+type Writer struct {
+ tar.Writer
+ closers []io.Closer
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{Writer: *tar.NewWriter(w)}
+}
+
+// NewWriteMultiCloser creates a new Writer writing to w that also closes all
+// closers in order on close.
+func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer {
+ return &Writer{
+ Writer: *tar.NewWriter(w),
+ closers: closers,
+ }
+}
+
+// NextFile computes and writes a header and prepares to accept the file's
+// contents.
+func (w *Writer) NextFile(name string, fi os.FileInfo) error {
+ if name == "" {
+ name = fi.Name()
+ }
+ hdr, err := tar.FileInfoHeader(fi, name)
+ if err != nil {
+ return err
+ }
+ hdr.Name = name
+ return w.WriteHeader(hdr)
+}
+
+// Close closes the tar archive and all other closers, flushing any unwritten
+// data to the underlying writer.
+func (w *Writer) Close() error {
+ err := w.Writer.Close()
+ for _, c := range w.closers {
+ if cerr := c.Close(); cerr != nil && err == nil {
+ err = cerr
+ }
+ }
+ return err
+}
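NewWriteMultiCloser exists for stacked writers such as .tar.gz, where closing the tar writer must also close the gzip layer beneath it. A sketch (not part of the patch; paths are placeholders):

package main

import (
	"io"
	"log"
	"os"

	"github.com/cihub/seelog/archive/gzip"
	"github.com/cihub/seelog/archive/tar"
)

func main() {
	f, err := os.Create("logs.tar.gz") // placeholder output
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	gz := gzip.NewWriter(f)
	// Close flushes the tar stream, then closes the gzip writer as well.
	w := tar.NewWriteMultiCloser(gz, gz)
	defer w.Close()

	src, err := os.Open("app.log") // placeholder input
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	fi, err := src.Stat()
	if err != nil {
		log.Fatal(err)
	}
	if err := w.NextFile("app.log", fi); err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, src); err != nil {
		log.Fatal(err)
	}
}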
diff --git a/agent/vendor/github.com/cihub/seelog/archive/zip/zip.go b/agent/vendor/github.com/cihub/seelog/archive/zip/zip.go
new file mode 100644
index 00000000000..4210b03b9fb
--- /dev/null
+++ b/agent/vendor/github.com/cihub/seelog/archive/zip/zip.go
@@ -0,0 +1,89 @@
+package zip
+
+import (
+ "archive/zip"
+ "io"
+ "os"
+)
+
+// Reader provides sequential access to the contents of a zip archive.
+type Reader struct {
+ zip.Reader
+ unread []*zip.File
+ rc io.ReadCloser
+}
+
+// NewReader returns a new Reader reading from r, which is assumed to have the
+// given size in bytes.
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
+ zr, err := zip.NewReader(r, size)
+ if err != nil {
+ return nil, err
+ }
+ return &Reader{Reader: *zr}, nil
+}
+
+// NextFile advances to the next file in the zip archive.
+func (r *Reader) NextFile() (name string, err error) {
+ // Initialize unread
+ if r.unread == nil {
+ r.unread = r.Files()[:]
+ }
+
+ // Close previous file
+ if r.rc != nil {
+ r.rc.Close() // Read-only
+ }
+
+ if len(r.unread) == 0 {
+ return "", io.EOF
+ }
+
+ // Open and return next unread
+ f := r.unread[0]
+ name, r.unread = f.Name, r.unread[1:]
+ r.rc, err = f.Open()
+ if err != nil {
+ return "", err
+ }
+ return name, nil
+}
+
+func (r *Reader) Read(p []byte) (n int, err error) {
+ return r.rc.Read(p)
+}
+
+// Files returns the full list of files in the zip archive.
+func (r *Reader) Files() []*zip.File {
+ return r.File
+}
+
+// Writer provides sequential writing of a zip archive.
+type Writer struct {
+ zip.Writer
+ w io.Writer
+}
+
+// NewWriter returns a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{Writer: *zip.NewWriter(w)}
+}
+
+// NextFile computes and writes a header and prepares to accept the file's
+// contents.
+func (w *Writer) NextFile(name string, fi os.FileInfo) error {
+ if name == "" {
+ name = fi.Name()
+ }
+ hdr, err := zip.FileInfoHeader(fi)
+ if err != nil {
+ return err
+ }
+ hdr.Name = name
+ w.w, err = w.CreateHeader(hdr)
+ return err
+}
+
+func (w *Writer) Write(p []byte) (n int, err error) {
+ return w.w.Write(p)
+}
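
A minimal sketch of consuming this wrapper: NextFile closes the previous entry, opens the next, and returns io.EOF once the listing is exhausted, while Read serves the currently open entry. The archive name is hypothetical.

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "log"
        "os"

        "github.com/cihub/seelog/archive/zip"
    )

    func main() {
        f, err := os.Open("old.zip") // hypothetical archive
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        fi, err := f.Stat()
        if err != nil {
            log.Fatal(err)
        }
        r, err := zip.NewReader(f, fi.Size())
        if err != nil {
            log.Fatal(err)
        }

        // NextFile reports io.EOF when the entries are exhausted;
        // ReadAll drains the entry that NextFile just opened.
        for {
            name, err := r.NextFile()
            if err == io.EOF {
                break
            }
            if err != nil {
                log.Fatal(err)
            }
            data, err := ioutil.ReadAll(r)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s: %d bytes\n", name, len(data))
        }
    }
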
diff --git a/agent/vendor/github.com/cihub/seelog/cfg_config.go b/agent/vendor/github.com/cihub/seelog/cfg_config.go
index c7d848126fa..76554fca0c5 100644
--- a/agent/vendor/github.com/cihub/seelog/cfg_config.go
+++ b/agent/vendor/github.com/cihub/seelog/cfg_config.go
@@ -27,6 +27,7 @@ package seelog
import (
"bytes"
"encoding/xml"
+ "fmt"
"io"
"os"
)
@@ -186,3 +187,26 @@ func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error)
return createLoggerFromFullConfig(conf)
}
+
+func CloneLogger(logger LoggerInterface) (LoggerInterface, error) {
+ switch logger := logger.(type) {
+ default:
+ return nil, fmt.Errorf("unexpected type %T", logger)
+ case *asyncAdaptiveLogger:
+ clone, err := NewAsyncAdaptiveLogger(logger.commonLogger.config, logger.minInterval, logger.maxInterval, logger.criticalMsgCount)
+ if err != nil {
+ return nil, err
+ }
+ return clone, nil
+ case *asyncLoopLogger:
+ return NewAsyncLoopLogger(logger.commonLogger.config), nil
+ case *asyncTimerLogger:
+ clone, err := NewAsyncTimerLogger(logger.commonLogger.config, logger.interval)
+ if err != nil {
+ return nil, err
+ }
+ return clone, nil
+ case *syncLogger:
+ return NewSyncLogger(logger.commonLogger.config), nil
+ }
+}
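
CloneLogger constructs a second, independent logger of the same concrete type from the shared parsed config; unrecognized logger types are rejected. A minimal usage sketch, with an illustrative inline config:

    package main

    import (
        "log"

        "github.com/cihub/seelog"
    )

    func main() {
        base, err := seelog.LoggerFromConfigAsString(
            `<seelog type="sync"><outputs><console/></outputs></seelog>`)
        if err != nil {
            log.Fatal(err)
        }
        defer base.Flush()

        // The clone shares the parsed config but is an independent logger,
        // so either one can be closed without disturbing the other.
        clone, err := seelog.CloneLogger(base)
        if err != nil {
            log.Fatal(err)
        }
        defer clone.Flush()

        base.Info("from the original")
        clone.Info("from the clone")
    }
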
diff --git a/agent/vendor/github.com/cihub/seelog/cfg_parser.go b/agent/vendor/github.com/cihub/seelog/cfg_parser.go
index 7fb9aabf057..921bc16a50b 100644
--- a/agent/vendor/github.com/cihub/seelog/cfg_parser.go
+++ b/agent/vendor/github.com/cihub/seelog/cfg_parser.go
@@ -83,6 +83,8 @@ const (
rollingFileDataPatternAttr = "datepattern"
rollingFileArchiveAttr = "archivetype"
rollingFileArchivePathAttr = "archivepath"
+ rollingFileArchiveExplodedAttr = "archiveexploded"
+ rollingFileFullNameAttr = "fullname"
bufferedWriterID = "buffered"
bufferedSizeAttr = "size"
bufferedFlushPeriodAttr = "flushperiod"
@@ -1008,6 +1010,7 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
var rArchiveType rollingArchiveType
var rArchivePath string
+	var rArchiveExploded bool
if !archiveAttrExists {
rArchiveType = rollingArchiveNone
rArchivePath = ""
@@ -1020,12 +1023,27 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
if rArchiveType == rollingArchiveNone {
rArchivePath = ""
} else {
+ if rArchiveExplodedAttr, ok := node.attributes[rollingFileArchiveExplodedAttr]; ok {
+ if rArchiveExploded, err = strconv.ParseBool(rArchiveExplodedAttr); err != nil {
+				return nil, fmt.Errorf("archive exploded should be true or false, but was %v",
+					rArchiveExplodedAttr)
+ }
+ }
+
rArchivePath, ok = node.attributes[rollingFileArchivePathAttr]
- if !ok {
- rArchivePath, ok = rollingArchiveTypesDefaultNames[rArchiveType]
- if !ok {
- return nil, fmt.Errorf("cannot get default filename for archive type = %v",
- rArchiveType)
+ if ok {
+ if rArchivePath == "" {
+ return nil, fmt.Errorf("empty archive path is not supported")
+ }
+ } else {
+ if rArchiveExploded {
+ rArchivePath = rollingArchiveDefaultExplodedName
+ } else {
+ rArchivePath, err = rollingArchiveTypeDefaultName(rArchiveType, false)
+ if err != nil {
+ return nil, err
+ }
}
}
}
@@ -1045,7 +1063,7 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
if rollingType == rollingTypeSize {
err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr,
rollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr,
- rollingFileArchivePathAttr, rollingFileNameModeAttr)
+ rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr)
if err != nil {
return nil, err
}
@@ -1069,7 +1087,7 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
}
}
- rollingWriter, err := NewRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode)
+ rollingWriter, err := NewRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode, rArchiveExploded)
if err != nil {
return nil, err
}
@@ -1079,7 +1097,8 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
} else if rollingType == rollingTypeTime {
err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr,
rollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr,
- rollingFileArchivePathAttr, rollingFileNameModeAttr)
+ rollingFileArchivePathAttr, rollingFileArchiveExplodedAttr, rollingFileNameModeAttr,
+ rollingFileFullNameAttr)
if err != nil {
return nil, err
}
@@ -1093,12 +1112,24 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
}
}
+ fullName := false
+ fn, ok := node.attributes[rollingFileFullNameAttr]
+ if ok {
+ if fn == "true" {
+ fullName = true
+ } else if fn == "false" {
+ fullName = false
+ } else {
+ return nil, errors.New("node '" + node.name + "' has incorrect '" + rollingFileFullNameAttr + "' attribute value")
+ }
+ }
+
dataPattern, ok := node.attributes[rollingFileDataPatternAttr]
if !ok {
return nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr)
}
- rollingWriter, err := NewRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, rollingIntervalAny, nameMode)
+ rollingWriter, err := NewRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, nameMode, rArchiveExploded, fullName)
if err != nil {
return nil, err
}
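
Taken together, the parser now accepts two extra rollingfile attributes: archiveexploded (one compressed file per roll, with archivepath treated as a directory) and fullname (keep the date stamp in the live file's name, valid for the date roller). A hedged config sketch wiring both through the Go API; paths, pattern, and maxrolls are hypothetical:

    package main

    import (
        "log"

        "github.com/cihub/seelog"
    )

    const config = `
    <seelog>
        <outputs>
            <rollingfile type="date" filename="/var/log/ecs/agent.log"
                         datepattern="2006-01-02-15"
                         archivetype="gzip"
                         archivepath="/var/log/ecs/archive"
                         archiveexploded="true"
                         fullname="true"
                         maxrolls="24"/>
        </outputs>
    </seelog>`

    func main() {
        logger, err := seelog.LoggerFromConfigAsString(config)
        if err != nil {
            log.Fatal(err)
        }
        if err := seelog.ReplaceLogger(logger); err != nil {
            log.Fatal(err)
        }
        defer seelog.Flush()
        seelog.Info("rolling hourly, archiving each roll as its own .gz")
    }
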
diff --git a/agent/vendor/github.com/cihub/seelog/common_context.go b/agent/vendor/github.com/cihub/seelog/common_context.go
index 04bc2235e29..230a76ca186 100644
--- a/agent/vendor/github.com/cihub/seelog/common_context.go
+++ b/agent/vendor/github.com/cihub/seelog/common_context.go
@@ -25,21 +25,28 @@
package seelog
import (
+ "errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
+ "sync"
"time"
)
-var workingDir = "/"
+var (
+ workingDir = "/"
+ stackCache map[uintptr]*logContext
+ stackCacheLock sync.RWMutex
+)
func init() {
wd, err := os.Getwd()
if err == nil {
workingDir = filepath.ToSlash(wd) + "/"
}
+ stackCache = make(map[uintptr]*logContext)
}
// Represents runtime caller context.
@@ -69,24 +76,54 @@ func currentContext(custom interface{}) (LogContextInterface, error) {
return specifyContext(1, custom)
}
-func extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, line int, err error) {
- pc, fp, ln, ok := runtime.Caller(skip)
- if !ok {
- err = fmt.Errorf("error during runtime.Caller")
- return
+func extractCallerInfo(skip int) (*logContext, error) {
+ var stack [1]uintptr
+ if runtime.Callers(skip+1, stack[:]) != 1 {
+ return nil, errors.New("error during runtime.Callers")
+ }
+ pc := stack[0]
+
+ // do we have a cache entry?
+ stackCacheLock.RLock()
+ ctx, ok := stackCache[pc]
+ stackCacheLock.RUnlock()
+ if ok {
+ return ctx, nil
+ }
+
+ // look up the details of the given caller
+ funcInfo := runtime.FuncForPC(pc)
+ if funcInfo == nil {
+ return nil, errors.New("error during runtime.FuncForPC")
}
- line = ln
- fullPath = fp
- if strings.HasPrefix(fp, workingDir) {
- shortPath = fp[len(workingDir):]
+
+ var shortPath string
+ fullPath, line := funcInfo.FileLine(pc)
+ if strings.HasPrefix(fullPath, workingDir) {
+ shortPath = fullPath[len(workingDir):]
} else {
- shortPath = fp
+ shortPath = fullPath
}
- funcName = runtime.FuncForPC(pc).Name()
+ funcName := funcInfo.Name()
if strings.HasPrefix(funcName, workingDir) {
funcName = funcName[len(workingDir):]
}
- return
+
+ ctx = &logContext{
+ funcName: funcName,
+ line: line,
+ shortPath: shortPath,
+ fullPath: fullPath,
+ fileName: filepath.Base(fullPath),
+ }
+
+ // save the details in the cache; note that it's possible we might
+ // have written an entry into the map in between the test above and
+ // this section, but the behaviour is still correct
+ stackCacheLock.Lock()
+ stackCache[pc] = ctx
+ stackCacheLock.Unlock()
+ return ctx, nil
}
// Returns context of the function with placed "skip" stack frames of the caller
@@ -100,12 +137,15 @@ func specifyContext(skip int, custom interface{}) (LogContextInterface, error) {
err := fmt.Errorf("can not skip negative stack frames")
return &errorContext{callTime, err}, err
}
- fullPath, shortPath, funcName, line, err := extractCallerInfo(skip + 2)
+ caller, err := extractCallerInfo(skip + 2)
if err != nil {
return &errorContext{callTime, err}, err
}
- _, fileName := filepath.Split(fullPath)
- return &logContext{funcName, line, shortPath, fullPath, fileName, callTime, custom}, nil
+ ctx := new(logContext)
+ *ctx = *caller
+ ctx.callTime = callTime
+ ctx.custom = custom
+ return ctx, nil
}
// Represents a normal runtime caller context.
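
The rewritten extractCallerInfo caches one logContext per program counter, so repeated logging from the same call site skips the FuncForPC/FileLine work. A standalone sketch of the same pattern; pcCache and callerName are illustrative, not part of seelog:

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    var (
        pcCache = make(map[uintptr]string)
        pcMu    sync.RWMutex
    )

    func callerName(skip int) string {
        var stack [1]uintptr
        if runtime.Callers(skip+2, stack[:]) != 1 {
            return "unknown"
        }
        pc := stack[0]

        // Fast path: serve repeat lookups from the cache.
        pcMu.RLock()
        name, ok := pcCache[pc]
        pcMu.RUnlock()
        if ok {
            return name
        }

        fn := runtime.FuncForPC(pc)
        if fn == nil {
            return "unknown"
        }
        name = fn.Name()

        // A racing goroutine may have stored the same entry already;
        // overwriting it is harmless, as the vendored comment notes.
        pcMu.Lock()
        pcCache[pc] = name
        pcMu.Unlock()
        return name
    }

    func main() {
        for i := 0; i < 2; i++ {
            // Same call site, same PC: iteration two hits the cache.
            fmt.Println(callerName(0))
        }
    }
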
diff --git a/agent/vendor/github.com/cihub/seelog/format.go b/agent/vendor/github.com/cihub/seelog/format.go
index 32682f3462a..ec47b45704b 100644
--- a/agent/vendor/github.com/cihub/seelog/format.go
+++ b/agent/vendor/github.com/cihub/seelog/format.go
@@ -99,6 +99,7 @@ var formatterFuncs = map[string]FormatterFunc{
"UTCTime": formatterUTCTime,
"Ns": formatterNs,
"UTCNs": formatterUTCNs,
+ "r": formatterr,
"n": formattern,
"t": formattert,
}
@@ -422,6 +423,10 @@ func formatterUTCNs(message string, level LogLevel, context LogContextInterface)
return context.CallTime().UTC().UnixNano()
}
+func formatterr(message string, level LogLevel, context LogContextInterface) interface{} {
+ return "\r"
+}
+
func formattern(message string, level LogLevel, context LogContextInterface) interface{} {
return "\n"
}
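
The new %r verb simply expands to a carriage return, which makes CRLF record endings expressible in a format string. A hedged usage sketch; the format id and message are arbitrary:

    package main

    import (
        "log"

        "github.com/cihub/seelog"
    )

    func main() {
        // %r expands to "\r", so "%r%n" ends each record with CRLF.
        logger, err := seelog.LoggerFromConfigAsString(
            `<seelog type="sync">
        <outputs formatid="crlf"><console/></outputs>
        <formats><format id="crlf" format="%Time [%Level] %Msg%r%n"/></formats>
    </seelog>`)
        if err != nil {
            log.Fatal(err)
        }
        defer logger.Flush()
        logger.Info("hello")
    }
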
diff --git a/agent/vendor/github.com/cihub/seelog/internals_fsutils.go b/agent/vendor/github.com/cihub/seelog/internals_fsutils.go
index 5baa6ba6165..c0a0e0e4686 100644
--- a/agent/vendor/github.com/cihub/seelog/internals_fsutils.go
+++ b/agent/vendor/github.com/cihub/seelog/internals_fsutils.go
@@ -1,11 +1,8 @@
package seelog
import (
- "archive/zip"
- "bytes"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -275,15 +272,6 @@ func getOpenFilesByDirectoryAsync(
return nil
}
-func copyFile(sf *os.File, dst string) (int64, error) {
- df, err := os.Create(dst)
- if err != nil {
- return 0, err
- }
- defer df.Close()
- return io.Copy(df, sf)
-}
-
// fileExists return flag whether a given file exists
// and operation error if an unclassified failure occurs.
func fileExists(path string) (bool, error) {
@@ -330,74 +318,3 @@ func tryRemoveFile(filePath string) (err error) {
}
return
}
-
-// Unzips a specified zip file. Returns filename->filebytes map.
-func unzip(archiveName string) (map[string][]byte, error) {
- // Open a zip archive for reading.
- r, err := zip.OpenReader(archiveName)
- if err != nil {
- return nil, err
- }
- defer r.Close()
-
- // Files to be added to archive
- // map file name to contents
- files := make(map[string][]byte)
-
- // Iterate through the files in the archive,
- // printing some of their contents.
- for _, f := range r.File {
- rc, err := f.Open()
- if err != nil {
- return nil, err
- }
-
- bts, err := ioutil.ReadAll(rc)
- rcErr := rc.Close()
-
- if err != nil {
- return nil, err
- }
- if rcErr != nil {
- return nil, rcErr
- }
-
- files[f.Name] = bts
- }
-
- return files, nil
-}
-
-// Creates a zip file with the specified file names and byte contents.
-func createZip(archiveName string, files map[string][]byte) error {
- // Create a buffer to write our archive to.
- buf := new(bytes.Buffer)
-
- // Create a new zip archive.
- w := zip.NewWriter(buf)
-
- // Write files
- for fpath, fcont := range files {
- f, err := w.Create(fpath)
- if err != nil {
- return err
- }
- _, err = f.Write([]byte(fcont))
- if err != nil {
- return err
- }
- }
-
- // Make sure to check the error on Close.
- err := w.Close()
- if err != nil {
- return err
- }
-
- err = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions)
- if err != nil {
- return err
- }
-
- return nil
-}
diff --git a/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go b/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go
index 2422a67cfe7..9535a579811 100644
--- a/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go
+++ b/agent/vendor/github.com/cihub/seelog/writers_rollingfilewriter.go
@@ -26,13 +26,20 @@ package seelog
import (
"fmt"
+ "io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
+ "sync"
"time"
+
+ "github.com/cihub/seelog/archive"
+ "github.com/cihub/seelog/archive/gzip"
+ "github.com/cihub/seelog/archive/tar"
+ "github.com/cihub/seelog/archive/zip"
)
// Common constants
@@ -71,27 +78,6 @@ func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {
return 0, false
}
-type rollingIntervalType uint8
-
-const (
- rollingIntervalAny = iota
- rollingIntervalDaily
-)
-
-var rollingInvervalTypesStringRepresentation = map[rollingIntervalType]string{
- rollingIntervalDaily: "daily",
-}
-
-func rollingIntervalTypeFromString(rollingTypeStr string) (rollingIntervalType, bool) {
- for tp, tpStr := range rollingInvervalTypesStringRepresentation {
- if tpStr == rollingTypeStr {
- return tp, true
- }
- }
-
- return 0, false
-}
-
var rollingTypesStringRepresentation = map[rollingType]string{
rollingTypeSize: "size",
rollingTypeTime: "date",
@@ -113,11 +99,87 @@ type rollingArchiveType uint8
const (
rollingArchiveNone = iota
rollingArchiveZip
+ rollingArchiveGzip
)
var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
rollingArchiveNone: "none",
rollingArchiveZip: "zip",
+ rollingArchiveGzip: "gzip",
+}
+
+type archiver func(f *os.File, exploded bool) archive.WriteCloser
+
+type unarchiver func(f *os.File) (archive.ReadCloser, error)
+
+type compressionType struct {
+ extension string
+ handleMultipleEntries bool
+ archiver archiver
+ unarchiver unarchiver
+}
+
+var compressionTypes = map[rollingArchiveType]compressionType{
+ rollingArchiveZip: {
+ extension: ".zip",
+ handleMultipleEntries: true,
+ archiver: func(f *os.File, _ bool) archive.WriteCloser {
+ return zip.NewWriter(f)
+ },
+ unarchiver: func(f *os.File) (archive.ReadCloser, error) {
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ r, err := zip.NewReader(f, fi.Size())
+ if err != nil {
+ return nil, err
+ }
+ return archive.NopCloser(r), nil
+ },
+ },
+ rollingArchiveGzip: {
+ extension: ".gz",
+ handleMultipleEntries: false,
+ archiver: func(f *os.File, exploded bool) archive.WriteCloser {
+ gw := gzip.NewWriter(f)
+ if exploded {
+ return gw
+ }
+ return tar.NewWriteMultiCloser(gw, gw)
+ },
+ unarchiver: func(f *os.File) (archive.ReadCloser, error) {
+ gr, err := gzip.NewReader(f, f.Name())
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine if the gzip is a tar
+ tr := tar.NewReader(gr)
+ _, err = tr.Next()
+ isTar := err == nil
+
+ // Reset to beginning of file
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ gr.Reset(f)
+
+ if isTar {
+ return archive.NopCloser(tar.NewReader(gr)), nil
+ }
+ return gr, nil
+ },
+ },
+}
+
+func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string {
+	if !compressionType.handleMultipleEntries && !exploded {
+		return name + ".tar" + compressionType.extension
+	}
+	return name + compressionType.extension
}
func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
@@ -130,22 +192,30 @@ func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveT
return 0, false
}
-// Default names for different archivation types
-var rollingArchiveTypesDefaultNames = map[rollingArchiveType]string{
- rollingArchiveZip: "log.zip",
+// Default names for different archive types
+var rollingArchiveDefaultExplodedName = "old"
+
+func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) {
+ compressionType, ok := compressionTypes[archiveType]
+ if !ok {
+ return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType)
+ }
+ return compressionType.rollingArchiveTypeName("log", exploded), nil
}
// rollerVirtual is an interface that represents all virtual funcs that are
// called in different rolling writer subtypes.
type rollerVirtual interface {
- needsToRoll() (bool, error) // Returns true if needs to switch to another file.
+ needsToRoll() bool // Returns true if needs to switch to another file.
isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok.
sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger.
- // Creates a new froll history file using the contents of current file and special filename of the latest roll (prefix/ postfix).
- // If lastRollName is empty (""), then it means that there is no latest roll (current is the first one)
- getNewHistoryRollFileName(lastRollName string) string
- getCurrentModifiedFileName(originalFileName string, first bool) (string, error) // Returns filename modified according to specific logger rules
+ // getNewHistoryRollFileName is called whenever we are about to roll the
+ // current log file. It returns the name the current log file should be
+ // rolled to.
+ getNewHistoryRollFileName(otherHistoryFiles []string) string
+
+ getCurrentFileName() string
}
// rollingFileWriter writes received messages to a file, until time interval passes
@@ -154,42 +224,47 @@ type rollerVirtual interface {
// files count, if you want, and then the rolling writer would delete older ones when
// the files count exceed the specified limit.
type rollingFileWriter struct {
- fileName string // current file name. May differ from original in date rolling loggers
- originalFileName string // original one
- currentDirPath string
- currentFile *os.File
- currentFileSize int64
- rollingType rollingType // Rolling mode (Files roll by size/date/...)
- archiveType rollingArchiveType
- archivePath string
- maxRolls int
- nameMode rollingNameMode
- self rollerVirtual // Used for virtual calls
-}
-
-func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode) (*rollingFileWriter, error) {
+ fileName string // log file name
+ currentDirPath string
+ currentFile *os.File
+ currentName string
+ currentFileSize int64
+ rollingType rollingType // Rolling mode (Files roll by size/date/...)
+ archiveType rollingArchiveType
+ archivePath string
+ archiveExploded bool
+ fullName bool
+ maxRolls int
+ nameMode rollingNameMode
+ self rollerVirtual // Used for virtual calls
+ rollLock sync.Mutex
+}
+
+func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode,
+ archiveExploded bool, fullName bool) (*rollingFileWriter, error) {
rw := new(rollingFileWriter)
rw.currentDirPath, rw.fileName = filepath.Split(fpath)
if len(rw.currentDirPath) == 0 {
rw.currentDirPath = "."
}
- rw.originalFileName = rw.fileName
rw.rollingType = rtype
rw.archiveType = atype
rw.archivePath = apath
rw.nameMode = namemode
rw.maxRolls = maxr
+ rw.archiveExploded = archiveExploded
+ rw.fullName = fullName
return rw, nil
}
func (rw *rollingFileWriter) hasRollName(file string) bool {
switch rw.nameMode {
case rollingNameModePostfix:
- rname := rw.originalFileName + rollingLogHistoryDelimiter
+ rname := rw.fileName + rollingLogHistoryDelimiter
return strings.HasPrefix(file, rname)
case rollingNameModePrefix:
- rname := rollingLogHistoryDelimiter + rw.originalFileName
+ rname := rollingLogHistoryDelimiter + rw.fileName
return strings.HasSuffix(file, rname)
}
return false
@@ -212,7 +287,7 @@ func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) {
}
var validRollNames []string
for _, file := range files {
- if file != rw.fileName && rw.hasRollName(file) {
+ if rw.hasRollName(file) {
rname := rw.getFileRollName(file)
if rw.self.isFileRollNameValid(rname) {
validRollNames = append(validRollNames, rname)
@@ -225,7 +300,7 @@ func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) {
}
validSortedFiles := make([]string, len(sortedTails))
for i, v := range sortedTails {
- validSortedFiles[i] = rw.createFullFileName(rw.originalFileName, v)
+ validSortedFiles[i] = rw.createFullFileName(rw.fileName, v)
}
return validSortedFiles, nil
}
@@ -240,32 +315,140 @@ func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error {
return err
}
}
+ rw.currentName = rw.self.getCurrentFileName()
+ filePath := filepath.Join(rw.currentDirPath, rw.currentName)
- rw.fileName, err = rw.self.getCurrentModifiedFileName(rw.originalFileName, first)
+	// This will either open the existing file (without truncating it) or
+	// create it if necessary. Append mode avoids any race conditions.
+ rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
if err != nil {
return err
}
- filePath := filepath.Join(rw.currentDirPath, rw.fileName)
- // If exists
- stat, err := os.Lstat(filePath)
- if err == nil {
- rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, defaultFilePermissions)
+ stat, err := rw.currentFile.Stat()
+ if err != nil {
+ rw.currentFile.Close()
+ rw.currentFile = nil
+ return err
+ }
+
+ rw.currentFileSize = stat.Size()
+ return nil
+}
- stat, err = os.Lstat(filePath)
+func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) (err error) {
+ closeWithError := func(c io.Closer) {
+ if cerr := c.Close(); cerr != nil && err == nil {
+ err = cerr
+ }
+ }
+
+ rollPath := filepath.Join(rw.currentDirPath, logFilename)
+ src, err := os.Open(rollPath)
+ if err != nil {
+ return err
+ }
+ defer src.Close() // Read-only
+
+ // Buffer to a temporary file on the same partition
+ // Note: archivePath is a path to a directory when handling exploded logs
+ dst, err := rw.tempArchiveFile(rw.archivePath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ closeWithError(dst)
if err != nil {
- return err
+ os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file
+ return
}
- rw.currentFileSize = stat.Size()
- } else {
- rw.currentFile, err = os.Create(filePath)
- rw.currentFileSize = 0
+ // Finalize archive by swapping the buffered archive into place
+ err = os.Rename(dst.Name(), filepath.Join(rw.archivePath,
+ compressionType.rollingArchiveTypeName(logFilename, true)))
+ }()
+
+ // archive entry
+ w := compressionType.archiver(dst, true)
+ defer closeWithError(w)
+ fi, err := src.Stat()
+ if err != nil {
+ return err
+ }
+ if err := w.NextFile(logFilename, fi); err != nil {
+ return err
+ }
+ _, err = io.Copy(w, src)
+ return err
+}
+
+func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) (err error) {
+ closeWithError := func(c io.Closer) {
+ if cerr := c.Close(); cerr != nil && err == nil {
+ err = cerr
+ }
}
+
+ // Buffer to a temporary file on the same partition
+ // Note: archivePath is a path to a file when handling unexploded logs
+ dst, err := rw.tempArchiveFile(filepath.Dir(rw.archivePath))
if err != nil {
return err
}
+ defer func() {
+ closeWithError(dst)
+ if err != nil {
+ os.Remove(dst.Name()) // Can't do anything when we fail to remove temp file
+ return
+ }
+
+ // Finalize archive by moving the buffered archive into place
+ err = os.Rename(dst.Name(), rw.archivePath)
+ }()
+
+ w := compressionType.archiver(dst, false)
+ defer closeWithError(w)
+
+ src, err := os.Open(rw.archivePath)
+ switch {
+ // Archive exists
+ case err == nil:
+ defer src.Close() // Read-only
+
+ r, err := compressionType.unarchiver(src)
+ if err != nil {
+ return err
+ }
+ defer r.Close() // Read-only
+
+ if err := archive.Copy(w, r); err != nil {
+ return err
+ }
+
+ // Failed to stat
+ case !os.IsNotExist(err):
+ return err
+ }
+ // Add new files to the archive
+ for i := 0; i < rollsToDelete; i++ {
+ rollPath := filepath.Join(rw.currentDirPath, history[i])
+ src, err := os.Open(rollPath)
+ if err != nil {
+ return err
+ }
+ defer src.Close() // Read-only
+ fi, err := src.Stat()
+ if err != nil {
+ return err
+ }
+ if err := w.NextFile(src.Name(), fi); err != nil {
+ return err
+ }
+ if _, err := io.Copy(w, src); err != nil {
+ return err
+ }
+ }
return nil
}
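
Both archive paths above buffer into a temp file on the destination's partition and then os.Rename the result into place, so a crash mid-archive never corrupts an existing archive. A standalone sketch of that buffer-then-rename pattern, simplified (no .seelog_tmp subdirectory; writeAtomically and the destination path are hypothetical):

    package main

    import (
        "io/ioutil"
        "log"
        "os"
        "path/filepath"
    )

    func writeAtomically(dst string, data []byte) error {
        tmp, err := ioutil.TempFile(filepath.Dir(dst), ".archive_tmp")
        if err != nil {
            return err
        }
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            os.Remove(tmp.Name()) // best effort
            return err
        }
        if err := tmp.Close(); err != nil {
            os.Remove(tmp.Name())
            return err
        }
        // Rename is atomic when source and destination share a filesystem,
        // which is why the temp file lives next to the destination.
        return os.Rename(tmp.Name(), dst)
    }

    func main() {
        if err := writeAtomically("/tmp/log.tar.gz", []byte("payload")); err != nil {
            log.Fatal(err)
        }
    }
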
@@ -279,44 +462,21 @@ func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
return nil
}
- switch rw.archiveType {
- case rollingArchiveZip:
- var files map[string][]byte
-
- // If archive exists
- _, err := os.Lstat(rw.archivePath)
- if nil == err {
- // Extract files and content from it
- files, err = unzip(rw.archivePath)
- if err != nil {
- return err
- }
+ if rw.archiveType != rollingArchiveNone {
+ if rw.archiveExploded {
+ os.MkdirAll(rw.archivePath, defaultDirectoryPermissions)
- // Remove the original file
- err = tryRemoveFile(rw.archivePath)
- if err != nil {
- return err
+ // Archive logs
+ for i := 0; i < rollsToDelete; i++ {
+ rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType])
}
} else {
- files = make(map[string][]byte)
- }
+ os.MkdirAll(filepath.Dir(rw.archivePath), defaultDirectoryPermissions)
- // Add files to the existing files map, filled above
- for i := 0; i < rollsToDelete; i++ {
- rollPath := filepath.Join(rw.currentDirPath, history[i])
- bts, err := ioutil.ReadFile(rollPath)
- if err != nil {
- return err
- }
-
- files[rollPath] = bts
- }
-
- // Put the final file set to zip file.
- if err = createZip(rw.archivePath, files); err != nil {
- return err
+ rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history)
}
}
+
var err error
// In all cases (archive files or not) the files should be deleted.
for i := 0; i < rollsToDelete; i++ {
@@ -332,97 +492,87 @@ func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
func (rw *rollingFileWriter) getFileRollName(fileName string) string {
switch rw.nameMode {
case rollingNameModePostfix:
- return fileName[len(rw.originalFileName+rollingLogHistoryDelimiter):]
+ return fileName[len(rw.fileName+rollingLogHistoryDelimiter):]
case rollingNameModePrefix:
- return fileName[:len(fileName)-len(rw.originalFileName+rollingLogHistoryDelimiter)]
+ return fileName[:len(fileName)-len(rw.fileName+rollingLogHistoryDelimiter)]
}
return ""
}
-func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) {
- if rw.currentFile == nil {
- err := rw.createFileAndFolderIfNeeded(true)
- if err != nil {
- return 0, err
- }
+func (rw *rollingFileWriter) roll() error {
+ // First, close current file.
+ err := rw.currentFile.Close()
+ if err != nil {
+ return err
}
- // needs to roll if:
- // * file roller max file size exceeded OR
- // * time roller interval passed
- nr, err := rw.self.needsToRoll()
+ rw.currentFile = nil
+
+ // Current history of all previous log files.
+ // For file roller it may be like this:
+ // * ...
+ // * file.log.4
+ // * file.log.5
+ // * file.log.6
+ //
+ // For date roller it may look like this:
+ // * ...
+ // * file.log.11.Aug.13
+ // * file.log.15.Aug.13
+ // * file.log.16.Aug.13
+ // Sorted log history does NOT include current file.
+ history, err := rw.getSortedLogHistory()
if err != nil {
- return 0, err
+ return err
}
- if nr {
- // First, close current file.
- err = rw.currentFile.Close()
+ // Renames current file to create a new roll history entry
+ // For file roller it may be like this:
+ // * ...
+ // * file.log.4
+ // * file.log.5
+ // * file.log.6
+ // n file.log.7 <---- RENAMED (from file.log)
+ newHistoryName := rw.createFullFileName(rw.fileName,
+ rw.self.getNewHistoryRollFileName(history))
+
+ err = os.Rename(filepath.Join(rw.currentDirPath, rw.currentName), filepath.Join(rw.currentDirPath, newHistoryName))
+ if err != nil {
+ return err
+ }
+
+ // Finally, add the newly added history file to the history archive
+ // and, if after that the archive exceeds the allowed max limit, older rolls
+	// must be removed/archived.
+ history = append(history, newHistoryName)
+ if len(history) > rw.maxRolls {
+ err = rw.deleteOldRolls(history)
if err != nil {
- return 0, err
+ return err
}
- // Current history of all previous log files.
- // For file roller it may be like this:
- // * ...
- // * file.log.4
- // * file.log.5
- // * file.log.6
- //
- // For date roller it may look like this:
- // * ...
- // * file.log.11.Aug.13
- // * file.log.15.Aug.13
- // * file.log.16.Aug.13
- // Sorted log history does NOT include current file.
- history, err := rw.getSortedLogHistory()
- if err != nil {
+ }
+
+ return nil
+}
+
+func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) {
+ rw.rollLock.Lock()
+ defer rw.rollLock.Unlock()
+
+ if rw.self.needsToRoll() {
+ if err := rw.roll(); err != nil {
return 0, err
}
- // Renames current file to create a new roll history entry
- // For file roller it may be like this:
- // * ...
- // * file.log.4
- // * file.log.5
- // * file.log.6
- // n file.log.7 <---- RENAMED (from file.log)
- // Time rollers that doesn't modify file names (e.g. 'date' roller) skip this logic.
- var newHistoryName string
- var newRollMarkerName string
- if len(history) > 0 {
- // Create new rname name using last history file name
- newRollMarkerName = rw.self.getNewHistoryRollFileName(rw.getFileRollName(history[len(history)-1]))
- } else {
- // Create first rname name
- newRollMarkerName = rw.self.getNewHistoryRollFileName("")
- }
- if len(newRollMarkerName) != 0 {
- newHistoryName = rw.createFullFileName(rw.fileName, newRollMarkerName)
- } else {
- newHistoryName = rw.fileName
- }
- if newHistoryName != rw.fileName {
- err = os.Rename(filepath.Join(rw.currentDirPath, rw.fileName), filepath.Join(rw.currentDirPath, newHistoryName))
- if err != nil {
- return 0, err
- }
- }
- // Finally, add the newly added history file to the history archive
- // and, if after that the archive exceeds the allowed max limit, older rolls
- // must the removed/archived.
- history = append(history, newHistoryName)
- if len(history) > rw.maxRolls {
- err = rw.deleteOldRolls(history)
- if err != nil {
- return 0, err
- }
- }
+ }
- err = rw.createFileAndFolderIfNeeded(false)
+ if rw.currentFile == nil {
+ err := rw.createFileAndFolderIfNeeded(true)
if err != nil {
return 0, err
}
}
- rw.currentFileSize += int64(len(bytes))
- return rw.currentFile.Write(bytes)
+ n, err = rw.currentFile.Write(bytes)
+ rw.currentFileSize += int64(n)
+ return n, err
}
func (rw *rollingFileWriter) Close() error {
@@ -436,6 +586,14 @@ func (rw *rollingFileWriter) Close() error {
return nil
}
+func (rw *rollingFileWriter) tempArchiveFile(archiveDir string) (*os.File, error) {
+ tmp := filepath.Join(archiveDir, ".seelog_tmp")
+ if err := os.MkdirAll(tmp, defaultDirectoryPermissions); err != nil {
+ return nil, err
+ }
+ return ioutil.TempFile(tmp, "archived_logs")
+}
+
// =============================================================================================
// Different types of rolling writers
// =============================================================================================
@@ -450,8 +608,8 @@ type rollingFileWriterSize struct {
maxFileSize int64
}
-func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode) (*rollingFileWriterSize, error) {
- rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode)
+func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode, archiveExploded bool) (*rollingFileWriterSize, error) {
+ rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode, archiveExploded, false)
if err != nil {
return nil, err
}
@@ -460,8 +618,8 @@ func NewRollingFileWriterSize(fpath string, atype rollingArchiveType, apath stri
return rws, nil
}
-func (rws *rollingFileWriterSize) needsToRoll() (bool, error) {
- return rws.currentFileSize >= rws.maxFileSize, nil
+func (rws *rollingFileWriterSize) needsToRoll() bool {
+ return rws.currentFileSize >= rws.maxFileSize
}
func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
@@ -474,13 +632,17 @@ func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
type rollSizeFileTailsSlice []string
-func (p rollSizeFileTailsSlice) Len() int { return len(p) }
+func (p rollSizeFileTailsSlice) Len() int {
+ return len(p)
+}
func (p rollSizeFileTailsSlice) Less(i, j int) bool {
v1, _ := strconv.Atoi(p[i])
v2, _ := strconv.Atoi(p[j])
return v1 < v2
}
-func (p rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p rollSizeFileTailsSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {
ss := rollSizeFileTailsSlice(fs)
@@ -488,16 +650,17 @@ func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, e
return ss, nil
}
-func (rws *rollingFileWriterSize) getNewHistoryRollFileName(lastRollName string) string {
+func (rws *rollingFileWriterSize) getNewHistoryRollFileName(otherLogFiles []string) string {
v := 0
- if len(lastRollName) != 0 {
- v, _ = strconv.Atoi(lastRollName)
+ if len(otherLogFiles) != 0 {
+ latest := otherLogFiles[len(otherLogFiles)-1]
+ v, _ = strconv.Atoi(rws.getFileRollName(latest))
}
return fmt.Sprintf("%d", v+1)
}
-func (rws *rollingFileWriterSize) getCurrentModifiedFileName(originalFileName string, first bool) (string, error) {
- return originalFileName, nil
+func (rws *rollingFileWriterSize) getCurrentFileName() string {
+ return rws.fileName
}
func (rws *rollingFileWriterSize) String() string {
@@ -517,48 +680,31 @@ func (rws *rollingFileWriterSize) String() string {
type rollingFileWriterTime struct {
*rollingFileWriter
timePattern string
- interval rollingIntervalType
currentTimeFileName string
}
func NewRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int,
- timePattern string, interval rollingIntervalType, namemode rollingNameMode) (*rollingFileWriterTime, error) {
+ timePattern string, namemode rollingNameMode, archiveExploded bool, fullName bool) (*rollingFileWriterTime, error) {
- rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode)
+ rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode, archiveExploded, fullName)
if err != nil {
return nil, err
}
- rws := &rollingFileWriterTime{rw, timePattern, interval, ""}
+ rws := &rollingFileWriterTime{rw, timePattern, ""}
rws.self = rws
return rws, nil
}
-func (rwt *rollingFileWriterTime) needsToRoll() (bool, error) {
- switch rwt.nameMode {
- case rollingNameModePostfix:
- if rwt.originalFileName+rollingLogHistoryDelimiter+time.Now().Format(rwt.timePattern) == rwt.fileName {
- return false, nil
- }
- case rollingNameModePrefix:
- if time.Now().Format(rwt.timePattern)+rollingLogHistoryDelimiter+rwt.originalFileName == rwt.fileName {
- return false, nil
- }
- }
- if rwt.interval == rollingIntervalAny {
- return true, nil
- }
+func (rwt *rollingFileWriterTime) needsToRoll() bool {
+ newName := time.Now().Format(rwt.timePattern)
- tprev, err := time.ParseInLocation(rwt.timePattern, rwt.getFileRollName(rwt.fileName), time.Local)
- if err != nil {
- return false, err
+ if rwt.currentTimeFileName == "" {
+ // first run; capture the current name
+ rwt.currentTimeFileName = newName
+ return false
}
- diff := time.Now().Sub(tprev)
- switch rwt.interval {
- case rollingIntervalDaily:
- return diff >= 24*time.Hour, nil
- }
- return false, fmt.Errorf("unknown interval type: %d", rwt.interval)
+ return newName != rwt.currentTimeFileName
}
func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool {
@@ -574,7 +720,9 @@ type rollTimeFileTailsSlice struct {
pattern string
}
-func (p rollTimeFileTailsSlice) Len() int { return len(p.data) }
+func (p rollTimeFileTailsSlice) Len() int {
+ return len(p.data)
+}
func (p rollTimeFileTailsSlice) Less(i, j int) bool {
t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)
@@ -582,7 +730,9 @@ func (p rollTimeFileTailsSlice) Less(i, j int) bool {
return t1.Before(t2)
}
-func (p rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] }
+func (p rollTimeFileTailsSlice) Swap(i, j int) {
+ p.data[i], p.data[j] = p.data[j], p.data[i]
+}
func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {
ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}
@@ -590,36 +740,24 @@ func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, e
return ss.data, nil
}
-func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(lastRollName string) string {
- return ""
+func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(_ []string) string {
+ newFileName := rwt.currentTimeFileName
+ rwt.currentTimeFileName = time.Now().Format(rwt.timePattern)
+ return newFileName
}
-func (rwt *rollingFileWriterTime) getCurrentModifiedFileName(originalFileName string, first bool) (string, error) {
- if first {
- history, err := rwt.getSortedLogHistory()
- if err != nil {
- return "", err
- }
- if len(history) > 0 {
- return history[len(history)-1], nil
- }
- }
-
- switch rwt.nameMode {
- case rollingNameModePostfix:
- return originalFileName + rollingLogHistoryDelimiter + time.Now().Format(rwt.timePattern), nil
- case rollingNameModePrefix:
- return time.Now().Format(rwt.timePattern) + rollingLogHistoryDelimiter + originalFileName, nil
+func (rwt *rollingFileWriterTime) getCurrentFileName() string {
+ if rwt.fullName {
+ return rwt.createFullFileName(rwt.fileName, time.Now().Format(rwt.timePattern))
}
- return "", fmt.Errorf("Unknown rolling writer mode. Either postfix or prefix must be used")
+ return rwt.fileName
}
func (rwt *rollingFileWriterTime) String() string {
- return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, maxInterval: %v, pattern: %s, maxRolls: %v",
+ return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, pattern: %s, maxRolls: %v",
rwt.fileName,
rollingArchiveTypesStringRepresentation[rwt.archiveType],
rwt.archivePath,
- rwt.interval,
rwt.timePattern,
rwt.maxRolls)
}
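
With rollingIntervalType gone, the time roller's trigger reduces to "roll when the formatted timestamp changes". A standalone sketch of that check; the roller type and pattern are illustrative, and the real writer takes the pattern from the datepattern attribute:

    package main

    import (
        "fmt"
        "time"
    )

    type roller struct{ current string }

    func (r *roller) needsToRoll(pattern string) bool {
        name := time.Now().Format(pattern)
        if r.current == "" {
            r.current = name // first write: just capture the stamp
            return false
        }
        return name != r.current
    }

    func main() {
        r := &roller{}
        fmt.Println(r.needsToRoll("2006-01-02")) // false: captures today
        fmt.Println(r.needsToRoll("2006-01-02")) // false until the date flips
    }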