From eb23435aba62e440021752d977329eadf919e8cf Mon Sep 17 00:00:00 2001 From: fenxiong Date: Tue, 14 Jan 2020 17:40:52 -0800 Subject: [PATCH] Revert "Custom contextual loggers (#2319)" This reverts commit 52a57fffecefbb92a7ba4af923948de94f04db1e. --- agent/acs/handler/payload_handler_test.go | 96 ++++---- agent/api/container/container.go | 4 +- agent/api/task/json.go | 14 -- agent/api/task/task.go | 102 ++++---- agent/api/task/task_test.go | 1 - agent/dockerclient/dockerapi/types.go | 12 - agent/engine/docker_task_engine.go | 2 +- agent/engine/task_manager.go | 233 +++++++++--------- agent/logger/contextual_logger.go | 119 --------- agent/logger/log.go | 66 ++--- agent/logger/log_test.go | 77 +----- agent/taskresource/cgroup/cgroup.go | 27 +- agent/taskresource/firelens/firelens_unix.go | 24 +- .../firelens/firelensconfig_unix.go | 3 +- agent/taskresource/firelens/json_unix.go | 2 - agent/taskresource/volume/dockervolume.go | 24 +- 16 files changed, 259 insertions(+), 547 deletions(-) delete mode 100644 agent/logger/contextual_logger.go diff --git a/agent/acs/handler/payload_handler_test.go b/agent/acs/handler/payload_handler_test.go index 3c38fe0ee3f..f33fdecf89d 100644 --- a/agent/acs/handler/payload_handler_test.go +++ b/agent/acs/handler/payload_handler_test.go @@ -16,10 +16,9 @@ package handler import ( "context" - "encoding/json" "errors" "fmt" - "runtime" + "reflect" "sync" "testing" @@ -34,11 +33,11 @@ import ( "github.com/aws/amazon-ecs-agent/agent/eventhandler" "github.com/aws/amazon-ecs-agent/agent/statemanager" mock_statemanager "github.com/aws/amazon-ecs-agent/agent/statemanager/mocks" + "github.com/aws/amazon-ecs-agent/agent/taskresource" mock_wsclient "github.com/aws/amazon-ecs-agent/agent/wsclient/mock" "github.com/aws/aws-sdk-go/aws" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const ( @@ -148,7 +147,13 @@ func TestHandlePayloadMessageStateSaveError(t *testing.T) { }) assert.Error(t, err, "Expected error while adding a task from statemanager") - validateTask(t, addedTask, "t1") + // We expect task to be added to the engine even though it hasn't been saved + expectedTask := &apitask.Task{ + Arn: "t1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + + assert.Equal(t, addedTask, expectedTask, "added task is not expected") } // TestHandlePayloadMessageAckedWhenTaskAdded tests if the handler generates an ack @@ -189,7 +194,12 @@ func TestHandlePayloadMessageAckedWhenTaskAdded(t *testing.T) { // Verify the message id acked assert.Equal(t, aws.StringValue(ackRequested.MessageId), payloadMessageId, "received message is not expected") - validateTask(t, addedTask, "t1") + // Verify if task added == expected task + expectedTask := &apitask.Task{ + Arn: "t1", + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + assert.Equal(t, addedTask, expectedTask, "received task is not expected") } // TestHandlePayloadMessageCredentialsAckedWhenTaskAdded tests if the handler generates @@ -280,7 +290,8 @@ func TestHandlePayloadMessageCredentialsAckedWhenTaskAdded(t *testing.T) { SessionToken: credentialsSessionToken, CredentialsID: credentialsId, } - validateTaskAndCredentials(t, taskCredentialsAckRequested, expectedCredentialsAck, addedTask, taskArn, expectedCredentials, "t1") + err = validateTaskAndCredentials(taskCredentialsAckRequested, expectedCredentialsAck, addedTask, taskArn, expectedCredentials) + assert.NoError(t, err, "error validating added task or credentials ack for the 
same") } // TestAddPayloadTaskAddsNonStoppedTasksAfterStoppedTasks tests if tasks with desired status @@ -360,7 +371,12 @@ func TestPayloadBufferHandler(t *testing.T) { // Verify if payloadMessageId read from the ack buffer is correct assert.Equal(t, aws.StringValue(ackRequested.MessageId), payloadMessageId, "received task is not expected") - validateTask(t, addedTask, "t1") + // Verify if the task added to the engine is correct + expectedTask := &apitask.Task{ + Arn: taskArn, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + assert.Equal(t, addedTask, expectedTask, "received task is not expected") } // TestPayloadBufferHandlerWithCredentials tests if the async payloadBufferHandler routine @@ -479,7 +495,8 @@ func TestPayloadBufferHandlerWithCredentials(t *testing.T) { SessionToken: firstTaskCredentialsSessionToken, CredentialsID: firstTaskCredentialsId, } - validateTaskAndCredentials(t, firstTaskCredentialsAckRequested, expectedCredentialsAckForFirstTask, firstAddedTask, firstTaskArn, expectedCredentialsForFirstTask, "t1") + err := validateTaskAndCredentials(firstTaskCredentialsAckRequested, expectedCredentialsAckForFirstTask, firstAddedTask, firstTaskArn, expectedCredentialsForFirstTask) + assert.NoError(t, err, "error validating added task or credentials ack for the same") // Verify the correctness of the second task added to the engine and the // credentials ack generated for it @@ -496,7 +513,8 @@ func TestPayloadBufferHandlerWithCredentials(t *testing.T) { SessionToken: secondTaskCredentialsSessionToken, CredentialsID: secondTaskCredentialsId, } - validateTaskAndCredentials(t, secondTaskCredentialsAckRequested, expectedCredentialsAckForSecondTask, secondAddedTask, secondTaskArn, expectedCredentialsForSecondTask, "t2") + err = validateTaskAndCredentials(secondTaskCredentialsAckRequested, expectedCredentialsAckForSecondTask, secondAddedTask, secondTaskArn, expectedCredentialsForSecondTask) + assert.NoError(t, err, "error validating added task or credentials ack for the same") } // TestAddPayloadTaskAddsExecutionRoles tests the payload handler will add @@ -578,18 +596,24 @@ func TestAddPayloadTaskAddsExecutionRoles(t *testing.T) { // validateTaskAndCredentials compares a task and a credentials ack object // against expected values. 
It returns an error if either of the the // comparisons fail -func validateTaskAndCredentials( - t *testing.T, - taskCredentialsAck *ecsacs.IAMRoleCredentialsAckRequest, - expectedCredentialsAckForTask *ecsacs.IAMRoleCredentialsAckRequest, +func validateTaskAndCredentials(taskCredentialsAck, expectedCredentialsAckForTask *ecsacs.IAMRoleCredentialsAckRequest, addedTask *apitask.Task, expectedTaskArn string, - expectedTaskCredentials credentials.IAMRoleCredentials, - taskName string, -) { - require.Equal(t, expectedCredentialsAckForTask, taskCredentialsAck) - require.Equal(t, expectedTaskCredentials.CredentialsID, addedTask.GetCredentialsID()) - validateTask(t, addedTask, taskName) + expectedTaskCredentials credentials.IAMRoleCredentials) error { + if !reflect.DeepEqual(taskCredentialsAck, expectedCredentialsAckForTask) { + return fmt.Errorf("Mismatch between expected and received credentials ack requests, expected: %s, got: %s", expectedCredentialsAckForTask.String(), taskCredentialsAck.String()) + } + + expectedTask := &apitask.Task{ + Arn: expectedTaskArn, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + } + expectedTask.SetCredentialsID(expectedTaskCredentials.CredentialsID) + + if !reflect.DeepEqual(addedTask, expectedTask) { + return fmt.Errorf("Mismatch between expected and added tasks, expected: %v, added: %v", expectedTask, addedTask) + } + return nil } func TestPayloadHandlerAddedENIToTask(t *testing.T) { @@ -925,37 +949,3 @@ func TestPayloadHandlerAddedFirelensData(t *testing.T) { assert.NotNil(t, actual.Options) assert.Equal(t, aws.StringValue(expected.Options["enable-ecs-log-metadata"]), actual.Options["enable-ecs-log-metadata"]) } - -func validateTask(t *testing.T, addedTask *apitask.Task, expectedTaskName string) { - // We expect task to be added to the engine even though it hasn't been saved - addedTaskJSON, err := json.Marshal(addedTask) - require.NoError(t, err) - platformFields := "{}" - if runtime.GOOS == "windows" { - platformFields = `{"cpuUnbounded": false, "memoryUnbounded": false}` - } - expectedTaskJSON := fmt.Sprintf(` - { - "Arn": "%s", - "Family": "", - "Version": "", - "Containers": null, - "associations": null, - "resources": {}, - "volumes": null, - "DesiredStatus": "NONE", - "KnownStatus": "NONE", - "KnownTime": "0001-01-01T00:00:00Z", - "PullStartedAt": "0001-01-01T00:00:00Z", - "PullStoppedAt": "0001-01-01T00:00:00Z", - "ExecutionStoppedAt": "0001-01-01T00:00:00Z", - "SentStatus": "NONE", - "StartSequenceNumber": 0, - "StopSequenceNumber": 0, - "executionCredentialsID": "", - "ENI": null, - "AppMesh": null, - "PlatformFields": %s - }`, expectedTaskName, platformFields) - require.JSONEq(t, expectedTaskJSON, string(addedTaskJSON)) -} diff --git a/agent/api/container/container.go b/agent/api/container/container.go index d78d8c3e223..97f3210b605 100644 --- a/agent/api/container/container.go +++ b/agent/api/container/container.go @@ -1001,7 +1001,7 @@ func (c *Container) GetLogDriver() string { hostConfig := &dockercontainer.HostConfig{} err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) if err != nil { - seelog.Warnf("Encountered error when trying to get log driver for container %s: %v", c.String(), err) + seelog.Warnf("Encountered error when trying to get log driver for container %s: %v", err) return "" } @@ -1021,7 +1021,7 @@ func (c *Container) GetNetworkModeFromHostConfig() string { // TODO return error to differentiate between error and default mode . 
err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) if err != nil { - seelog.Warnf("Encountered error when trying to get network mode for container %s: %v", c.String(), err) + seelog.Warnf("Encountered error when trying to get network mode for container %s: %v", err) return "" } diff --git a/agent/api/task/json.go b/agent/api/task/json.go index 8f318426802..ad46e56d066 100644 --- a/agent/api/task/json.go +++ b/agent/api/task/json.go @@ -28,17 +28,3 @@ func (t *Task) MarshalJSON() ([]byte, error) { return json.Marshal((*jTask)(t)) } - -// UnmarshalJSON wraps Go's unmarshalling logic to guarantee that the logger gets created -func (t *Task) UnmarshalJSON(data []byte) error { - err := json.Unmarshal(data, (*jTask)(t)) - if err != nil { - return err - } - t.log.SetContext(map[string]string{ - "taskARN": t.Arn, - "taskFamily": t.Family, - "taskVersion": t.Version, - }) - return nil -} diff --git a/agent/api/task/task.go b/agent/api/task/task.go index 18049071773..11a598c2231 100644 --- a/agent/api/task/task.go +++ b/agent/api/task/task.go @@ -24,7 +24,6 @@ import ( "sync" "time" - "github.com/aws/amazon-ecs-agent/agent/logger" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" @@ -254,9 +253,6 @@ type Task struct { // lock is for protecting all fields in the task struct lock sync.RWMutex - - // log is a custom logger with extra context specific to the task struct - log logger.Contextual } // TaskFromACS translates ecsacs.Task to apitask.Task by first marshaling the received @@ -270,11 +266,6 @@ func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, if err := json.Unmarshal(data, task); err != nil { return nil, err } - task.log.SetContext(map[string]string{ - "taskARN": task.Arn, - "taskFamily": task.Family, - "taskVersion": task.Version, - }) if task.GetDesiredStatus() == apitaskstatus.TaskRunning && envelope.SeqNum != nil { task.StartSequenceNumber = *envelope.SeqNum } else if task.GetDesiredStatus() == apitaskstatus.TaskStopped && envelope.SeqNum != nil { @@ -321,18 +312,18 @@ func (task *Task) PostUnmarshalTask(cfg *config.Config, task.adjustForPlatform(cfg) if task.MemoryCPULimitsEnabled { if err := task.initializeCgroupResourceSpec(cfg.CgroupPath, cfg.CgroupCPUPeriod, resourceFields); err != nil { - task.log.Errorf("could not intialize resource: %v", err) + seelog.Errorf("Task [%s]: could not intialize resource: %v", task.Arn, err) return apierrors.NewResourceInitError(task.Arn, err) } } if err := task.initializeContainerOrderingForVolumes(); err != nil { - task.log.Errorf("could not initialize volumes dependency for container: %v", err) + seelog.Errorf("Task [%s]: could not initialize volumes dependency for container: %v", task.Arn, err) return apierrors.NewResourceInitError(task.Arn, err) } if err := task.initializeContainerOrderingForLinks(); err != nil { - task.log.Errorf("could not initialize links dependency for container: %v", err) + seelog.Errorf("Task [%s]: could not initialize links dependency for container: %v", task.Arn, err) return apierrors.NewResourceInitError(task.Arn, err) } @@ -353,14 +344,14 @@ func (task *Task) PostUnmarshalTask(cfg *config.Config, } if err := task.addGPUResource(cfg); err != nil { - task.log.Errorf("could not initialize GPU associations: %v", err) + seelog.Errorf("Task [%s]: could not initialize GPU associations: %v", task.Arn, err) return apierrors.NewResourceInitError(task.Arn, err) } task.initializeCredentialsEndpoint(credentialsManager) 
task.initializeContainersV3MetadataEndpoint(utils.NewDynamicUUIDProvider()) if err := task.addNetworkResourceProvisioningDependency(cfg); err != nil { - task.log.Errorf("could not provision network resource: %v", err) + seelog.Errorf("Task [%s]: could not provision network resource: %v", task.Arn, err) return apierrors.NewResourceInitError(task.Arn, err) } // Adds necessary Pause containers for sharing PID or IPC namespaces @@ -372,7 +363,7 @@ func (task *Task) PostUnmarshalTask(cfg *config.Config, if task.requiresCredentialSpecResource() { if err := task.initializeCredentialSpecResource(cfg, credentialsManager, resourceFields); err != nil { - task.log.Errorf("could not initialize credentialspec resource: %v", err) + seelog.Errorf("Task [%s]: could not initialize credentialspec resource: %v", task.Arn, err) return apierrors.NewResourceInitError(task.Arn, err) } } @@ -628,7 +619,7 @@ func (task *Task) addSharedVolumes(SharedVolumeMatchFullConfig bool, ctx context return volumeMetadata.Error } - task.log.Infof("initialize volume: non-autoprovisioned volume not found, adding to task resource %q", vol.Name) + seelog.Infof("initialize volume: Task [%s]: non-autoprovisioned volume not found, adding to task resource %q", task.Arn, vol.Name) // this resource should be created by agent volumeResource, err := taskresourcevolume.NewVolumeResource( ctx, @@ -646,22 +637,22 @@ func (task *Task) addSharedVolumes(SharedVolumeMatchFullConfig bool, ctx context return nil } - task.log.Infof("initialize volume: volume [%s] already exists", volumeConfig.DockerVolumeName) + seelog.Infof("initialize volume: Task [%s]: volume [%s] already exists", task.Arn, volumeConfig.DockerVolumeName) if !SharedVolumeMatchFullConfig { - task.log.Infof("initialize volume: ECS_SHARED_VOLUME_MATCH_FULL_CONFIG is set to false and volume with name [%s] is found", volumeConfig.DockerVolumeName) + seelog.Infof("initialize volume: Task [%s]: ECS_SHARED_VOLUME_MATCH_FULL_CONFIG is set to false and volume with name [%s] is found", task.Arn, volumeConfig.DockerVolumeName) return nil } // validate all the volume metadata fields match to the configuration if len(volumeMetadata.DockerVolume.Labels) == 0 && len(volumeMetadata.DockerVolume.Labels) == len(volumeConfig.Labels) { - task.log.Infof("labels are both empty or null: volume [%s]", volumeConfig.DockerVolumeName) + seelog.Infof("labels are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName) } else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Labels, volumeConfig.Labels) { return errors.Errorf("intialize volume: non-autoprovisioned volume does not match existing volume labels: existing: %v, expected: %v", volumeMetadata.DockerVolume.Labels, volumeConfig.Labels) } if len(volumeMetadata.DockerVolume.Options) == 0 && len(volumeMetadata.DockerVolume.Options) == len(volumeConfig.DriverOpts) { - task.log.Infof("driver options are both empty or null: volume [%s]", volumeConfig.DockerVolumeName) + seelog.Infof("driver options are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName) } else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts) { return errors.Errorf("initialize volume: non-autoprovisioned volume does not match existing volume options: existing: %v, expected: %v", volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts) @@ -698,7 +689,7 @@ func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.M // the id. 
This should never happen as the payload handler sets // credentialsId for the task after adding credentials to the // credentials manager - task.log.Errorf("Unable to get credentials for task") + seelog.Errorf("Unable to get credentials for task: %s", task.Arn) return } @@ -962,7 +953,7 @@ func (task *Task) addFirelensContainerDependency() error { if firelensContainer.HasContainerDependencies() { // If firelens container has any container dependency, we don't add internal container dependency that depends // on it in order to be safe (otherwise we need to deal with circular dependency). - task.log.Warnf("Not adding container dependency to let firelens container %s start first, because it has dependency on other containers.", firelensContainer.Name) + seelog.Warnf("Not adding container dependency to let firelens container %s start first, because it has dependency on other containers.", firelensContainer.Name) return nil } @@ -986,7 +977,7 @@ func (task *Task) addFirelensContainerDependency() error { // If there's no dependency between the app container and the firelens container, make firelens container // start first to be the default behavior by adding a START container depdendency. if !container.DependsOnContainer(firelensContainer.Name) { - task.log.Infof("Adding a START container dependency on firelens container %s for container %s", + seelog.Infof("Adding a START container dependency on firelens container %s for container %s", firelensContainer.Name, container.Name) container.AddContainerDependency(firelensContainer.Name, ContainerOrderingStartCondition) } @@ -1287,7 +1278,7 @@ func (task *Task) UpdateMountPoints(cont *apicontainer.Container, vols []types.M // there was no change // Invariant: task known status is the minimum of container known status func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) { - task.log.Debugf("api/task: Updating task's known status") + seelog.Debugf("api/task: Updating task's known status, task: %s", task.String()) // Set to a large 'impossible' status that can't be the min containerEarliestKnownStatus := apicontainerstatus.ContainerZombie var earliestKnownStatusContainer *apicontainer.Container @@ -1303,17 +1294,19 @@ func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) { } } if earliestKnownStatusContainer == nil { - task.log.Errorf( - "Impossible state found while updating tasks's known status, earliest state recorded as %s", - containerEarliestKnownStatus.String()) + seelog.Criticalf( + "Impossible state found while updating tasks's known status, earliest state recorded as %s for task [%v]", + containerEarliestKnownStatus.String(), task) return apitaskstatus.TaskStatusNone } - task.log.Debugf("api/task: Container with earliest known container is [%s]", - earliestKnownStatusContainer.String()) + seelog.Debugf("api/task: Container with earliest known container is [%s] for task: %s", + earliestKnownStatusContainer.String(), task.String()) // If the essential container is stopped while other containers may be running // don't update the task status until the other containers are stopped. 
if earliestKnownStatusContainer.IsKnownSteadyState() && essentialContainerStopped { - task.log.Debugf("Essential container is stopped while other containers are running, not updating task status") + seelog.Debugf( + "Essential container is stopped while other containers are running, not updating task status for task: %s", + task.String()) return apitaskstatus.TaskStatusNone } // We can't rely on earliest container known status alone for determining if the @@ -1322,8 +1315,8 @@ func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) { // statuses and compute the min of this earliestKnownTaskStatus := task.getEarliestKnownTaskStatusForContainers() if task.GetKnownStatus() < earliestKnownTaskStatus { - task.log.Infof("api/task: Updating task's known status to: %s", - earliestKnownTaskStatus.String()) + seelog.Infof("api/task: Updating task's known status to: %s, task: %s", + earliestKnownTaskStatus.String(), task.String()) task.SetKnownStatus(earliestKnownTaskStatus) return task.GetKnownStatus() } @@ -1334,7 +1327,7 @@ func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) { // based on the known statuses of all containers in the task func (task *Task) getEarliestKnownTaskStatusForContainers() apitaskstatus.TaskStatus { if len(task.Containers) == 0 { - task.log.Errorf("No containers in the task") + seelog.Criticalf("No containers in the task: %s", task.String()) return apitaskstatus.TaskStatusNone } // Set earliest container status to an impossible to reach 'high' task status @@ -1469,7 +1462,7 @@ func (task *Task) dockerHostConfig(container *apicontainer.Container, dockerCont if task.NvidiaRuntime == "" { return nil, &apierrors.HostConfigError{Msg: "Runtime is not set for GPU containers"} } - task.log.Debugf("Setting runtime as %s for container %s", task.NvidiaRuntime, container.Name) + seelog.Debugf("Setting runtime as %s for container %s", task.NvidiaRuntime, container.Name) hostConfig.Runtime = task.NvidiaRuntime } @@ -1519,8 +1512,8 @@ func (task *Task) getDockerResources(container *apicontainer.Container) dockerco // Convert MB to B and set Memory dockerMem := int64(container.Memory * 1024 * 1024) if dockerMem != 0 && dockerMem < apicontainer.DockerContainerMinimumMemoryInBytes { - task.log.Warnf("container %s memory setting is too low, increasing to %d bytes", - container.Name, apicontainer.DockerContainerMinimumMemoryInBytes) + seelog.Warnf("Task %s container %s memory setting is too low, increasing to %d bytes", + task.Arn, container.Name, apicontainer.DockerContainerMinimumMemoryInBytes) dockerMem = apicontainer.DockerContainerMinimumMemoryInBytes } // Set CPUShares @@ -1563,14 +1556,14 @@ func (task *Task) shouldOverrideNetworkMode(container *apicontainer.Container, d } } if pauseContName == "" { - task.log.Error("Pause container required, but not found in the task") + seelog.Critical("Pause container required, but not found in the task: %s", task.String()) return false, "" } pauseContainer, ok := dockerContainerMap[pauseContName] if !ok || pauseContainer == nil { // This should never be the case and implies a code-bug. 
- task.log.Errorf("Pause container required, but not found in container map for container: [%s]", - container.String()) + seelog.Criticalf("Pause container required, but not found in container map for container: [%s] in task: %s", + container.String(), task.String()) return false, "" } return true, dockerMappingContainerPrefix + pauseContainer.DockerID @@ -1652,14 +1645,14 @@ func (task *Task) shouldOverridePIDMode(container *apicontainer.Container, docke case pidModeTask: pauseCont, ok := task.ContainerByName(NamespacePauseContainerName) if !ok { - task.log.Errorf("Namespace Pause container not found in the task; Setting Task's Desired Status to Stopped") + seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) task.SetDesiredStatus(apitaskstatus.TaskStopped) return false, "" } pauseDockerID, ok := dockerContainerMap[pauseCont.Name] if !ok || pauseDockerID == nil { // Docker container shouldn't be nil or not exist if the Container definition within task exists; implies code-bug - task.log.Errorf("Namespace Pause docker container not found in the task; Setting Task's Desired Status to Stopped") + seelog.Criticalf("Namespace Pause docker container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) task.SetDesiredStatus(apitaskstatus.TaskStopped) return false, "" } @@ -1705,14 +1698,14 @@ func (task *Task) shouldOverrideIPCMode(container *apicontainer.Container, docke case ipcModeTask: pauseCont, ok := task.ContainerByName(NamespacePauseContainerName) if !ok { - task.log.Errorf("Namespace Pause container not found in the task; Setting Task's Desired Status to Stopped") + seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) task.SetDesiredStatus(apitaskstatus.TaskStopped) return false, "" } pauseDockerID, ok := dockerContainerMap[pauseCont.Name] if !ok || pauseDockerID == nil { // Docker container shouldn't be nill or not exist if the Container definition within task exists; implies code-bug - task.log.Errorf("Namespace Pause container not found in the task; Setting Task's Desired Status to Stopped") + seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn) task.SetDesiredStatus(apitaskstatus.TaskStopped) return false, "" } @@ -1771,8 +1764,8 @@ func (task *Task) dockerLinks(container *apicontainer.Container, dockerContainer if len(linkParts) == 2 { linkAlias = linkParts[1] } else { - task.log.Warnf("Link name [%s] found with no linkalias for container: [%s]", - linkName, container.String()) + seelog.Warnf("Link name [%s] found with no linkalias for container: [%s] in task: [%s]", + linkName, container.String(), task.String()) linkAlias = linkName } @@ -1831,9 +1824,9 @@ func (task *Task) dockerHostBinds(container *apicontainer.Container) ([]string, } if hv.Source() == "" || mountPoint.ContainerPath == "" { - task.log.Errorf( - "Unable to resolve volume mounts for container [%s]; invalid path: [%s]; [%s] -> [%s]", - container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath) + seelog.Errorf( + "Unable to resolve volume mounts for container [%s]; invalid path: [%s]; [%s] -> [%s] in task: [%s]", + container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath, task.String()) return []string{}, errors.Errorf("Unable to resolve volume mounts; invalid path: %s %s; %s -> %s", container.Name, mountPoint.SourceVolume, hv.Source(), 
mountPoint.ContainerPath) } @@ -1870,13 +1863,14 @@ func (task *Task) UpdateDesiredStatus() { // updateTaskDesiredStatusUnsafe determines what status the task should properly be at based on the containers' statuses // Invariant: task desired status must be stopped if any essential container is stopped func (task *Task) updateTaskDesiredStatusUnsafe() { - task.log.Debugf("Updating task") + seelog.Debugf("Updating task: [%s]", task.stringUnsafe()) // A task's desired status is stopped if any essential container is stopped // Otherwise, the task's desired status is unchanged (typically running, but no need to change) for _, cont := range task.Containers { if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) { - task.log.Infof("api/task: Updating task desired status to stopped because of container: [%s]", cont.Name) + seelog.Infof("api/task: Updating task desired status to stopped because of container: [%s]; task: [%s]", + cont.Name, task.stringUnsafe()) task.DesiredStatusUnsafe = apitaskstatus.TaskStopped } } @@ -2210,8 +2204,8 @@ func (task *Task) RecordExecutionStoppedAt(container *apicontainer.Container) { // ExecutionStoppedAt was already recorded. Nothing to left to do here return } - task.log.Infof("recording execution stopped time. Essential container [%s] stopped at: %s", - container.Name, now.String()) + seelog.Infof("Task [%s]: recording execution stopped time. Essential container [%s] stopped at: %s", + task.Arn, container.Name, now.String()) } // GetResources returns the list of task resources from ResourcesMap @@ -2240,9 +2234,9 @@ func (task *Task) AddResource(resourceType string, resource taskresource.TaskRes // SetTerminalReason sets the terminalReason string and this can only be set // once per the task's lifecycle. This field does not accept updates. 
func (task *Task) SetTerminalReason(reason string) { - task.log.Infof("attempting to set terminal reason for task [%s]", reason) + seelog.Infof("Task [%s]: attempting to set terminal reason for task [%s]", task.Arn, reason) task.terminalReasonOnce.Do(func() { - task.log.Infof("setting terminal reason for task [%s]", reason) + seelog.Infof("Task [%s]: setting terminal reason for task [%s]", task.Arn, reason) // Converts the first letter of terminal reason into capital letter words := strings.Fields(reason) diff --git a/agent/api/task/task_test.go b/agent/api/task/task_test.go index aeca7e3c1e9..fd6e6393579 100644 --- a/agent/api/task/task_test.go +++ b/agent/api/task/task_test.go @@ -1277,7 +1277,6 @@ func TestTaskFromACS(t *testing.T) { seqNum := int64(42) task, err := TaskFromACS(&taskFromAcs, &ecsacs.PayloadMessage{SeqNum: &seqNum}) - expectedTask.log = task.log assert.NoError(t, err) assert.EqualValues(t, expectedTask, task) diff --git a/agent/dockerclient/dockerapi/types.go b/agent/dockerclient/dockerapi/types.go index 2d29d8e7254..a7a5c960d61 100644 --- a/agent/dockerclient/dockerapi/types.go +++ b/agent/dockerclient/dockerapi/types.go @@ -153,15 +153,3 @@ func (event *DockerContainerChangeEvent) String() string { return res } - -// String returns a short human readable string of the container change event -func (event *DockerContainerChangeEvent) ShortString() string { - res := fmt.Sprintf("event type: %s, event container status: %s, docker ID: %s", - event.Type.String(), event.Status.String(), event.DockerID) - - if event.ExitCode != nil { - res += fmt.Sprintf(", ExitCode: %d", aws.IntValue(event.ExitCode)) - } - - return res -} diff --git a/agent/engine/docker_task_engine.go b/agent/engine/docker_task_engine.go index 0363934c6da..14b8875daab 100644 --- a/agent/engine/docker_task_engine.go +++ b/agent/engine/docker_task_engine.go @@ -1392,7 +1392,7 @@ func (engine *DockerTaskEngine) applyContainerState(task *apitask.Task, containe } metadata := transitionFunction(task, container) if metadata.Error != nil { - seelog.Errorf("Task engine [%s]: error transitioning container [%s] to [%s]: %v", + seelog.Infof("Task engine [%s]: error transitioning container [%s] to [%s]: %v", task.Arn, container.Name, nextState.String(), metadata.Error) } else { seelog.Debugf("Task engine [%s]: transitioned container [%s] to [%s]", diff --git a/agent/engine/task_manager.go b/agent/engine/task_manager.go index 26df8082969..39a3a961f6c 100644 --- a/agent/engine/task_manager.go +++ b/agent/engine/task_manager.go @@ -32,7 +32,6 @@ import ( "github.com/aws/amazon-ecs-agent/agent/ecscni" "github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph" "github.com/aws/amazon-ecs-agent/agent/eventstream" - "github.com/aws/amazon-ecs-agent/agent/logger" "github.com/aws/amazon-ecs-agent/agent/statechange" "github.com/aws/amazon-ecs-agent/agent/statemanager" "github.com/aws/amazon-ecs-agent/agent/taskresource" @@ -40,6 +39,7 @@ import ( utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync" "github.com/aws/amazon-ecs-agent/agent/utils/ttime" + "github.com/cihub/seelog" "github.com/pkg/errors" ) @@ -126,7 +126,6 @@ type managedTask struct { *apitask.Task ctx context.Context cancel context.CancelFunc - log logger.Contextual engine *DockerTaskEngine cfg *config.Config @@ -184,11 +183,6 @@ func (engine *DockerTaskEngine) newManagedTask(task *apitask.Task) *managedTask taskStopWG: engine.taskStopGroup, steadyStatePollInterval: engine.taskSteadyStatePollInterval, } - t.log.SetContext(map[string]string{ - "taskARN": 
task.Arn, - "taskFamily": task.Family, - "taskVersion": task.Version, - }) engine.managedTasks[task.Arn] = t return t } @@ -211,7 +205,7 @@ func (mtask *managedTask) overseeTask() { for { select { case <-mtask.ctx.Done(): - mtask.log.Infof("parent context cancelled, exit") + seelog.Infof("Managed task [%s]: parent context cancelled, exit", mtask.Arn) return default: } @@ -224,7 +218,8 @@ func (mtask *managedTask) overseeTask() { if !mtask.GetKnownStatus().Terminal() { // If we aren't terminal and we aren't steady state, we should be // able to move some containers along. - mtask.log.Infof("task not steady state or terminal; progressing it") + seelog.Infof("Managed task [%s]: task not steady state or terminal; progressing it", + mtask.Arn) mtask.progressTask() } @@ -235,7 +230,8 @@ func (mtask *managedTask) overseeTask() { // be sufficient to capture state changes. err := mtask.saver.Save() if err != nil { - mtask.log.Warnf("unable to checkpoint task's states to disk: %v", err) + seelog.Warnf("Managed task [%s]: unable to checkpoint task's states to disk: %v", + mtask.Arn, err) } if mtask.GetKnownStatus().Terminal() { @@ -244,11 +240,11 @@ func (mtask *managedTask) overseeTask() { } // We only break out of the above if this task is known to be stopped. Do // onetime cleanup here, including removing the task after a timeout - mtask.log.Infof("task has reached stopped. Waiting for container cleanup") + seelog.Infof("Managed task [%s]: task has reached stopped. Waiting for container cleanup", mtask.Arn) mtask.cleanupCredentials() if mtask.StopSequenceNumber != 0 { - mtask.log.Debugf("marking done for this sequence: %d", - mtask.StopSequenceNumber) + seelog.Debugf("Managed task [%s]: marking done for this sequence: %d", + mtask.Arn, mtask.StopSequenceNumber) mtask.taskStopWG.Done(mtask.StopSequenceNumber) } // TODO: make this idempotent on agent restart @@ -278,8 +274,8 @@ func (mtask *managedTask) waitForHostResources() { return } - mtask.log.Infof("waiting for any previous stops to complete. Sequence number: %d", - mtask.StartSequenceNumber) + seelog.Infof("Managed task [%s]: waiting for any previous stops to complete. Sequence number: %d", + mtask.Arn, mtask.StartSequenceNumber) othersStoppedCtx, cancel := context.WithCancel(mtask.ctx) defer cancel() @@ -297,21 +293,21 @@ func (mtask *managedTask) waitForHostResources() { break } } - mtask.log.Infof("wait over; ready to move towards status: %s", - mtask.GetDesiredStatus().String()) + seelog.Infof("Managed task [%s]: wait over; ready to move towards status: %s", + mtask.Arn, mtask.GetDesiredStatus().String()) } // waitSteady waits for a task to leave steady-state by waiting for a new // event, or a timeout. func (mtask *managedTask) waitSteady() { - mtask.log.Infof("task at steady state: %s", mtask.GetKnownStatus().String()) + seelog.Infof("Managed task [%s]: task at steady state: %s", mtask.Arn, mtask.GetKnownStatus().String()) timeoutCtx, cancel := context.WithTimeout(mtask.ctx, mtask.steadyStatePollInterval) defer cancel() timedOut := mtask.waitEvent(timeoutCtx.Done()) if timedOut { - mtask.log.Debugf("checking to make sure it's still at steadystate") + seelog.Debugf("Managed task [%s]: checking to make sure it's still at steadystate", mtask.Arn) go mtask.engine.checkTaskState(mtask.Task) } } @@ -321,7 +317,7 @@ func (mtask *managedTask) waitSteady() { func (mtask *managedTask) steadyState() bool { select { case <-mtask.ctx.Done(): - mtask.log.Info("Context expired. No longer steady.") + seelog.Info("Context expired. 
No longer steady.") return false default: taskKnownStatus := mtask.GetKnownStatus() @@ -342,24 +338,25 @@ func (mtask *managedTask) cleanupCredentials() { // channel. When the Done channel is signalled by the context, waitEvent will // return true. func (mtask *managedTask) waitEvent(stopWaiting <-chan struct{}) bool { - mtask.log.Infof("waiting for event for task") + seelog.Infof("Managed task [%s]: waiting for event for task", mtask.Arn) select { case acsTransition := <-mtask.acsMessages: + seelog.Infof("Managed task [%s]: got acs event", mtask.Arn) mtask.handleDesiredStatusChange(acsTransition.desiredStatus, acsTransition.seqnum) return false case dockerChange := <-mtask.dockerMessages: - mtask.log.Infof("got container change event [%s] for container [%s]", - dockerChange.event.ShortString(), dockerChange.container.Name) + seelog.Infof("Managed task [%s]: got container [%s] event: [%s]", + mtask.Arn, dockerChange.container.Name, dockerChange.event.Status.String()) mtask.handleContainerChange(dockerChange) return false case resChange := <-mtask.resourceStateChangeEvent: res := resChange.resource - mtask.log.Infof("got resource change event [%s] for resource [%s]", - res.StatusString(resChange.nextState), res.GetName()) + seelog.Infof("Managed task [%s]: got resource [%s] event: [%s]", + mtask.Arn, res.GetName(), res.StatusString(resChange.nextState)) mtask.handleResourceStateChange(resChange) return false case <-stopWaiting: - mtask.log.Infof("no longer waiting for events") + seelog.Infof("Managed task [%s]: no longer waiting", mtask.Arn) return true } } @@ -371,16 +368,16 @@ func (mtask *managedTask) waitEvent(stopWaiting <-chan struct{}) bool { func (mtask *managedTask) handleDesiredStatusChange(desiredStatus apitaskstatus.TaskStatus, seqnum int64) { // Handle acs message changes this task's desired status to whatever // acs says it should be if it is compatible - mtask.log.Infof("new acs transition to: %s; sequence number: %d; task stop sequence number: %d", - desiredStatus.String(), seqnum, mtask.StopSequenceNumber) + seelog.Infof("Managed task [%s]: new acs transition to: %s; sequence number: %d; task stop sequence number: %d", + mtask.Arn, desiredStatus.String(), seqnum, mtask.StopSequenceNumber) if desiredStatus <= mtask.GetDesiredStatus() { - mtask.log.Infof("redundant task transition from [%s] to [%s], ignoring", - mtask.GetDesiredStatus().String(), desiredStatus.String()) + seelog.Infof("Managed task [%s]: redundant task transition from [%s] to [%s], ignoring", + mtask.Arn, mtask.GetDesiredStatus().String(), desiredStatus.String()) return } if desiredStatus == apitaskstatus.TaskStopped && seqnum != 0 && mtask.GetStopSequenceNumber() == 0 { - mtask.log.Infof("task moving to stopped, adding to stopgroup with sequence number: %d", - seqnum) + seelog.Infof("Managed task [%s]: task moving to stopped, adding to stopgroup with sequence number: %d", + mtask.Arn, seqnum) mtask.SetStopSequenceNumber(seqnum) mtask.taskStopWG.Add(seqnum, 1) } @@ -396,19 +393,22 @@ func (mtask *managedTask) handleContainerChange(containerChange dockerContainerC container := containerChange.container found := mtask.isContainerFound(container) if !found { - mtask.log.Errorf("state error; invoked with another task's container [%s]!", - container.Name) + seelog.Criticalf("Managed task [%s]: state error; invoked with another task's container [%s]!", + mtask.Arn, container.Name) return } + event := containerChange.event + seelog.Infof("Managed task [%s]: handling container change [%v] for container [%s]", + 
mtask.Arn, event, container.Name) // If this is a backwards transition stopped->running, the first time set it // to be known running so it will be stopped. Subsequently ignore these backward transitions containerKnownStatus := container.GetKnownStatus() mtask.handleStoppedToRunningContainerTransition(event.Status, container) if event.Status <= containerKnownStatus { - mtask.log.Infof("redundant container state change. %s to %s, but already %s", - container.Name, event.Status.String(), containerKnownStatus.String()) + seelog.Infof("Managed task [%s]: redundant container state change. %s to %s, but already %s", + mtask.Arn, container.Name, event.Status.String(), containerKnownStatus.String()) // Only update container metadata when status stays RUNNING if event.Status == containerKnownStatus && event.Status == apicontainerstatus.ContainerRunning { @@ -430,18 +430,18 @@ func (mtask *managedTask) handleContainerChange(containerChange dockerContainerC } mtask.RecordExecutionStoppedAt(container) - mtask.log.Debugf("sending container change event to tcs, container: [%s(%s)], status: %s", - container.Name, event.DockerID, event.Status.String()) + seelog.Debugf("Managed task [%s]: sending container change event to tcs, container: [%s(%s)], status: %s", + mtask.Arn, container.Name, event.DockerID, event.Status.String()) err := mtask.containerChangeEventStream.WriteToEventStream(event) if err != nil { - mtask.log.Warnf("failed to write container [%s] change event to tcs event stream: %v", - container.Name, err) + seelog.Warnf("Managed task [%s]: failed to write container [%s] change event to tcs event stream: %v", + mtask.Arn, container.Name, err) } mtask.emitContainerEvent(mtask.Task, container, "") if mtask.UpdateStatus() { - mtask.log.Infof("container change also resulted in task change [%s]: [%s]", - container.Name, mtask.GetDesiredStatus().String()) + seelog.Infof("Managed task [%s]: container change also resulted in task change [%s]: [%s]", + mtask.Arn, container.Name, mtask.GetDesiredStatus().String()) // If knownStatus changed, let it be known var taskStateChangeReason string if mtask.GetKnownStatus().Terminal() { @@ -457,8 +457,8 @@ func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChang // locate the resource res := resChange.resource if !mtask.isResourceFound(res) { - mtask.log.Errorf("state error; invoked with another task's resource [%s]", - res.GetName()) + seelog.Criticalf("Managed task [%s]: state error; invoked with another task's resource [%s]", + mtask.Arn, res.GetName()) return } @@ -467,8 +467,8 @@ func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChang currentKnownStatus := res.GetKnownStatus() if status <= currentKnownStatus { - mtask.log.Infof("redundant resource state change. %s to %s, but already %s", - res.GetName(), res.StatusString(status), res.StatusString(currentKnownStatus)) + seelog.Infof("Managed task [%s]: redundant resource state change. 
%s to %s, but already %s", + mtask.Arn, res.GetName(), res.StatusString(status), res.StatusString(currentKnownStatus)) return } @@ -477,11 +477,11 @@ func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChang mtask.engine.saver.Save() return } - mtask.log.Infof("unable to transition resource %s to %s: %v", - res.GetName(), res.StatusString(status), err) + seelog.Infof("Managed task [%s]: unable to transition resource %s to %s: %v", + mtask.Arn, res.GetName(), res.StatusString(status), err) if status == res.SteadyState() { - mtask.log.Errorf("error while creating resource %s, setting the task's desired status to STOPPED", - res.GetName()) + seelog.Errorf("Managed task [%s]: error while creating resource %s, setting the task's desired status to STOPPED", + mtask.Arn, res.GetName()) mtask.SetDesiredStatus(apitaskstatus.TaskStopped) mtask.Task.SetTerminalReason(res.GetTerminalReason()) mtask.engine.saver.Save() @@ -490,8 +490,8 @@ func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChang func (mtask *managedTask) emitResourceChange(change resourceStateChange) { if mtask.ctx.Err() != nil { - mtask.log.Infof("unable to emit resource state change due to closed context: %v", - mtask.ctx.Err()) + seelog.Infof("Managed task [%s]: unable to emit resource state change due to closed context: %v", + mtask.Arn, mtask.ctx.Err()) } mtask.resourceStateChangeEvent <- change } @@ -499,12 +499,13 @@ func (mtask *managedTask) emitResourceChange(change resourceStateChange) { func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) { event, err := api.NewTaskStateChangeEvent(task, reason) if err != nil { - mtask.log.Infof("unable to create task state change event [%s]: %v", reason, err) + seelog.Infof("Managed task [%s]: unable to create task state change event [%s]: %v", + task.Arn, reason, err) return } - mtask.log.Infof("sending task change event [%s]", event.String()) + seelog.Infof("Managed task [%s]: sending task change event [%s]", mtask.Arn, event.String()) mtask.stateChangeEvents <- event - mtask.log.Infof("sent task change event [%s]", event.String()) + seelog.Infof("Managed task [%s]: sent task change event [%s]", mtask.Arn, event.String()) } // emitContainerEvent passes a given event up through the containerEvents channel if necessary. 
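The emit*/waitEvent hunks in this file all restore one channel-handoff shape: an emitter first checks whether the managed task's context has already been cancelled (logging if so, but still performing the send), and the task's event loop selects across its event channels until a stop channel fires. A minimal, self-contained sketch of that shape, using illustrative names rather than the agent's own types:

    // Sketch (illustrative names, not agent types): the emit/wait handoff
    // that the surrounding task_manager.go hunks operate on.
    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    type taskLoop struct {
    	ctx            context.Context
    	dockerMessages chan string
    }

    // emitDockerChange mirrors the guard shape above: a cancelled context is
    // only logged before the send; it does not abort it.
    func (t *taskLoop) emitDockerChange(change string) {
    	if t.ctx.Err() != nil {
    		fmt.Printf("unable to emit docker container change due to closed context: %v\n", t.ctx.Err())
    	}
    	t.dockerMessages <- change
    }

    // waitEvent returns false when an event arrived and true when stopWaiting fired.
    func (t *taskLoop) waitEvent(stopWaiting <-chan struct{}) bool {
    	select {
    	case change := <-t.dockerMessages:
    		fmt.Printf("got container event: %s\n", change)
    		return false
    	case <-stopWaiting:
    		fmt.Println("no longer waiting")
    		return true
    	}
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	t := &taskLoop{ctx: ctx, dockerMessages: make(chan string, 1)}
    	stop := make(chan struct{})

    	go t.emitDockerChange("container [db] -> RUNNING")
    	fmt.Println("stopped waiting:", t.waitEvent(stop)) // event consumed: false

    	go func() { time.Sleep(10 * time.Millisecond); close(stop) }()
    	fmt.Println("stopped waiting:", t.waitEvent(stop)) // stop fired: true
    }

Note that the guard only logs when the context is closed; the send itself still proceeds, which is why the loop must keep draining its channels until it exits.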
@@ -512,29 +513,30 @@ func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) { func (mtask *managedTask) emitContainerEvent(task *apitask.Task, cont *apicontainer.Container, reason string) { event, err := api.NewContainerStateChangeEvent(task, cont, reason) if err != nil { - mtask.log.Infof("unable to create state change event for container [%s]: %v", cont.Name, err) + seelog.Infof("Managed task [%s]: unable to create state change event for container [%s]: %v", + task.Arn, cont.Name, err) return } - mtask.log.Infof("sending container change event [%s]: %s", - cont.Name, event.String()) + seelog.Infof("Managed task [%s]: sending container change event [%s]: %s", + mtask.Arn, cont.Name, event.String()) mtask.stateChangeEvents <- event - mtask.log.Infof("sent container change event [%s]: %s", - cont.Name, event.String()) + seelog.Infof("Managed task [%s]: sent container change event [%s]: %s", + mtask.Arn, cont.Name, event.String()) } func (mtask *managedTask) emitDockerContainerChange(change dockerContainerChange) { if mtask.ctx.Err() != nil { - mtask.log.Infof("unable to emit docker container change due to closed context: %v", - mtask.ctx.Err()) + seelog.Infof("Managed task [%s]: unable to emit docker container change due to closed context: %v", + mtask.Arn, mtask.ctx.Err()) } mtask.dockerMessages <- change } func (mtask *managedTask) emitACSTransition(transition acsTransition) { if mtask.ctx.Err() != nil { - mtask.log.Infof("unable to emit acs transition due to closed context: %v", - mtask.ctx.Err()) + seelog.Infof("Managed task [%s]: unable to emit acs transition due to closed context: %v", + mtask.Arn, mtask.ctx.Err()) } mtask.acsMessages <- transition } @@ -564,18 +566,20 @@ func (mtask *managedTask) releaseIPInIPAM() { if !mtask.IsNetworkModeAWSVPC() { return } - mtask.log.Infof("IPAM releasing ip for task eni") + seelog.Infof("Managed task [%s]: IPAM releasing ip for task eni", mtask.Arn) cfg, err := mtask.BuildCNIConfig(true, &ecscni.Config{ MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion, }) if err != nil { - mtask.log.Errorf("failed to release ip; unable to build cni configuration: %v", err) + seelog.Errorf("Managed task [%s]: failed to release ip; unable to build cni configuration: %v", + mtask.Arn, err) return } err = mtask.cniClient.ReleaseIPResource(mtask.ctx, cfg, ipamCleanupTmeout) if err != nil { - mtask.log.Errorf("failed to release ip; IPAM error: %v", err) + seelog.Errorf("Managed task [%s]: failed to release ip; IPAM error: %v", + mtask.Arn, err) return } } @@ -603,8 +607,8 @@ func (mtask *managedTask) handleStoppedToRunningContainerTransition(status apico // because we got an error running it and it ran anyways), the first time // update it to 'known running' so that it will be driven back to stopped mtask.unexpectedStart.Do(func() { - mtask.log.Warnf("stopped container [%s] came back; re-stopping it once", - container.Name) + seelog.Warnf("Managed task [%s]: stopped container [%s] came back; re-stopping it once", + mtask.Arn, container.Name) go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped) // This will not proceed afterwards because status <= knownstatus below }) @@ -628,8 +632,8 @@ func (mtask *managedTask) handleEventError(containerChange dockerContainerChange // don't want to use cached image for both cases. 
if mtask.cfg.ImagePullBehavior == config.ImagePullAlwaysBehavior || mtask.cfg.ImagePullBehavior == config.ImagePullOnceBehavior { - mtask.log.Errorf("error while pulling image %s for container %s , moving task to STOPPED: %v", - container.Image, container.Name, event.Error) + seelog.Errorf("Managed task [%s]: error while pulling image %s for container %s , moving task to STOPPED: %v", + mtask.Arn, container.Image, container.Name, event.Error) // The task should be stopped regardless of whether this container is // essential or non-essential. mtask.SetDesiredStatus(apitaskstatus.TaskStopped) @@ -640,8 +644,8 @@ func (mtask *managedTask) handleEventError(containerChange dockerContainerChange // the task fail here, will let create container handle it instead. // If the agent pull behavior is default, use local image cache directly, // assuming it exists. - mtask.log.Errorf("error while pulling container %s and image %s, will try to run anyway: %v", - container.Name, container.Image, event.Error) + seelog.Errorf("Managed task [%s]: error while pulling container %s and image %s, will try to run anyway: %v", + mtask.Arn, container.Name, container.Image, event.Error) // proceed anyway return true case apicontainerstatus.ContainerStopped: @@ -651,16 +655,16 @@ func (mtask *managedTask) handleEventError(containerChange dockerContainerChange fallthrough case apicontainerstatus.ContainerCreated: // No need to explicitly stop containers if this is a * -> NONE/CREATED transition - mtask.log.Errorf("error creating container [%s]; marking its desired status as STOPPED: %v", - container.Name, event.Error) + seelog.Warnf("Managed task [%s]: error creating container [%s]; marking its desired status as STOPPED: %v", + mtask.Arn, container.Name, event.Error) container.SetKnownStatus(currentKnownStatus) container.SetDesiredStatus(apicontainerstatus.ContainerStopped) return false default: // If this is a * -> RUNNING / RESOURCES_PROVISIONED transition, we need to stop // the container. - mtask.log.Errorf("error starting/provisioning container [%s]; marking its desired status as STOPPED: %v", - container.Name, event.Error) + seelog.Warnf("Managed task [%s]: error starting/provisioning container[%s]; marking its desired status as STOPPED: %v", + mtask.Arn, container.Name, event.Error) container.SetKnownStatus(currentKnownStatus) container.SetDesiredStatus(apicontainerstatus.ContainerStopped) errorName := event.Error.ErrorName() @@ -678,8 +682,8 @@ func (mtask *managedTask) handleEventError(containerChange dockerContainerChange } if shouldForceStop { - mtask.log.Warnf("forcing container [%s] to stop", - container.Name) + seelog.Warnf("Managed task [%s]: forcing container [%s] to stop", + mtask.Arn, container.Name) go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped) } // Container known status not changed, no need for further processing @@ -701,8 +705,8 @@ func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi. // could also trigger the progress and have another go at stopping the // container if event.Error.ErrorName() == dockerapi.DockerTimeoutErrorName { - mtask.log.Infof("'%s' error stopping container [%s]. Ignoring state change: %v", - dockerapi.DockerTimeoutErrorName, container.Name, event.Error.Error()) + seelog.Infof("Managed task [%s]: '%s' error stopping container [%s]. 
Ignoring state change: %v", + mtask.Arn, dockerapi.DockerTimeoutErrorName, container.Name, event.Error.Error()) container.SetKnownStatus(currentKnownStatus) return false } @@ -710,8 +714,8 @@ func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi. // reset the known status to the current status and return cannotStopContainerError, ok := event.Error.(cannotStopContainerError) if ok && cannotStopContainerError.IsRetriableError() { - mtask.log.Infof("error stopping the container [%s]. Ignoring state change: %v", - container.Name, cannotStopContainerError.Error()) + seelog.Infof("Managed task [%s]: error stopping the container [%s]. Ignoring state change: %v", + mtask.Arn, container.Name, cannotStopContainerError.Error()) container.SetKnownStatus(currentKnownStatus) return false } @@ -722,8 +726,8 @@ func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi. // enough) and get on with it // This can happen in cases where the container we tried to stop // was already stopped or did not exist at all. - mtask.log.Warnf("'docker stop' for container [%s] returned %s: %s", - container.Name, event.Error.ErrorName(), event.Error.Error()) + seelog.Warnf("Managed task [%s]: 'docker stop' for container [%s] returned %s: %s", + mtask.Arn, container.Name, event.Error.ErrorName(), event.Error.Error()) container.SetKnownStatus(apicontainerstatus.ContainerStopped) container.SetDesiredStatus(apicontainerstatus.ContainerStopped) return true @@ -736,7 +740,7 @@ func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi. // docker completes. // Container changes may also prompt the task status to change as well. func (mtask *managedTask) progressTask() { - mtask.log.Debugf("progressing containers and resources in task") + seelog.Debugf("Managed task [%s]: progressing containers and resources in task", mtask.Arn) // max number of transitions length to ensure writes will never block on // these and if we exit early transitions can exit the goroutine and it'll // get GC'd eventually @@ -805,7 +809,7 @@ func (mtask *managedTask) progressTask() { mtask.waitForTransition(transitions, transitionChange, transitionChangeEntity) // update the task status if mtask.UpdateStatus() { - mtask.log.Infof("container or resource change also resulted in task change") + seelog.Infof("Managed task [%s]: container or resource change also resulted in task change", mtask.Arn) // If knownStatus changed, let it be known var taskStateChangeReason string @@ -821,14 +825,14 @@ func (mtask *managedTask) progressTask() { func (mtask *managedTask) isWaitingForACSExecutionCredentials(reasons []error) bool { for _, reason := range reasons { if reason == dependencygraph.CredentialsNotResolvedErr { - mtask.log.Infof("waiting for credentials to pull from ECR") + seelog.Infof("Managed task [%s]: waiting for credentials to pull from ECR", mtask.Arn) timeoutCtx, timeoutCancel := context.WithTimeout(mtask.ctx, waitForPullCredentialsTimeout) defer timeoutCancel() timedOut := mtask.waitEvent(timeoutCtx.Done()) if timedOut { - mtask.log.Infof("timed out waiting for acs credentials message") + seelog.Infof("Managed task [%s]: timed out waiting for acs credentials message", mtask.Arn) } return true } @@ -895,8 +899,8 @@ func (mtask *managedTask) startResourceTransitions(transitionFunc resourceTransi knownStatus := res.GetKnownStatus() desiredStatus := res.GetDesiredStatus() if knownStatus >= desiredStatus { - mtask.log.Debugf("resource [%s] has already transitioned to or beyond the desired 
status %s; current known is %s", - res.GetName(), res.StatusString(desiredStatus), res.StatusString(knownStatus)) + seelog.Debugf("Managed task [%s]: resource [%s] has already transitioned to or beyond the desired status %s; current known is %s", + mtask.Arn, res.GetName(), res.StatusString(desiredStatus), res.StatusString(knownStatus)) continue } anyCanTransition = true @@ -946,12 +950,12 @@ func (mtask *managedTask) applyResourceState(resource taskresource.TaskResource, resStatus := resource.StatusString(nextState) err := resource.ApplyTransition(nextState) if err != nil { - mtask.log.Infof("error transitioning resource [%s] to [%s]: %v", - resName, resStatus, err) + seelog.Infof("Managed task [%s]: error transitioning resource [%s] to [%s]: %v", + mtask.Arn, resName, resStatus, err) return err } - mtask.log.Infof("transitioned resource [%s] to [%s]", - resName, resStatus) + seelog.Infof("Managed task [%s]: transitioned resource [%s] to [%s]", + mtask.Arn, resName, resStatus) return nil } @@ -977,8 +981,8 @@ func (mtask *managedTask) containerNextState(container *apicontainer.Container) containerDesiredStatus := container.GetDesiredStatus() if containerKnownStatus == containerDesiredStatus { - mtask.log.Debugf("container [%s] at desired status: %s", - container.Name, containerDesiredStatus.String()) + seelog.Debugf("Managed task [%s]: container [%s] at desired status: %s", + mtask.Arn, container.Name, containerDesiredStatus.String()) return &containerTransition{ nextState: apicontainerstatus.ContainerStatusNone, actionRequired: false, @@ -987,8 +991,8 @@ func (mtask *managedTask) containerNextState(container *apicontainer.Container) } if containerKnownStatus > containerDesiredStatus { - mtask.log.Debugf("container [%s] has already transitioned beyond desired status(%s): %s", - container.Name, containerKnownStatus.String(), containerDesiredStatus.String()) + seelog.Debugf("Managed task [%s]: container [%s] has already transitioned beyond desired status(%s): %s", + mtask.Arn, container.Name, containerKnownStatus.String(), containerDesiredStatus.String()) return &containerTransition{ nextState: apicontainerstatus.ContainerStatusNone, actionRequired: false, @@ -997,8 +1001,8 @@ func (mtask *managedTask) containerNextState(container *apicontainer.Container) } if blocked, err := dependencygraph.DependenciesAreResolved(container, mtask.Containers, mtask.Task.GetExecutionCredentialsID(), mtask.credentialsManager, mtask.GetResources()); err != nil { - mtask.log.Debugf("can't apply state to container [%s] yet due to unresolved dependencies: %v", - container.Name, err) + seelog.Debugf("Managed task [%s]: can't apply state to container [%s] yet due to unresolved dependencies: %v", + mtask.Arn, container.Name, err) return &containerTransition{ nextState: apicontainerstatus.ContainerStatusNone, actionRequired: false, @@ -1059,16 +1063,17 @@ func (mtask *managedTask) resourceNextState(resource taskresource.TaskResource) } func (mtask *managedTask) handleContainersUnableToTransitionState() { - mtask.log.Error("task in a bad state; it's not steadystate but no containers want to transition") + seelog.Criticalf("Managed task [%s]: task in a bad state; it's not steadystate but no containers want to transition", + mtask.Arn) if mtask.GetDesiredStatus().Terminal() { // Ack, really bad. We want it to stop but the containers don't think // that's possible. let's just break out and hope for the best! 
- mtask.log.Error("The state is so bad that we're just giving up on it") + seelog.Criticalf("Managed task [%s]: The state is so bad that we're just giving up on it", mtask.Arn) mtask.SetKnownStatus(apitaskstatus.TaskStopped) mtask.emitTaskEvent(mtask.Task, taskUnableToTransitionToStoppedReason) // TODO we should probably panic here } else { - mtask.log.Error("moving task to stopped due to bad state") + seelog.Criticalf("Managed task [%s]: moving task to stopped due to bad state", mtask.Arn) mtask.handleDesiredStatusChange(apitaskstatus.TaskStopped, 0) } } @@ -1080,14 +1085,14 @@ func (mtask *managedTask) waitForTransition(transitions map[string]string, // to ensure that there is at least one container or resource can be processed in the next // progressTask call. This is done by waiting for one transition/acs/docker message. if !mtask.waitEvent(transition) { - mtask.log.Debugf("received non-transition events") + seelog.Debugf("Managed task [%s]: received non-transition events", mtask.Arn) return } transitionedEntity := <-transitionChangeEntity - mtask.log.Debugf("transition for [%s] finished", - transitionedEntity) + seelog.Debugf("Managed task [%s]: transition for [%s] finished", + mtask.Arn, transitionedEntity) delete(transitions, transitionedEntity) - mtask.log.Debugf("still waiting for: %v", transitions) + seelog.Debugf("Managed task [%s]: still waiting for: %v", mtask.Arn, transitions) } func (mtask *managedTask) time() ttime.Time { @@ -1103,7 +1108,7 @@ func (mtask *managedTask) cleanupTask(taskStoppedDuration time.Duration) { cleanupTimeDuration := mtask.GetKnownStatusTime().Add(taskStoppedDuration).Sub(ttime.Now()) cleanupTime := make(<-chan time.Time) if cleanupTimeDuration < 0 { - mtask.log.Infof("Cleanup Duration has been exceeded. Starting cleanup now ") + seelog.Infof("Managed task [%s]: Cleanup Duration has been exceeded. Starting cleanup now ", mtask.Arn) cleanupTime = mtask.time().After(time.Nanosecond) } else { cleanupTime = mtask.time().After(cleanupTimeDuration) @@ -1120,12 +1125,12 @@ func (mtask *managedTask) cleanupTask(taskStoppedDuration time.Duration) { // wait for apitaskstatus.TaskStopped to be sent ok := mtask.waitForStopReported() if !ok { - mtask.log.Errorf("aborting cleanup for task as it is not reported as stopped. SentStatus: %s", - mtask.GetSentStatus().String()) + seelog.Errorf("Managed task [%s]: aborting cleanup for task as it is not reported as stopped. SentStatus: %s", + mtask.Arn, mtask.GetSentStatus().String()) return } - mtask.log.Infof("cleaning up task's containers and data") + seelog.Infof("Managed task [%s]: cleaning up task's containers and data", mtask.Arn) // For the duration of this, simply discard any task events; this ensures the // speedy processing of other events for other tasks @@ -1169,8 +1174,8 @@ func (mtask *managedTask) waitForStopReported() bool { taskStopped = true break } - mtask.log.Warnf("blocking cleanup until the task has been reported stopped. SentStatus: %s (%d/%d)", - sentStatus.String(), i+1, _maxStoppedWaitTimes) + seelog.Warnf("Managed task [%s]: blocking cleanup until the task has been reported stopped. SentStatus: %s (%d/%d)", + mtask.Arn, sentStatus.String(), i+1, _maxStoppedWaitTimes) mtask._time.Sleep(_stoppedSentWaitInterval) } stoppedSentBool <- struct{}{} diff --git a/agent/logger/contextual_logger.go b/agent/logger/contextual_logger.go deleted file mode 100644 index 897c34f3b81..00000000000 --- a/agent/logger/contextual_logger.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014-2015 Amazon.com, Inc. 
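For context, the convention this revert restores throughout task_manager.go is manual prefixing on the shared seelog logger: every call site splices the task ARN into the message itself instead of carrying a per-task contextual logger. A minimal, self-contained sketch of that pattern, assuming a hypothetical managedTask stand-in rather than the agent's real struct:

package main

import "github.com/cihub/seelog"

// managedTask is a stand-in for illustration; the real agent type
// carries far more state. Only the Arn field matters here.
type managedTask struct {
	Arn string
}

// logf shows the restored convention: the task ARN is formatted into
// every message rather than living in logger context.
func (mtask *managedTask) logf(format string, args ...interface{}) {
	seelog.Infof("Managed task [%s]: "+format, append([]interface{}{mtask.Arn}, args...)...)
}

func main() {
	defer seelog.Flush()
	mtask := &managedTask{Arn: "arn:aws:ecs:us-west-2:123456789012:task/example"}
	mtask.logf("progressing containers and resources in task")
}

The trade-off is repetition at each call site in exchange for a single, globally configured logger.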
diff --git a/agent/logger/contextual_logger.go b/agent/logger/contextual_logger.go
deleted file mode 100644
index 897c34f3b81..00000000000
--- a/agent/logger/contextual_logger.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"). You may
-// not use this file except in compliance with the License. A copy of the
-// License is located at
-//
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
-
-package logger
-
-import (
-	"path/filepath"
-	"runtime"
-
-	"github.com/cihub/seelog"
-)
-
-// Contextual is a logger that can have custom context added to it. Once
-// SetContext is called, it will print log messages with the additional context
-// appended. Before SetContext is called, it will print messages using the
-// default agent logger.
-type Contextual struct {
-	log     seelog.LoggerInterface
-	context map[string]string
-}
-
-// Debugf formats message according to format specifier
-// and writes to log with level = Debug.
-func (c *Contextual) Debugf(format string, params ...interface{}) {
-	if c.log == nil {
-		seelog.Debugf(format, params...)
-	} else {
-		c.log.Debugf(format, params...)
-	}
-}
-
-// Infof formats message according to format specifier
-// and writes to log with level = Info.
-func (c *Contextual) Infof(format string, params ...interface{}) {
-	if c.log == nil {
-		seelog.Infof(format, params...)
-	} else {
-		c.log.Infof(format, params...)
-	}
-}
-
-// Warnf formats message according to format specifier
-// and writes to log with level = Warn.
-func (c *Contextual) Warnf(format string, params ...interface{}) error {
-	if c.log == nil {
-		return seelog.Warnf(format, params...)
-	} else {
-		return c.log.Warnf(format, params...)
-	}
-}
-
-// Errorf formats message according to format specifier
-// and writes to log with level = Error.
-func (c *Contextual) Errorf(format string, params ...interface{}) error {
-	if c.log == nil {
-		return seelog.Errorf(format, params...)
-	} else {
-		return c.log.Errorf(format, params...)
-	}
-}
-
-// Debug formats message using the default formats for its operands
-// and writes to log with level = Debug
-func (c *Contextual) Debug(v ...interface{}) {
-	if c.log == nil {
-		seelog.Debug(v...)
-	} else {
-		c.log.Debug(v...)
-	}
-}
-
-// Info formats message using the default formats for its operands
-// and writes to log with level = Info
-func (c *Contextual) Info(v ...interface{}) {
-	if c.log == nil {
-		seelog.Info(v...)
-	} else {
-		c.log.Info(v...)
-	}
-}
-
-// Warn formats message using the default formats for its operands
-// and writes to log with level = Warn
-func (c *Contextual) Warn(v ...interface{}) error {
-	if c.log == nil {
-		return seelog.Warn(v...)
-	} else {
-		return c.log.Warn(v...)
-	}
-}
-
-// Error formats message using the default formats for its operands
-// and writes to log with level = Error
-func (c *Contextual) Error(v ...interface{}) error {
-	if c.log == nil {
-		return seelog.Error(v...)
-	} else {
-		return c.log.Error(v...)
-	}
-}
-
-func (c *Contextual) SetContext(context map[string]string) {
-	if c.log == nil {
-		c.log = InitLogger()
-		_, f, _, _ := runtime.Caller(1)
-		context["module"] = filepath.Base(f)
-		c.log.SetContext(context)
-	}
-}
diff --git a/agent/logger/log.go b/agent/logger/log.go
index d65409d2b5c..c74a570f53a 100644
--- a/agent/logger/log.go
+++ b/agent/logger/log.go
@@ -16,7 +16,6 @@ package logger
 import (
 	"fmt"
 	"os"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -54,45 +53,30 @@ var Config *logConfig
 
 func logfmtFormatter(params string) seelog.FormatterFunc {
 	return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} {
-		c := getContext(context)
-		var cSorted []string
-		for k, v := range c {
-			cSorted = append(cSorted, k+"="+v)
-		}
-		sort.Strings(cSorted)
-		return fmt.Sprintf(`level=%s time=%s msg=%q %s
-`, level.String(), context.CallTime().UTC().Format(time.RFC3339), message, strings.Join(cSorted, " "))
+		return fmt.Sprintf(`level=%s time=%s msg=%q module=%s
+`, level.String(), context.CallTime().UTC().Format(time.RFC3339), message, context.FileName())
 	}
 }
 
 func jsonFormatter(params string) seelog.FormatterFunc {
 	return func(message string, level seelog.LogLevel, context seelog.LogContextInterface) interface{} {
-		c := getContext(context)
-		var cStr string
-		for k, v := range c {
-			cStr += fmt.Sprintf(", %q: %q", k, v)
-		}
-		return fmt.Sprintf(`{"level": %q, "time": %q, "msg": %q%s}
-`, level.String(), context.CallTime().UTC().Format(time.RFC3339), message, cStr)
+		return fmt.Sprintf(`{"level": %q, "time": %q, "msg": %q, "module": %q}
+`, level.String(), context.CallTime().UTC().Format(time.RFC3339), message, context.FileName())
 	}
 }
 
-// gets any custom context that has been added to this logger as a map, as well
-// as setting the 'module' context if it has not been set yet.
-func getContext(context seelog.LogContextInterface) map[string]string {
-	c, ok := context.CustomContext().(map[string]string)
-	if !ok || c == nil {
-		c = map[string]string{}
-	}
-	if _, ok = c["module"]; !ok {
-		c["module"] = context.FileName()
+func reloadConfig() {
+	logger, err := seelog.LoggerFromConfigAsString(seelogConfig())
+	if err == nil {
+		seelog.ReplaceLogger(logger)
+	} else {
+		seelog.Error(err)
 	}
-	return c
 }
 
 func seelogConfig() string {
 	c := `
-
+
 `
 	c += platformLogConfig()
@@ -112,7 +96,6 @@ func seelogConfig() string {
-
 `
 	return c
 }
@@ -134,7 +117,7 @@ func SetLevel(logLevel string) {
 		Config.lock.Lock()
 		defer Config.lock.Unlock()
 		Config.level = parsedLevel
-		reloadMainConfig()
+		reloadConfig()
 	}
 }
@@ -146,24 +129,6 @@ func GetLevel() string {
 	return Config.level
 }
 
-func InitLogger() seelog.LoggerInterface {
-	logger, err := seelog.LoggerFromConfigAsString(seelogConfig())
-	if err != nil {
-		seelog.Errorf("Error creating seelog logger: %s", err)
-		return seelog.Default
-	}
-	return logger
-}
-
-func reloadMainConfig() {
-	logger, err := seelog.LoggerFromConfigAsString(seelogConfig())
-	if err == nil {
-		seelog.ReplaceLogger(logger)
-	} else {
-		seelog.Error(err)
-	}
-}
-
 func init() {
 	Config = &logConfig{
 		logfile: os.Getenv(LOGFILE_ENV_VAR),
@@ -174,9 +139,7 @@
 		MaxRollCount: DEFAULT_MAX_ROLL_COUNT,
 	}
 
-	if level := os.Getenv(LOGLEVEL_ENV_VAR); level != "" {
-		SetLevel(level)
-	}
+	SetLevel(os.Getenv(LOGLEVEL_ENV_VAR))
 	if RolloverType := os.Getenv(LOG_ROLLOVER_TYPE_ENV_VAR); RolloverType != "" {
 		Config.RolloverType = RolloverType
 	}
@@ -206,6 +169,7 @@
 	if err := seelog.RegisterCustomFormatter("EcsAgentJson", jsonFormatter); err != nil {
 		seelog.Error(err)
 	}
+
 	registerPlatformLogger()
-	seelog.ReplaceLogger(InitLogger())
+	reloadConfig()
 }
diff --git a/agent/logger/log_test.go b/agent/logger/log_test.go
index fbe0ae0df56..789b04ce0da 100644
--- a/agent/logger/log_test.go
+++ b/agent/logger/log_test.go
@@ -1,4 +1,4 @@
-// +build !windows,unit
+// +build !windows
 
 // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 //
@@ -32,53 +32,12 @@ func TestLogfmtFormat(t *testing.T) {
 `, s)
 }
 
-func TestLogfmtFormat_context(t *testing.T) {
-	logfmt := logfmtFormatter("")
-	out := logfmt("This is my log message", seelog.InfoLvl, &LogContextMock{
-		context: map[string]string{
-			"myID":  "12345",
-			"myARN": "arn:12345:/abc",
-		},
-	})
-	s, ok := out.(string)
-	require.True(t, ok)
-	require.Equal(t, `level=info time=2018-10-01T01:02:03Z msg="This is my log message" module=mytestmodule.go myARN=arn:12345:/abc myID=12345
-`, s)
-}
-
 func TestJSONFormat(t *testing.T) {
 	jsonF := jsonFormatter("")
 	out := jsonF("This is my log message", seelog.InfoLvl, &LogContextMock{})
 	s, ok := out.(string)
 	require.True(t, ok)
-	require.JSONEq(t, `
-	{
-		"level": "info",
-		"time": "2018-10-01T01:02:03Z",
-		"msg": "This is my log message",
-		"module": "mytestmodule.go"
-	}`, s)
-}
-
-func TestJSONFormat_context(t *testing.T) {
-	jsonF := jsonFormatter("")
-	out := jsonF("This is my log message", seelog.InfoLvl, &LogContextMock{
-		context: map[string]string{
-			"myID":  "12345",
-			"myARN": "arn:12345:/abc",
-		},
-	})
-	s, ok := out.(string)
-	require.True(t, ok)
-	require.JSONEq(t, `
-	{
-		"level": "info",
-		"time": "2018-10-01T01:02:03Z",
-		"msg": "This is my log message",
-		"module": "mytestmodule.go",
-		"myARN":"arn:12345:/abc",
-		"myID":"12345"
-	}`, s)
+	require.JSONEq(t, `{"level": "info", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message", "module": "mytestmodule.go"}`, s)
 }
 
 func TestLogfmtFormat_debug(t *testing.T) {
@@ -95,13 +54,7 @@ func TestJSONFormat_debug(t *testing.T) {
 	out := jsonF("This is my log message", seelog.DebugLvl, &LogContextMock{})
 	s, ok := out.(string)
 	require.True(t, ok)
-	require.JSONEq(t, `
-	{
-		"level": "debug",
-		"time": "2018-10-01T01:02:03Z",
-		"msg": "This is my log message",
-		"module": "mytestmodule.go"
-	}`, s)
+	require.JSONEq(t, `{"level": "debug", "time": "2018-10-01T01:02:03Z", "msg": "This is my log message", "module": "mytestmodule.go"}`, s)
 }
 
 func TestSeelogConfig_Default(t *testing.T) {
@@ -115,7 +68,7 @@ func TestSeelogConfig_Default(t *testing.T) {
 	}
 	c := seelogConfig()
 	require.Equal(t, `
-
+
-
 `, c)
 }
@@ -140,7 +92,7 @@ func TestSeelogConfig_DebugLevel(t *testing.T) {
 	}
 	c := seelogConfig()
 	require.Equal(t, `
-
+
-
 `, c)
 }
@@ -165,7 +116,7 @@ func TestSeelogConfig_SizeRollover(t *testing.T) {
 	}
 	c := seelogConfig()
 	require.Equal(t, `
-
+
-
 `, c)
 }
@@ -190,7 +140,7 @@ func TestSeelogConfig_SizeRolloverFileSizeChange(t *testing.T) {
 	}
 	c := seelogConfig()
 	require.Equal(t, `
-
+
-
 `, c)
 }
@@ -215,7 +164,7 @@ func TestSeelogConfig_SizeRolloverRollCountChange(t *testing.T) {
 	}
 	c := seelogConfig()
 	require.Equal(t, `
-
+
-
 `, c)
 }
@@ -240,7 +188,7 @@ func TestSeelogConfig_JSONOutput(t *testing.T) {
 	}
 	c := seelogConfig()
 	require.Equal(t, `
-
+
-
 `, c)
 }
 
-type LogContextMock struct {
-	context map[string]string
-}
+type LogContextMock struct{}
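For orientation, the reverted formatters emit one record per line with fixed level, time, and msg fields plus a module taken from the call site's file name, and no per-task custom context. A rough sketch of the logfmt shape, with the seelog wiring omitted for brevity:

package main

import (
	"fmt"
	"time"
)

// formatLogfmt mirrors the output shape asserted by TestLogfmtFormat
// above: fixed fields only, module derived from the caller's file name.
func formatLogfmt(level, msg, module string, t time.Time) string {
	return fmt.Sprintf("level=%s time=%s msg=%q module=%s\n",
		level, t.UTC().Format(time.RFC3339), msg, module)
}

func main() {
	t := time.Date(2018, 10, 1, 1, 2, 3, 0, time.UTC)
	fmt.Print(formatLogfmt("info", "This is my log message", "mytestmodule.go", t))
	// Prints: level=info time=2018-10-01T01:02:03Z msg="This is my log message" module=mytestmodule.go
}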
 
 // Caller's function name.
 func (l *LogContextMock) Func() string {
@@ -297,5 +242,5 @@ func (l *LogContextMock) CallTime() time.Time {
 
 // Custom context that can be set by calling logger.SetContext
 func (l *LogContextMock) CustomContext() interface{} {
-	return l.context
+	return map[string]string{}
 }
diff --git a/agent/taskresource/cgroup/cgroup.go b/agent/taskresource/cgroup/cgroup.go
index 808f560bead..8f1d341dd9d 100644
--- a/agent/taskresource/cgroup/cgroup.go
+++ b/agent/taskresource/cgroup/cgroup.go
@@ -23,11 +23,11 @@ import (
 	"time"
 
 	"github.com/aws/amazon-ecs-agent/agent/api/task/status"
-	"github.com/aws/amazon-ecs-agent/agent/logger"
 	"github.com/aws/amazon-ecs-agent/agent/taskresource"
 	control "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control"
 	resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
 	"github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper"
+	"github.com/cihub/seelog"
 	"github.com/containerd/cgroups"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -64,8 +64,6 @@ type CgroupResource struct {
 	statusToTransitions map[resourcestatus.ResourceStatus]func() error
 	// lock is used for fields that are accessed and updated concurrently
 	lock sync.RWMutex
-	// log is a custom logger with extra context specific to the cgroup struct
-	log logger.Contextual
 }
 
 // NewCgroupResource is used to return an object that implements the Resource interface
@@ -84,12 +82,6 @@ func NewCgroupResource(taskARN string,
 		resourceSpec:       resourceSpec,
 	}
 	c.initializeResourceStatusToTransitionFunction()
-	c.log.SetContext(map[string]string{
-		"taskARN":         taskARN,
-		"cgroupRoot":      cgroupRoot,
-		"cgroupMountPath": cgroupMountPath,
-		"resourceName":    resourceName,
-	})
 	return c
 }
 
@@ -167,7 +159,8 @@ func (cgroup *CgroupResource) NextKnownState() resourcestatus.ResourceStatus {
 func (cgroup *CgroupResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error {
 	transitionFunc, ok := cgroup.statusToTransitions[nextState]
 	if !ok {
-		cgroup.log.Errorf("unsupported desired state transition %s", cgroup.StatusString(nextState))
+		seelog.Errorf("Cgroup Resource [%s]: unsupported desired state transition [%s]: %s",
+			cgroup.taskARN, cgroup.GetName(), cgroup.StatusString(nextState))
 		return errors.Errorf("resource [%s]: transition to %s impossible", cgroup.GetName(),
 			cgroup.StatusString(nextState))
 	}
@@ -251,7 +244,7 @@ func (cgroup *CgroupResource) GetCreatedAt() time.Time {
 func (cgroup *CgroupResource) Create() error {
 	err := cgroup.setupTaskCgroup()
 	if err != nil {
-		cgroup.log.Errorf("unable to setup cgroup root: %v", err)
+		seelog.Criticalf("Cgroup resource [%s]: unable to setup cgroup root: %v", cgroup.taskARN, err)
 		return err
 	}
 	return nil
@@ -259,10 +252,10 @@ func (cgroup *CgroupResource) Create() error {
 
 func (cgroup *CgroupResource) setupTaskCgroup() error {
 	cgroupRoot := cgroup.cgroupRoot
-	cgroup.log.Info("setting up cgroup")
+	seelog.Debugf("Cgroup resource [%s]: setting up cgroup at: %s", cgroup.taskARN, cgroupRoot)
 
 	if cgroup.control.Exists(cgroupRoot) {
-		cgroup.log.Infof("cgroup at root already exists, skipping creation")
+		seelog.Debugf("Cgroup resource [%s]: cgroup at %s already exists, skipping creation", cgroup.taskARN, cgroupRoot)
 		return nil
 	}
 
@@ -292,7 +285,7 @@ func (cgroup *CgroupResource) Cleanup() error {
 	// Explicitly handle cgroup deleted error
 	if err != nil {
 		if err == cgroups.ErrCgroupDeleted {
-			cgroup.log.Warnf("Cgroup at root has already been removed: %v", err)
+			seelog.Warnf("Cgroup at %s has already been removed: %v", cgroup.cgroupRoot, err)
 			return nil
 		}
 		return errors.Wrapf(err, "resource: cleanup cgroup: unable to remove cgroup at %s", cgroup.cgroupRoot)
@@ -350,12 +343,6 @@ func (cgroup *CgroupResource) UnmarshalJSON(b []byte) error {
 	if temp.KnownStatus != nil {
 		cgroup.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus))
 	}
-	cgroup.log.SetContext(map[string]string{
-		"taskARN":         cgroup.taskARN,
-		"cgroupRoot":      cgroup.cgroupRoot,
-		"cgroupMountPath": cgroup.cgroupMountPath,
-		"resourceName":    resourceName,
-	})
 	return nil
 }
diff --git a/agent/taskresource/firelens/firelens_unix.go b/agent/taskresource/firelens/firelens_unix.go
index d5d94f83d82..835c297129c 100644
--- a/agent/taskresource/firelens/firelens_unix.go
+++ b/agent/taskresource/firelens/firelens_unix.go
@@ -23,11 +23,11 @@ import (
 	"sync"
 	"time"
 
+	"github.com/cihub/seelog"
 	"github.com/pkg/errors"
 
 	"github.com/aws/amazon-ecs-agent/agent/api/task/status"
 	"github.com/aws/amazon-ecs-agent/agent/credentials"
-	"github.com/aws/amazon-ecs-agent/agent/logger"
 	"github.com/aws/amazon-ecs-agent/agent/s3"
 	"github.com/aws/amazon-ecs-agent/agent/s3/factory"
 	"github.com/aws/amazon-ecs-agent/agent/taskresource"
@@ -90,8 +90,6 @@ type FirelensResource struct {
 	terminalReason     string
 	terminalReasonOnce sync.Once
 	lock               sync.RWMutex
-	// log is a custom logger with extra context specific to the firelens struct
-	log logger.Contextual
 }
 
 // NewFirelensResource returns a new FirelensResource.
@@ -124,24 +122,15 @@ func NewFirelensResource(cluster, taskARN, taskDefinition, ec2InstanceID, dataDi
 	}
 
 	firelensResource.initStatusToTransition()
-	firelensResource.initLog()
 	return firelensResource, nil
 }
 
-func (firelens *FirelensResource) initLog() {
-	firelens.log.SetContext(map[string]string{
-		"taskARN":      firelens.taskARN,
-		"configType":   firelens.firelensConfigType,
-		"resourceName": ResourceName,
-	})
-}
-
 func (firelens *FirelensResource) parseOptions(options map[string]string) error {
 	if _, ok := options[ecsLogMetadataEnableOption]; ok {
 		val := options[ecsLogMetadataEnableOption]
 		b, err := strconv.ParseBool(val)
 		if err != nil {
-			firelens.log.Warnf("Invalid value for firelens container option %s was specified: %s. Ignoring it.", ecsLogMetadataEnableOption, val)
+			seelog.Warnf("Invalid value for firelens container option %s was specified: %s. Ignoring it.", ecsLogMetadataEnableOption, val)
 		} else {
 			firelens.ecsMetadataEnabled = b
 		}
@@ -228,7 +217,6 @@ func (firelens *FirelensResource) Initialize(resourceFields *taskresource.Resour
 	firelens.ioutil = ioutilwrapper.NewIOUtil()
 	firelens.s3ClientCreator = factory.NewS3ClientCreator()
 	firelens.credentialsManager = resourceFields.CredentialsManager
-	firelens.initLog()
 }
 
 // GetNetworkMode returns the network mode of the task.
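The parseOptions hunk above keeps the warn-and-ignore behavior for malformed boolean options and only swaps the logger it warns through. A standalone sketch of that behavior, assuming a placeholder option name and fmt in place of the agent's seelog call:

package main

import (
	"fmt"
	"strconv"
)

// parseBoolOption mirrors the warn-and-ignore pattern: a bad value is
// reported and the previous setting is kept rather than failing the task.
func parseBoolOption(options map[string]string, key string, current bool) bool {
	val, ok := options[key]
	if !ok {
		return current
	}
	b, err := strconv.ParseBool(val)
	if err != nil {
		// The agent warns via seelog.Warnf here; fmt keeps this sketch
		// dependency-free.
		fmt.Printf("Invalid value for option %s was specified: %s. Ignoring it.\n", key, val)
		return current
	}
	return b
}

func main() {
	opts := map[string]string{"enable-ecs-log-metadata": "yes-please"} // not a valid bool
	enabled := parseBoolOption(opts, "enable-ecs-log-metadata", true)
	fmt.Println("ecs metadata enabled:", enabled) // stays true
}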
@@ -258,7 +246,7 @@ func (firelens *FirelensResource) DesiredTerminal() bool {
 
 func (firelens *FirelensResource) setTerminalReason(reason string) {
 	firelens.terminalReasonOnce.Do(func() {
-		firelens.log.Infof("firelens resource: setting terminal reason")
+		seelog.Infof("firelens resource: setting terminal reason for task: [%s]", firelens.taskARN)
 		firelens.terminalReason = reason
 	})
 }
@@ -485,7 +473,7 @@
 		return errors.Wrapf(err, "unable to generate firelens config file")
 	}
 
-	firelens.log.Infof("Generated firelens config file at: %s", confFilePath)
+	seelog.Infof("Generated firelens config file at: %s", confFilePath)
 	return nil
 }
 
@@ -516,7 +504,7 @@
 		return errors.Wrapf(err, "unable to download s3 config %s from bucket %s", key, bucket)
 	}
 
-	firelens.log.Debugf("Downloaded firelens config file from s3 and saved to: %s", confFilePath)
+	seelog.Debugf("Downloaded firelens config file from s3 and saved to: %s", confFilePath)
 	return nil
 }
 
@@ -559,6 +547,6 @@
 		return fmt.Errorf("unable to remove firelens resource directory %s: %v", firelens.resourceDir, err)
 	}
 
-	firelens.log.Infof("Removed firelens resource directory at %s", firelens.resourceDir)
+	seelog.Infof("Removed firelens resource directory at %s", firelens.resourceDir)
 	return nil
 }
diff --git a/agent/taskresource/firelens/firelensconfig_unix.go b/agent/taskresource/firelens/firelensconfig_unix.go
index 9ba84b9a5e3..e24de698a89 100644
--- a/agent/taskresource/firelens/firelensconfig_unix.go
+++ b/agent/taskresource/firelens/firelensconfig_unix.go
@@ -17,6 +17,7 @@ package firelens
 import (
 	"fmt"
 
+	"github.com/cihub/seelog"
 	"github.com/pkg/errors"
 
 	generator "github.com/awslabs/go-config-generator-for-fluentd-and-fluentbit"
@@ -194,7 +195,7 @@ func (firelens *FirelensResource) generateConfig() (generator.FluentConfig, erro
 		}
 		config.AddExternalConfig(s3ConfPath, generator.AfterFilters)
 	}
-	firelens.log.Infof("Included external firelens config file at: %s", firelens.externalConfigValue)
+	seelog.Infof("Included external firelens config file at: %s", firelens.externalConfigValue)
 
 	return config, nil
 }
diff --git a/agent/taskresource/firelens/json_unix.go b/agent/taskresource/firelens/json_unix.go
index 161bf3acb73..1c96d274828 100644
--- a/agent/taskresource/firelens/json_unix.go
+++ b/agent/taskresource/firelens/json_unix.go
@@ -121,7 +121,5 @@ func (firelens *FirelensResource) UnmarshalJSON(b []byte) error {
 	firelens.appliedStatusUnsafe = resourcestatus.ResourceStatus(*temp.AppliedStatus)
 	firelens.networkMode = temp.NetworkMode
 
-	firelens.initLog()
-
 	return nil
 }
diff --git a/agent/taskresource/volume/dockervolume.go b/agent/taskresource/volume/dockervolume.go
index 2adb867af53..fb214af2999 100644
--- a/agent/taskresource/volume/dockervolume.go
+++ b/agent/taskresource/volume/dockervolume.go
@@ -22,9 +22,9 @@ import (
 	"github.com/aws/amazon-ecs-agent/agent/api/task/status"
 	"github.com/aws/amazon-ecs-agent/agent/dockerclient"
 	"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
-	"github.com/aws/amazon-ecs-agent/agent/logger"
 	"github.com/aws/amazon-ecs-agent/agent/taskresource"
 	resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
+	"github.com/cihub/seelog"
 	"github.com/pkg/errors"
 )
 
@@ -63,9 +63,6 @@ type VolumeResource struct {
 
 	// lock is used for fields that are accessed and updated concurrently
 	lock sync.RWMutex
-
-	// log is a custom logger with extra context specific to the volume resource struct
-	log logger.Contextual
 }
 
 // DockerVolumeConfig represents docker volume configuration
@@ -122,19 +119,9 @@ func NewVolumeResource(ctx context.Context,
 		ctx: ctx,
 	}
 	v.initStatusToTransitions()
-	v.initLog()
 	return v, nil
 }
 
-func (vol *VolumeResource) initLog() {
-	vol.log.SetContext(map[string]string{
-		"volumeName":       vol.Name,
-		"dockerVolumeName": vol.VolumeConfig.DockerVolumeName,
-		"dockerScope":      vol.VolumeConfig.Scope,
-		"resourceName":     "volume",
-	})
-}
-
 func (vol *VolumeResource) Initialize(resourceFields *taskresource.ResourceFields,
 	taskKnownStatus status.TaskStatus,
 	taskDesiredStatus status.TaskStatus) {
@@ -186,7 +173,7 @@ func (vol *VolumeResource) GetTerminalReason() string {
 
 func (vol *VolumeResource) setTerminalReason(reason string) {
 	vol.terminalReasonOnce.Do(func() {
-		vol.log.Infof("setting terminal reason [%s] for volume resource", reason)
+		seelog.Infof("Volume Resource [%s]: setting terminal reason for volume resource", vol.Name)
 		vol.terminalReason = reason
 	})
 }
@@ -322,7 +309,7 @@ func (vol *VolumeResource) SourcePath() string {
 
 // Create performs resource creation
 func (vol *VolumeResource) Create() error {
-	vol.log.Infof("Creating volume using driver %s", vol.VolumeConfig.Driver)
+	seelog.Debugf("Creating volume with name %s using driver %s", vol.VolumeConfig.DockerVolumeName, vol.VolumeConfig.Driver)
 	volumeResponse := vol.client.CreateVolume(
 		vol.ctx,
 		vol.VolumeConfig.DockerVolumeName,
@@ -345,11 +332,11 @@ func (vol *VolumeResource) Create() error {
 func (vol *VolumeResource) Cleanup() error {
 	// Enable volume clean up if it's task scoped
 	if vol.VolumeConfig.Scope != TaskScope {
-		vol.log.Infof("Volume is shared, not removing")
+		seelog.Debugf("Volume [%s] is shared, not removing", vol.Name)
 		return nil
 	}
 
-	vol.log.Infof("Removing volume")
+	seelog.Debugf("Removing volume with name %s", vol.Name)
 	err := vol.client.RemoveVolume(vol.ctx, vol.VolumeConfig.DockerVolumeName, dockerclient.RemoveVolumeTimeout)
 
 	if err != nil {
@@ -398,6 +385,5 @@ func (vol *VolumeResource) UnmarshalJSON(b []byte) error {
 	if temp.KnownStatus != nil {
 		vol.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus))
 	}
-	vol.initLog()
 	return nil
 }
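As a closing illustration, the volume resource above combines two small patterns that survive the revert unchanged: a sync.Once guard so only the first terminal reason is recorded, and a scope check that skips cleanup for shared volumes. A self-contained sketch under simplified types, with TaskScope standing in for the agent's constant and fmt in place of seelog:

package main

import (
	"fmt"
	"sync"
)

const TaskScope = "task"

type VolumeResource struct {
	Name               string
	Scope              string
	terminalReason     string
	terminalReasonOnce sync.Once
}

// setTerminalReason records only the first reason; later callers are
// no-ops, so the root cause is not overwritten by follow-on failures.
func (vol *VolumeResource) setTerminalReason(reason string) {
	vol.terminalReasonOnce.Do(func() {
		fmt.Printf("Volume Resource [%s]: setting terminal reason\n", vol.Name)
		vol.terminalReason = reason
	})
}

// Cleanup removes the volume only when it is task scoped; shared volumes
// outlive any single task and are left in place.
func (vol *VolumeResource) Cleanup() error {
	if vol.Scope != TaskScope {
		fmt.Printf("Volume [%s] is shared, not removing\n", vol.Name)
		return nil
	}
	fmt.Printf("Removing volume with name %s\n", vol.Name)
	return nil // the agent calls the docker client's RemoveVolume here
}

func main() {
	vol := &VolumeResource{Name: "data", Scope: "shared"}
	vol.setTerminalReason("first failure")
	vol.setTerminalReason("second failure") // ignored by sync.Once
	fmt.Println("terminal reason:", vol.terminalReason)
	_ = vol.Cleanup()
}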