Commit

fix cgroup unit test
prateekchaudhry authored and YashdalfTheGray committed Dec 5, 2022
1 parent ec636a8 commit d1fa74b
Showing 4 changed files with 121 additions and 34 deletions.
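
Note: the common thread across all four files is that the expected task cgroup root now depends on which cgroup hierarchy the host runs. A minimal, self-contained sketch of the two naming schemes the updated tests assert on (the helper name and task ID here are illustrative, not taken from the agent code):

```go
package main

import "fmt"

// expectedTaskCgroupRoot illustrates the two layouts exercised in this diff:
// "/ecs/<taskID>" on cgroup v1 hosts, and the systemd slice name
// "ecstasks-<taskID>.slice" on cgroup v2 hosts.
func expectedTaskCgroupRoot(taskID string, cgroupV2 bool) string {
	if cgroupV2 {
		return fmt.Sprintf("ecstasks-%s.slice", taskID)
	}
	return fmt.Sprintf("/ecs/%s", taskID)
}

func main() {
	fmt.Println(expectedTaskCgroupRoot("task-id", false)) // /ecs/task-id
	fmt.Println(expectedTaskCgroupRoot("task-id", true))  // ecstasks-task-id.slice
}
```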
2 changes: 1 addition & 1 deletion .github/workflows/linux.yml
@@ -6,7 +6,7 @@ permissions: read-all
jobs:
unit-tests:
name: Linux unit tests
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
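Note: moving the unit-test job from ubuntu-20.04 to ubuntu-latest is presumably what surfaces the cgroup v2 cases, since newer Ubuntu runner images boot with the unified (v2) hierarchy by default while 20.04 uses v1. A generic sketch of one common way to detect the hierarchy at runtime, assuming the standard /sys/fs/cgroup mount; this is not the agent's actual config.CgroupV2 implementation:

```go
package main

import (
	"fmt"
	"os"
)

// isCgroupV2 reports whether the host exposes the cgroup v2 unified hierarchy:
// on v2 hosts the file /sys/fs/cgroup/cgroup.controllers exists at the mount
// root, while v1 hosts expose per-controller directories instead.
func isCgroupV2() bool {
	_, err := os.Stat("/sys/fs/cgroup/cgroup.controllers")
	return err == nil
}

func main() {
	fmt.Println("cgroup v2 host:", isCgroupV2())
}
```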
13 changes: 12 additions & 1 deletion agent/api/task/task_linux_test.go
@@ -51,7 +51,8 @@ const (
validTaskArn = "arn:aws:ecs:region:account-id:task/task-id"
invalidTaskArn = "invalid:task::arn"

expectedCgroupRoot = "/ecs/task-id"
expectedCgroupV1Root = "/ecs/task-id"
expectedCgroupV2Root = "ecstasks-task-id.slice"

taskVCPULimit = 2.0
taskMemoryLimit = 512
@@ -81,6 +82,13 @@ var (
scPauseContainerName = fmt.Sprintf(ServiceConnectPauseContainerNameFormat, scContainerName)
)

func getExpectedCgroupRoot() string {
if config.CgroupV2 {
return expectedCgroupV2Root
}
return expectedCgroupV1Root
}

func TestAddNetworkResourceProvisioningDependencyNop(t *testing.T) {
testTask := &Task{
Containers: []*apicontainer.Container{
@@ -244,6 +252,7 @@ func TestBuildCgroupRootHappyPath(t *testing.T) {
}

cgroupRoot, err := task.BuildCgroupRoot()
expectedCgroupRoot := getExpectedCgroupRoot()

assert.NoError(t, err)
assert.Equal(t, expectedCgroupRoot, cgroupRoot)
@@ -450,6 +459,7 @@ func TestOverrideCgroupParentHappyPath(t *testing.T) {
}

hostConfig := &dockercontainer.HostConfig{}
expectedCgroupRoot := getExpectedCgroupRoot()

assert.NoError(t, task.overrideCgroupParent(hostConfig))
assert.NotEmpty(t, hostConfig)
@@ -482,6 +492,7 @@ func TestPlatformHostConfigOverride(t *testing.T) {
}

hostConfig := &dockercontainer.HostConfig{}
expectedCgroupRoot := getExpectedCgroupRoot()

assert.NoError(t, task.platformHostConfigOverride(hostConfig))
assert.NotEmpty(t, hostConfig)
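Note: the three tests touched here (BuildCgroupRoot, overrideCgroupParent, platformHostConfigOverride) all converge on the same outcome: the task cgroup root becomes the container's cgroup parent. A simplified sketch of that wiring at the Docker API level, under the assumption that the HostConfig.CgroupParent field is what ultimately carries the value (the helper name is hypothetical):

```go
package main

import (
	"fmt"

	dockercontainer "github.com/docker/docker/api/types/container"
)

// applyCgroupParent captures the idea these tests exercise: the task-level
// cgroup root is set as the container's CgroupParent so Docker creates the
// container under the task's cgroup, whether that is a v1 path or a v2
// systemd slice.
func applyCgroupParent(hostConfig *dockercontainer.HostConfig, cgroupRoot string) {
	hostConfig.CgroupParent = cgroupRoot
}

func main() {
	hostConfig := &dockercontainer.HostConfig{}
	applyCgroupParent(hostConfig, "ecstasks-task-id.slice")
	fmt.Println(hostConfig.CgroupParent) // ecstasks-task-id.slice
}
```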
108 changes: 76 additions & 32 deletions agent/engine/docker_task_engine_linux_test.go
@@ -97,8 +97,10 @@ func TestResourceContainerProgression(t *testing.T) {
mockControl := mock_control.NewMockControl(ctrl)
mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl)
taskID := sleepTask.GetID()
cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID)
cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}
cgroupResource := cgroup.NewCgroupResource(sleepTask.Arn, mockControl, mockIO, cgroupRoot, cgroupMountPath, specs.LinuxResources{})

sleepTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
@@ -110,35 +112,69 @@
containerEventsWG := sync.WaitGroup{}
client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
serviceConnectManager.EXPECT().GetAppnetContainerTarballDir().AnyTimes()
gomock.InOrder(
// Ensure that the resource is created first
mockControl.EXPECT().Exists(gomock.Any()).Return(false),
mockControl.EXPECT().Create(gomock.Any()).Return(nil),
mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil),
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(),
client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil),
imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false),
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil),
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, containerName string, z time.Duration) {
assert.True(t, strings.Contains(containerName, sleepContainer.Name))
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}),
// Next, the sleep container is started
client.EXPECT().StartContainer(gomock.Any(), containerID+":"+sleepContainer.Name, defaultConfig.ContainerStartTimeout).Do(
func(ctx interface{}, id string, timeout time.Duration) {
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}),
)

// Hierarchical memory accounting is always enabled in CgroupV2 and no controller file exists to configure it
if config.CgroupV2 {
gomock.InOrder(
// Ensure that the resource is created first
mockControl.EXPECT().Exists(gomock.Any()).Return(false),
mockControl.EXPECT().Create(gomock.Any()).Return(nil),
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(),
client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil),
imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false),
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil),
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, containerName string, z time.Duration) {
assert.True(t, strings.Contains(containerName, sleepContainer.Name))
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}),
// Next, the sleep container is started
client.EXPECT().StartContainer(gomock.Any(), containerID+":"+sleepContainer.Name, defaultConfig.ContainerStartTimeout).Do(
func(ctx interface{}, id string, timeout time.Duration) {
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}),
)
} else {
cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID)
gomock.InOrder(
// Ensure that the resource is created first
mockControl.EXPECT().Exists(gomock.Any()).Return(false),
mockControl.EXPECT().Create(gomock.Any()).Return(nil),
mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil),
imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(),
client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}),
imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil),
imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false),
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil),
client.EXPECT().CreateContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
func(ctx interface{}, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, containerName string, z time.Duration) {
assert.True(t, strings.Contains(containerName, sleepContainer.Name))
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerCreated)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}),
// Next, the sleep container is started
client.EXPECT().StartContainer(gomock.Any(), containerID+":"+sleepContainer.Name, defaultConfig.ContainerStartTimeout).Do(
func(ctx interface{}, id string, timeout time.Duration) {
containerEventsWG.Add(1)
go func() {
eventStream <- createDockerEvent(apicontainerstatus.ContainerRunning)
containerEventsWG.Done()
}()
}).Return(dockerapi.DockerContainerMetadata{DockerID: containerID + ":" + sleepContainer.Name}),
)
}
addTaskToEngine(t, ctx, taskEngine, sleepTask, mockTime, &containerEventsWG)

cleanup := make(chan time.Time, 1)
@@ -266,6 +302,9 @@ func TestResourceContainerProgressionFailure(t *testing.T) {
mockControl := mock_control.NewMockControl(ctrl)
taskID := sleepTask.GetID()
cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}
cgroupResource := cgroup.NewCgroupResource(sleepTask.Arn, mockControl, nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{})

sleepTask.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
@@ -343,7 +382,6 @@ func TestTaskCPULimitHappyPath(t *testing.T) {
mockControl := mock_control.NewMockControl(ctrl)
mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl)
taskID := sleepTask.GetID()
cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID)
if tc.taskCPULimit.Enabled() {
// TODO Currently, the resource Setup() method gets invoked multiple
// times for a task. This is really a bug and a fortunate occurrence
@@ -360,7 +398,10 @@
}
mockControl.EXPECT().Exists(gomock.Any()).Return(false)
mockControl.EXPECT().Create(gomock.Any()).Return(nil)
mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil)
if !config.CgroupV2 {
cgroupMemoryPath := fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID)
mockIO.EXPECT().WriteFile(cgroupMemoryPath, gomock.Any(), gomock.Any()).Return(nil)
}
}

for _, container := range sleepTask.Containers {
@@ -412,6 +453,9 @@ func TestTaskCPULimitHappyPath(t *testing.T) {
taskEngine.AddTask(sleepTaskStop)
taskEngine.AddTask(sleepTaskStop)
cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}
if tc.taskCPULimit.Enabled() {
mockControl.EXPECT().Remove(cgroupRoot).Return(nil)
}
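Note: the split into v1/v2 branches exists because on cgroup v1 the agent is expected to write to memory.use_hierarchy to enable hierarchical memory accounting, whereas on cgroup v2 that accounting is always on and the controller file does not exist, so the WriteFile expectation has to be dropped. The TestTaskCPULimitHappyPath change in the same file takes the more compact route of gating only that single expectation. A self-contained sketch of the path logic the test encodes (the helper is illustrative, not agent code):

```go
package main

import "fmt"

// memoryHierarchyFile returns the cgroup v1 controller file the agent is
// expected to write in order to enable hierarchical memory accounting for a
// task, and reports false on cgroup v2, where no such file exists. The path
// mirrors the one asserted in the test.
func memoryHierarchyFile(taskID string, cgroupV2 bool) (string, bool) {
	if cgroupV2 {
		return "", false
	}
	return fmt.Sprintf("/sys/fs/cgroup/memory/ecs/%s/memory.use_hierarchy", taskID), true
}

func main() {
	if path, ok := memoryHierarchyFile("task-id", false); ok {
		fmt.Println("cgroup v1: expect a write to", path)
	}
	if _, ok := memoryHierarchyFile("task-id", true); !ok {
		fmt.Println("cgroup v2: no memory.use_hierarchy write expected")
	}
}
```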
32 changes: 32 additions & 0 deletions agent/taskresource/cgroup/cgroup_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"time"

"github.com/aws/amazon-ecs-agent/agent/config"
cgroup "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control"
"github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/mock_control"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
@@ -42,6 +43,9 @@
)

func TestCreateHappyPath(t *testing.T) {
if config.CgroupV2 {
t.Skip("Skipping TestCreateHappyPath for CgroupV2 as memory.use_hierarchy is not created when cgroupV2=true")
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()

@@ -68,6 +72,9 @@ func TestCreateCgroupPathExists(t *testing.T) {
mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl)

cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}

gomock.InOrder(
mockControl.EXPECT().Exists(gomock.Any()).Return(true),
@@ -85,6 +92,9 @@ func TestCreateCgroupError(t *testing.T) {
mockIO := mock_ioutilwrapper.NewMockIOUtil(ctrl)

cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}

gomock.InOrder(
mockControl.EXPECT().Exists(gomock.Any()).Return(false),
@@ -101,6 +111,9 @@ func TestCleanupHappyPath(t *testing.T) {

mockControl := mock_control.NewMockControl(ctrl)
cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}

mockControl.EXPECT().Remove(cgroupRoot).Return(nil)

@@ -114,6 +127,9 @@ func TestCleanupRemoveError(t *testing.T) {

mockControl := mock_control.NewMockControl(ctrl)
cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}

mockControl.EXPECT().Remove(gomock.Any()).Return(errors.New("cgroup remove error"))

@@ -127,6 +143,9 @@ func TestCleanupCgroupDeletedError(t *testing.T) {

mockControl := mock_control.NewMockControl(ctrl)
cgroupRoot := fmt.Sprintf("/ecs/%s", taskID)
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", taskID)
}

err := cgroups.ErrCgroupDeleted
wrappedErr := fmt.Errorf("cgroup remove: unable to obtain controller: %w", err)
@@ -144,6 +163,11 @@ func TestMarshal(t *testing.T) {
"\"createdAt\":\"0001-01-01T00:00:00Z\",\"desiredStatus\":\"CREATED\",\"knownStatus\":\"NONE\",\"resourceSpec\":{}}"

cgroupRoot := "/ecs/taskid"
if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", "taskid")
cgroupStr = "{\"cgroupRoot\":\"ecstasks-taskid.slice\",\"cgroupMountPath\":\"/sys/fs/cgroup\"," +
"\"createdAt\":\"0001-01-01T00:00:00Z\",\"desiredStatus\":\"CREATED\",\"knownStatus\":\"NONE\",\"resourceSpec\":{}}"
}
cgroupMountPath := "/sys/fs/cgroup"

cgroup := NewCgroupResource("", cgroup.New(), nil, cgroupRoot, cgroupMountPath, specs.LinuxResources{})
@@ -160,6 +184,14 @@ func TestUnmarshal(t *testing.T) {
cgroupMountPath := "/sys/fs/cgroup"
bytes := []byte("{\"CgroupRoot\":\"/ecs/taskid\",\"CgroupMountPath\":\"/sys/fs/cgroup\"," +
"\"CreatedAt\":\"0001-01-01T00:00:00Z\",\"DesiredStatus\":\"CREATED\",\"KnownStatus\":\"NONE\"}")

if config.CgroupV2 {
cgroupRoot = fmt.Sprintf("ecstasks-%s.slice", "taskid")
bytes = []byte("{\"CgroupRoot\":\"ecstasks-taskid.slice\",\"CgroupMountPath\":\"/sys/fs/cgroup\"," +
"\"CreatedAt\":\"0001-01-01T00:00:00Z\",\"DesiredStatus\":\"CREATED\",\"KnownStatus\":\"NONE\"}")

}

unmarshalledCgroup := &CgroupResource{}
err := unmarshalledCgroup.UnmarshalJSON(bytes)
assert.NoError(t, err)
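Note: the cgroup_test.go changes repeat the same v1/v2 selection of cgroupRoot in several tests, and TestMarshal/TestUnmarshal carry near-identical JSON literals that differ only in that field. A hedged sketch of small test helpers expressing the shared pattern (the names are hypothetical and not part of this commit):

```go
package main

import "fmt"

// testCgroupRoot returns the task cgroup root these tests expect for the
// hierarchy in use: a cgroup v1 path or a cgroup v2 systemd slice.
func testCgroupRoot(taskID string, cgroupV2 bool) string {
	if cgroupV2 {
		return fmt.Sprintf("ecstasks-%s.slice", taskID)
	}
	return fmt.Sprintf("/ecs/%s", taskID)
}

// testCgroupJSON builds the serialized form asserted in TestMarshal for a
// given root, so the v1 and v2 cases can share a single template.
func testCgroupJSON(cgroupRoot string) string {
	return fmt.Sprintf(`{"cgroupRoot":"%s","cgroupMountPath":"/sys/fs/cgroup",`+
		`"createdAt":"0001-01-01T00:00:00Z","desiredStatus":"CREATED","knownStatus":"NONE","resourceSpec":{}}`,
		cgroupRoot)
}

func main() {
	fmt.Println(testCgroupJSON(testCgroupRoot("taskid", false)))
	fmt.Println(testCgroupJSON(testCgroupRoot("taskid", true)))
}
```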
