Add unit tests for fargate package

Onur Filiz
2018-04-06 17:56:11 -07:00
committed by Robbie Zhang
parent fcbff0320c
commit e66d36308c
2 changed files with 373 additions and 0 deletions

View File

@@ -0,0 +1,239 @@
package fargate

import (
	"fmt"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

const (
	anyCPURequest        = "500m"
	anyCPULimit          = "2"
	anyMemoryRequest     = "768Mi"
	anyMemoryLimit       = "2Gi"
	anyContainerName     = "any container name"
	anyContainerImage    = "any container image"
	anyContainerReason   = "any reason"
	anyContainerExitCode = 42
)

var (
	anyContainerSpec = corev1.Container{
		Name:       anyContainerName,
		Image:      anyContainerImage,
		Command:    []string{"anyCmd"},
		Args:       []string{"anyArg1", "anyArg2"},
		WorkingDir: "/any/working/dir",
	}
)

// TestContainerDefinition verifies that Kubernetes container specs are translated to
// Fargate container definitions correctly.
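// Note that the Kubernetes Command maps to the Fargate container definition's EntryPoint and
// the Kubernetes Args map to its Command, as asserted below.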
func TestContainerDefinition(t *testing.T) {
	cntrSpec := anyContainerSpec
	cntr, err := newContainer(&cntrSpec)
	require.NoError(t, err, "failed to create container")
	assert.Equal(t, cntrSpec.Name, *cntr.definition.Name, "incorrect name")
	assert.Equal(t, cntrSpec.Image, *cntr.definition.Image, "incorrect image")
	assert.Equal(t, cntrSpec.Command[0], *cntr.definition.EntryPoint[0], "incorrect command")
	assert.Equal(t, cntrSpec.Args[0], *cntr.definition.Command[0], "incorrect args")
	assert.Equal(t, cntrSpec.WorkingDir, *cntr.definition.WorkingDirectory, "incorrect working dir")
}

// TestContainerResourceRequirementsDefaults verifies that the container gets the default CPU
// and memory resources when none are specified.
func TestContainerResourceRequirementsDefaults(t *testing.T) {
	cntrSpec := anyContainerSpec
	cntr, err := newContainer(&cntrSpec)
	require.NoError(t, err, "failed to create container")
	assert.Equal(t, containerDefaultCPULimit, *cntr.definition.Cpu, "incorrect CPU limit")
	assert.Equal(t, containerDefaultMemoryLimit, *cntr.definition.Memory, "incorrect memory limit")
}

// TestContainerResourceRequirementsWithRequestsNoLimits verifies whether the container gets
// correct CPU and memory requests when only requests are specified.
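// The requests below ("500m" CPU and "768Mi" memory) are expected to surface as 512 CPU units
// and 768 MiB, since Fargate CPU is expressed in 1/1024ths of a vCPU and memory in MiB.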
func TestContainerResourceRequirementsWithRequestsNoLimits(t *testing.T) {
	cntrSpec := anyContainerSpec
	cntrSpec.Resources = corev1.ResourceRequirements{
		Requests: map[corev1.ResourceName]resource.Quantity{
			corev1.ResourceCPU:    resource.MustParse(anyCPURequest),
			corev1.ResourceMemory: resource.MustParse(anyMemoryRequest),
		},
	}
	cntr, err := newContainer(&cntrSpec)
	require.NoError(t, err, "failed to create container")
	assert.Equal(t, int64(512), *cntr.definition.Cpu, "incorrect CPU limit")
	assert.Equal(t, int64(768), *cntr.definition.Memory, "incorrect memory limit")
}

// TestContainerResourceRequirementsWithLimitsNoRequests verifies whether the container gets
// correct CPU and memory limits when only limits are specified.
func TestContainerResourceRequirementsWithLimitsNoRequests(t *testing.T) {
	cntrSpec := anyContainerSpec
	cntrSpec.Resources = corev1.ResourceRequirements{
		Limits: map[corev1.ResourceName]resource.Quantity{
			corev1.ResourceCPU:    resource.MustParse(anyCPULimit),
			corev1.ResourceMemory: resource.MustParse(anyMemoryLimit),
		},
	}
	cntr, err := newContainer(&cntrSpec)
	require.NoError(t, err, "failed to create container")
	assert.Equal(t, int64(2048), *cntr.definition.Cpu, "incorrect CPU limit")
	assert.Equal(t, int64(2048), *cntr.definition.Memory, "incorrect memory limit")
}

// TestContainerResourceRequirementsWithRequestsAndLimits verifies whether the container gets
// correct CPU and memory limits when both requests and limits are specified.
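// The expected values below match the limits ("2" CPUs, "2Gi" memory) rather than the requests,
// i.e. the limits take precedence when both are specified.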
func TestContainerResourceRequirementsWithRequestsAndLimits(t *testing.T) {
	cntrSpec := anyContainerSpec
	cntrSpec.Resources = corev1.ResourceRequirements{
		Requests: map[corev1.ResourceName]resource.Quantity{
			corev1.ResourceCPU:    resource.MustParse(anyCPURequest),
			corev1.ResourceMemory: resource.MustParse(anyMemoryRequest),
		},
		Limits: map[corev1.ResourceName]resource.Quantity{
			corev1.ResourceCPU:    resource.MustParse(anyCPULimit),
			corev1.ResourceMemory: resource.MustParse(anyMemoryLimit),
		},
	}
	cntr, err := newContainer(&cntrSpec)
	require.NoError(t, err, "failed to create container")
	assert.Equal(t, int64(2048), *cntr.definition.Cpu, "incorrect CPU limit")
	assert.Equal(t, int64(2048), *cntr.definition.Memory, "incorrect memory limit")
}

// TestContainerResourceRequirementsTranslations verifies that Kubernetes container resource
// requirements are translated to Fargate container resource requests correctly.
func TestContainerResourceRequirementsTranslations(t *testing.T) {
	type testCase struct {
		requestedCPU          string
		requestedMemoryInMiBs string
		expectedCPU           int64
		expectedMemoryInMiBs  int64
	}
	// Expected and observed CPU quantities are in units of 1/1024th vCPUs.
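	// For example, "500m" (half a vCPU) is expected to map to 512 and "2" (two vCPUs) to 2048;
	// memory quantities are expected in MiB, so "2Gi" maps to 2048.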
	var testCases = []testCase{
		{"1m", "100Ki", 1, 1},
		{"100m", "500Ki", 102, 1},
		{"200m", "300Mi", 204, 300},
		{"500m", "500Mi", 512, 500},
		{"1000m", "512Mi", 1024, 512},
		{"1", "512Mi", 1024, 512},
		{"1500m", "1000Mi", 1536, 1000},
		{"1500m", "1024Mi", 1536, 1024},
		{"2", "2Gi", 2048, 2048},
		{"8", "30Gi", 8192, 30 * 1024},
	}
	for _, tc := range testCases {
		t.Run(
			fmt.Sprintf("cpu:%s,memory:%s", tc.requestedCPU, tc.requestedMemoryInMiBs),
			func(t *testing.T) {
				reqs := corev1.ResourceRequirements{
					Limits: map[corev1.ResourceName]resource.Quantity{
						corev1.ResourceCPU:    resource.MustParse(tc.requestedCPU),
						corev1.ResourceMemory: resource.MustParse(tc.requestedMemoryInMiBs),
					},
				}
				cntrSpec := anyContainerSpec
				cntrSpec.Resources = reqs
				cntr, err := newContainer(&cntrSpec)
				require.NoError(t, err, "failed to create container")
				assert.Truef(t,
					*cntr.definition.Cpu == tc.expectedCPU && *cntr.definition.Memory == tc.expectedMemoryInMiBs,
					"requested (cpu:%v memory:%v) expected (cpu:%v memory:%v) observed (cpu:%v memory:%v)",
					tc.requestedCPU, tc.requestedMemoryInMiBs,
					tc.expectedCPU, tc.expectedMemoryInMiBs,
					*cntr.definition.Cpu, *cntr.definition.Memory)
			})
	}
}

// TestContainerStatus verifies whether Kubernetes containers report their status correctly for
// all Fargate container state transitions.
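// As asserted below, the provisioning and pending Fargate statuses surface as a Kubernetes
// Waiting state, the running status as a Running state with Ready set, and the stopped status
// as a Terminated state carrying the exit code and reason.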
func TestContainerStatus(t *testing.T) {
	cntrSpec := anyContainerSpec
	cntr, err := newContainer(&cntrSpec)
	require.NoError(t, err, "failed to create container")
	// Fargate container status provisioning.
	state := ecs.Container{
		Name:       aws.String(anyContainerName),
		Reason:     aws.String(anyContainerReason),
		LastStatus: aws.String(containerStatusProvisioning),
		ExitCode:   aws.Int64(0),
	}
	status := cntr.getStatus(&state)
	assert.Equal(t, anyContainerName, status.Name, "incorrect name")
	assert.NotNil(t, status.State.Waiting, "incorrect state")
	assert.Equal(t, anyContainerReason, status.State.Waiting.Reason, "incorrect reason")
	assert.Nil(t, status.State.Running, "incorrect state")
	assert.Nil(t, status.State.Terminated, "incorrect state")
	assert.False(t, status.Ready, "incorrect ready")
	assert.Equal(t, anyContainerImage, status.Image, "incorrect image")
	// Fargate container status pending.
	state.LastStatus = aws.String(containerStatusPending)
	status = cntr.getStatus(&state)
	assert.Equal(t, anyContainerName, status.Name, "incorrect name")
	assert.NotNil(t, status.State.Waiting, "incorrect state")
	assert.Equal(t, anyContainerReason, status.State.Waiting.Reason, "incorrect reason")
	assert.Nil(t, status.State.Running, "incorrect state")
	assert.Nil(t, status.State.Terminated, "incorrect state")
	assert.False(t, status.Ready, "incorrect ready")
	assert.Equal(t, anyContainerImage, status.Image, "incorrect image")
	// Fargate container status running.
	state.LastStatus = aws.String(containerStatusRunning)
	status = cntr.getStatus(&state)
	assert.Equal(t, anyContainerName, status.Name, "incorrect name")
	assert.Nil(t, status.State.Waiting, "incorrect state")
	assert.NotNil(t, status.State.Running, "incorrect state")
	assert.False(t, status.State.Running.StartedAt.IsZero(), "incorrect startedat")
	assert.Nil(t, status.State.Terminated, "incorrect state")
	assert.True(t, status.Ready, "incorrect ready")
	assert.Equal(t, anyContainerImage, status.Image, "incorrect image")
	// Fargate container status stopped.
	state.LastStatus = aws.String(containerStatusStopped)
	state.ExitCode = aws.Int64(anyContainerExitCode)
	status = cntr.getStatus(&state)
	assert.Equal(t, anyContainerName, status.Name, "incorrect name")
	assert.Nil(t, status.State.Waiting, "incorrect state")
	assert.Nil(t, status.State.Running, "incorrect state")
	assert.NotNil(t, status.State.Terminated, "incorrect state")
	assert.Equal(t, int32(anyContainerExitCode), status.State.Terminated.ExitCode, "incorrect exitcode")
	assert.Equal(t, anyContainerReason, status.State.Terminated.Reason, "incorrect reason")
	assert.False(t, status.State.Terminated.StartedAt.IsZero(), "incorrect startedat")
	assert.False(t, status.State.Terminated.FinishedAt.IsZero(), "incorrect finishedat")
	assert.False(t, status.Ready, "incorrect ready")
	assert.Equal(t, anyContainerImage, status.Image, "incorrect image")
}

View File

@@ -0,0 +1,134 @@
package fargate

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestTaskSizeTableInvariants verifies that the task size table is in ascending order by CPU.
// This is necessary for Pod.mapTaskSize to function correctly.
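// If mapTaskSize scans the table from the smallest size upward and picks the first entry that
// fits, an out-of-order table could cause it to choose a larger task size than necessary.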
func TestTaskSizeTableInvariants(t *testing.T) {
	prevRow := taskSizeTable[0]
	for _, row := range taskSizeTable {
		assert.True(t, row.cpu >= prevRow.cpu, "Task size table must be in ascending order by CPU")
		prevRow = row
	}
}

// TestPodResourceRequirements verifies whether Kubernetes pod resource requirements
// are translated to Fargate task resource requests correctly.
func TestPodResourceRequirements(t *testing.T) {
	type testCase struct {
		podCPU     int64
		podMemory  int64
		taskCPU    int64
		taskMemory int64
	}
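	// The cases below cover the CPU/memory combinations supported by Fargate task sizes at the
	// time of this commit (256 CPU units with 0.5-2 GB, 512 with 1-4 GB, 1024 with 2-8 GB,
	// 2048 with 4-16 GB, 4096 with 8-30 GB), verify that smaller requests are rounded up to the
	// smallest size that fits, and include a few requests that exceed every size and are
	// expected to fail.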
	testCases := []testCase{
		{0, 0, 256, 512},
		{1, 1, 256, 512},
		{200, 256, 256, 512},
		{200, 512, 256, 512},
		{256, 3072, 512, 3072},
		{256, 512, 256, 512},
		{256, 1024, 256, 1024},
		{256, 2048, 256, 2048},
		{512, 1024, 512, 1024},
		{512, 2048, 512, 2048},
		{512, 3072, 512, 3072},
		{512, 4096, 512, 4096},
		{1024, 2 * 1024, 1024, 2 * 1024},
		{1024, 3 * 1024, 1024, 3 * 1024},
		{1024, 4 * 1024, 1024, 4 * 1024},
		{1024, 5 * 1024, 1024, 5 * 1024},
		{1024, 6 * 1024, 1024, 6 * 1024},
		{1024, 7 * 1024, 1024, 7 * 1024},
		{1024, 8 * 1024, 1024, 8 * 1024},
		{2048, 4 * 1024, 2048, 4 * 1024},
		{2048, 5 * 1024, 2048, 5 * 1024},
		{2048, 6 * 1024, 2048, 6 * 1024},
		{2048, 7 * 1024, 2048, 7 * 1024},
		{2048, 8 * 1024, 2048, 8 * 1024},
		{2048, 9 * 1024, 2048, 9 * 1024},
		{2048, 10 * 1024, 2048, 10 * 1024},
		{2048, 11 * 1024, 2048, 11 * 1024},
		{2048, 12 * 1024, 2048, 12 * 1024},
		{2048, 13 * 1024, 2048, 13 * 1024},
		{2048, 14 * 1024, 2048, 14 * 1024},
		{2048, 15 * 1024, 2048, 15 * 1024},
		{2048, 16 * 1024, 2048, 16 * 1024},
		{4096, 8 * 1024, 4096, 8 * 1024},
		{4096, 9 * 1024, 4096, 9 * 1024},
		{4096, 10 * 1024, 4096, 10 * 1024},
		{4096, 11 * 1024, 4096, 11 * 1024},
		{4096, 12 * 1024, 4096, 12 * 1024},
		{4096, 13 * 1024, 4096, 13 * 1024},
		{4096, 14 * 1024, 4096, 14 * 1024},
		{4096, 15 * 1024, 4096, 15 * 1024},
		{4096, 16 * 1024, 4096, 16 * 1024},
		{4096, 17 * 1024, 4096, 17 * 1024},
		{4096, 18 * 1024, 4096, 18 * 1024},
		{4096, 19 * 1024, 4096, 19 * 1024},
		{4096, 20 * 1024, 4096, 20 * 1024},
		{4096, 21 * 1024, 4096, 21 * 1024},
		{4096, 22 * 1024, 4096, 22 * 1024},
		{4096, 23 * 1024, 4096, 23 * 1024},
		{4096, 24 * 1024, 4096, 24 * 1024},
		{4096, 25 * 1024, 4096, 25 * 1024},
		{4096, 26 * 1024, 4096, 26 * 1024},
		{4096, 27 * 1024, 4096, 27 * 1024},
		{4096, 28 * 1024, 4096, 28 * 1024},
		{4096, 29 * 1024, 4096, 29 * 1024},
		{4096, 30 * 1024, 4096, 30 * 1024},
		{4097, 30 * 1024, 0, 0},
		{4096, 30*1024 + 1, 0, 0},
		{4096, 32 * 1024, 0, 0},
		{8192, 64 * 1024, 0, 0},
	}
	for _, tc := range testCases {
		t.Run(
			fmt.Sprintf("cpu:%v,memory:%v", tc.podCPU, tc.podMemory),
			func(t *testing.T) {
				pod := &Pod{
					taskCPU:    tc.podCPU,
					taskMemory: tc.podMemory,
				}
				err := pod.mapTaskSize()
				if tc.taskCPU != 0 {
					// Test case is expected to succeed.
					assert.NoErrorf(t, err,
						"mapTaskSize failed for (cpu:%v memory:%v)",
						tc.podCPU, tc.podMemory)
					if err != nil {
						return
					}
				} else {
					// Test case is expected to fail.
					assert.Errorf(t, err,
						"mapTaskSize expected to fail but succeeded for (cpu:%v memory:%v)",
						tc.podCPU, tc.podMemory)
					return
				}
				assert.True(t, pod.taskCPU >= tc.podCPU, "pod assigned less cpu than requested")
				assert.True(t, pod.taskMemory >= tc.podMemory, "pod assigned less memory than requested")
				assert.Truef(t,
					pod.taskCPU == tc.taskCPU && pod.taskMemory == tc.taskMemory,
					"requested (cpu:%v memory:%v) expected (cpu:%v memory:%v) observed (cpu:%v memory:%v)\n",
					tc.podCPU, tc.podMemory, tc.taskCPU, tc.taskMemory, pod.taskCPU, pod.taskMemory)
			})
	}
}