fixing golangci-lint timeouts
```
Run make lint
golangci-lint run ./...
level=error msg="Running error: context loading failed: failed to load packages: timed out to load packages: context deadline exceeded"
level=error msg="Timeout exceeded: try increasing it by passing --timeout option"
```

ref: golangci/golangci-lint#825

This appears to be an incompatibility between the client-go package and the Go version declared in the module.
Updating the module to Go 1.16 and recreating go.sum fixes the timeouts and slims down the dependency list.
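
For reference, a minimal sketch of how go.sum can be recreated after bumping the go directive (these exact commands are an assumption, not part of this commit):

```
# assumes a Go 1.16 toolchain; first edit go.mod so it declares "go 1.16"
rm go.sum
go mod tidy
```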

Also updating the golangci-lint installation step in the GitHub Actions workflow, as the previously used install script is deprecated.

Signed-off-by: Samuel Lang <[email protected]>
universam1 committed Jan 25, 2022
1 parent 211920e commit 160b313
Showing 14 changed files with 158 additions and 140 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -24,7 +24,7 @@ test:

## lint: runs golangci-lint
lint:
golangci-lint run ./...
golangci-lint run --timeout 5m ./...

## fmt: formats all go files
fmt:
2 changes: 1 addition & 1 deletion README.md
@@ -601,7 +601,7 @@ In *AWS CNI Mode* (`target-access-mode=AWSCNI`) the controller actively manages

- For security reasons the HostPort requirement might be of concern
- Direct management of the target group members is significantly faster compared to the AWS linked mode, but it requires a running controller for updates. As of now, the controller is not prepared for high availability replicated setup.
- The registration and deregistration is synced with the pod lifecycle, hence a pod in terminating phase is deregistered from the target group before shut down.
- The registration and deregistration is synced with the pod lifecycle, hence a pod in terminating phase is deregistered from the target group before shut down.
- Ingress pods are not bound to nodes in CNI mode and the deployment can scale independently.

### Configuration options
68 changes: 34 additions & 34 deletions aws/adapter.go
@@ -78,9 +78,8 @@ type Adapter struct {
}

type TargetCNIconfig struct {
Enabled bool
TargetGroupARNs []string
TargetGroupCh chan []string
Enabled bool
TargetGroupCh chan []string
}

type manifest struct {
@@ -131,14 +130,13 @@ const (
DefaultNLBCrossZone = false
DefaultNLBHTTPEnabled = false

nameTag = "Name"
LoadBalancerTypeApplication = "application"
LoadBalancerTypeNetwork = "network"
IPAddressTypeIPV4 = "ipv4"
IPAddressTypeDualstack = "dualstack"
TargetAccessModeAWSCNI = "AWSCNI"
TargetAccessModeHostPort = "HostPort"
DefaultTargetCNILabelSelector = "kube-system/application=skipper-ingress"
nameTag = "Name"
LoadBalancerTypeApplication = "application"
LoadBalancerTypeNetwork = "network"
IPAddressTypeIPV4 = "ipv4"
IPAddressTypeDualstack = "dualstack"
TargetAccessModeAWSCNI = "AWSCNI"
TargetAccessModeHostPort = "HostPort"
)

var (
@@ -576,20 +574,14 @@ func (a *Adapter) FindManagedStacks() ([]*Stack, error) {
// config to have relevant Target Groups and registers/deregisters single
// instances (that do not belong to ASG) in relevant Target Groups.
func (a *Adapter) UpdateTargetGroupsAndAutoScalingGroups(stacks []*Stack, problems *problem.List) {
targetGroupARNs := make([]string, 0, len(stacks))
allTargetGroupARNs := make([]string, 0, len(stacks))
for _, stack := range stacks {
if len(stack.TargetGroupARNs) > 0 {
targetGroupARNs = append(targetGroupARNs, stack.TargetGroupARNs...)
allTargetGroupARNs = append(allTargetGroupARNs, stack.TargetGroupARNs...)
}
}

// don't do anything if there are no target groups
if len(targetGroupARNs) == 0 {
return
}

// split the full list into relevant TG types
targetTypesARNs, err := categorizeTargetTypeInstance(a.elbv2, targetGroupARNs)
// split the full list into TG types
targetTypesARNs, err := categorizeTargetTypeInstance(a.elbv2, allTargetGroupARNs)
if err != nil {
problems.Add("failed to categorize Target Type Instance: %w", err)
return
@@ -600,14 +592,21 @@ func (a *Adapter) UpdateTargetGroupsAndAutoScalingGroups(stacks []*Stack, proble
a.TargetCNI.TargetGroupCh <- targetTypesARNs[elbv2.TargetTypeEnumIp]
}

// remove the IP TGs from the list keeping all other TGs including problematic #127 and nonexistent #436
targetGroupARNs := difference(allTargetGroupARNs, targetTypesARNs[elbv2.TargetTypeEnumIp])
// don't do anything if there are no target groups
if len(targetGroupARNs) == 0 {
return
}

ownerTags := map[string]string{
clusterIDTagPrefix + a.ClusterID(): resourceLifecycleOwned,
kubernetesCreatorTag: a.controllerID,
}

for _, asg := range a.TargetedAutoScalingGroups {
// This call is idempotent and safe to execute every time
if err := updateTargetGroupsForAutoScalingGroup(a.autoscaling, a.elbv2, targetTypesARNs[elbv2.TargetTypeEnumInstance], asg.name, ownerTags); err != nil {
if err := updateTargetGroupsForAutoScalingGroup(a.autoscaling, a.elbv2, targetGroupARNs, asg.name, ownerTags); err != nil {
problems.Add("failed to update target groups for autoscaling group %q: %w", asg.name, err)
}
}
@@ -624,13 +623,13 @@ func (a *Adapter) UpdateTargetGroupsAndAutoScalingGroups(stacks []*Stack, proble
runningSingleInstances := a.RunningSingleInstances()
if len(runningSingleInstances) != 0 {
// This call is idempotent too
if err := registerTargetsOnTargetGroups(a.elbv2, targetTypesARNs[elbv2.TargetTypeEnumInstance], runningSingleInstances); err != nil {
if err := registerTargetsOnTargetGroups(a.elbv2, targetGroupARNs, runningSingleInstances); err != nil {
problems.Add("failed to register instances %q in target groups: %w", runningSingleInstances, err)
}
}
if len(a.obsoleteInstances) != 0 {
// Deregister instances from target groups and clean up list of obsolete instances
if err := deregisterTargetsOnTargetGroups(a.elbv2, targetTypesARNs[elbv2.TargetTypeEnumInstance], a.obsoleteInstances); err != nil {
if err := deregisterTargetsOnTargetGroups(a.elbv2, targetGroupARNs, a.obsoleteInstances); err != nil {
problems.Add("failed to deregister instances %q in target groups: %w", a.obsoleteInstances, err)
} else {
a.obsoleteInstances = make([]string, 0)
@@ -1046,8 +1045,9 @@ func nonTargetedASGs(ownedASGs, targetedASGs map[string]*autoScalingGroupDetails

// SetTargetsOnCNITargetGroups implements desired state for CNI target groups
// by polling the current list of targets thus creating a diff of what needs to be added and removed.
func (a *Adapter) SetTargetsOnCNITargetGroups(endpoints []string) error {
for _, targetGroupARN := range a.TargetCNI.TargetGroupARNs {
func (a *Adapter) SetTargetsOnCNITargetGroups(endpoints, cniTargetGroupARNs []string) error {
log.Debugf("setting targets on CNI target groups: '%v'", cniTargetGroupARNs)
for _, targetGroupARN := range cniTargetGroupARNs {
tgh, err := a.elbv2.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: &targetGroupARN})
if err != nil {
log.Errorf("unable to describe target health %v", err)
@@ -1058,18 +1058,18 @@ func (a *Adapter) SetTargetsOnCNITargetGroups(endpoints []string) error {
for i, target := range tgh.TargetHealthDescriptions {
registeredInstances[i] = *target.Target.Id
}
toregister := difference(endpoints, registeredInstances)
if len(toregister) > 0 {
log.Info("Registering CNI targets: ", toregister)
err := registerTargetsOnTargetGroups(a.elbv2, []string{targetGroupARN}, toregister)
toRegister := difference(endpoints, registeredInstances)
if len(toRegister) > 0 {
log.Info("Registering CNI targets: ", toRegister)
err := registerTargetsOnTargetGroups(a.elbv2, []string{targetGroupARN}, toRegister)
if err != nil {
return err
}
}
toderegister := difference(registeredInstances, endpoints)
if len(toderegister) > 0 {
log.Info("Deregistering CNI targets: ", toderegister)
err := deregisterTargetsOnTargetGroups(a.elbv2, []string{targetGroupARN}, toderegister)
toDeregister := difference(registeredInstances, endpoints)
if len(toDeregister) > 0 {
log.Info("Deregistering CNI targets: ", toDeregister)
err := deregisterTargetsOnTargetGroups(a.elbv2, []string{targetGroupARN}, toDeregister)
if err != nil {
return err
}
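The register/deregister reconciliation in this file hinges on the repository's `difference` helper. As a hedged sketch (the actual implementation may differ), it can be read as a simple set subtraction that returns the elements of the first slice absent from the second:

```go
// difference returns the elements of a that are not present in b.
// Sketch only: it mirrors how the helper is used in the diff above,
// not necessarily the exact implementation in the repository.
func difference(a, b []string) []string {
	seen := make(map[string]struct{}, len(b))
	for _, s := range b {
		seen[s] = struct{}{}
	}
	var out []string
	for _, s := range a {
		if _, ok := seen[s]; !ok {
			out = append(out, s)
		}
	}
	return out
}
```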
11 changes: 5 additions & 6 deletions aws/adapter_test.go
@@ -949,6 +949,7 @@ func TestWithxlbHealthyThresholdCount(t *testing.T) {
}

func TestAdapter_SetTargetsOnCNITargetGroups(t *testing.T) {
tgARNs := []string{"asg1"}
thOut := elbv2.DescribeTargetHealthOutput{TargetHealthDescriptions: []*elbv2.TargetHealthDescription{}}
m := &mockElbv2Client{
outputs: elbv2MockOutputs{
@@ -960,9 +961,7 @@
a := &Adapter{elbv2: m, TargetCNI: &TargetCNIconfig{}}

t.Run("adding a new endpoint", func(t *testing.T) {
a.TargetCNI.TargetGroupARNs = []string{"asg1"}

require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1"}))
require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1"}, tgARNs))
require.Equal(t, []*elbv2.RegisterTargetsInput{{
TargetGroupArn: aws.String("asg1"),
Targets: []*elbv2.TargetDescription{{Id: aws.String("1.1.1.1")}},
@@ -976,7 +975,7 @@ func TestAdapter_SetTargetsOnCNITargetGroups(t *testing.T) {
}
m.rtinputs, m.dtinputs = nil, nil

require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}))
require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}, tgARNs))
require.Equal(t, []*elbv2.TargetDescription{
{Id: aws.String("2.2.2.2")},
{Id: aws.String("3.3.3.3")},
@@ -992,7 +991,7 @@ func TestAdapter_SetTargetsOnCNITargetGroups(t *testing.T) {
}}
m.rtinputs, m.dtinputs = nil, nil

require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1", "3.3.3.3"}))
require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1", "3.3.3.3"}, tgARNs))
require.Equal(t, []*elbv2.RegisterTargetsInput(nil), m.rtinputs)
require.Equal(t, []*elbv2.TargetDescription{{Id: aws.String("2.2.2.2")}}, m.dtinputs[0].Targets)
})
@@ -1005,7 +1004,7 @@ func TestAdapter_SetTargetsOnCNITargetGroups(t *testing.T) {
}}
m.rtinputs, m.dtinputs = nil, nil

require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}))
require.NoError(t, a.SetTargetsOnCNITargetGroups([]string{"1.1.1.1", "2.2.2.2", "3.3.3.3"}, tgARNs))
require.Equal(t, []*elbv2.TargetDescription{{Id: aws.String("3.3.3.3")}}, m.rtinputs[0].Targets)
require.Equal(t, []*elbv2.TargetDescription{{Id: aws.String("4.4.4.4")}}, m.dtinputs[0].Targets)
})
26 changes: 18 additions & 8 deletions controller.go
@@ -83,7 +83,8 @@ var (
ingressAPIVersion string
internalDomains []string
targetAccessMode string
targetCNIPodSelector string
targetCNINamespace string
targetCNIPodLabelSelector string
denyInternalDomains bool
denyInternalRespBody string
denyInternalRespContentType string
@@ -284,17 +285,24 @@ func loadSettings() error {
Default("401").IntVar(&denyInternalRespStatusCode)
kingpin.Flag("target-access-mode", "Target group accessing Ingress via HostPort or AWS VPC CNI. Set to ASG for HostPort access or CNI for pod direct IP access.").
Default(aws.TargetAccessModeHostPort).EnumVar(&targetAccessMode, aws.TargetAccessModeHostPort, aws.TargetAccessModeAWSCNI)
kingpin.Flag("target-cni-pod-selector", "AWS VPC CNI only. Defines the query (namespace/labelselector) for ingress pods that should be linked to target group. Format 'namespace/key=value'.").
Default(aws.DefaultTargetCNILabelSelector).StringVar(&targetCNIPodSelector)
kingpin.Flag("target-cni-namespace", "AWS VPC CNI only. Defines the namespace for ingress pods that should be linked to target group.").StringVar(&targetCNINamespace)
// LabelSelector semantics https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
kingpin.Flag("target-cni-pod-labelselector", "AWS VPC CNI only. Defines the labelselector for ingress pods that should be linked to target group. Supports simple equality and multi value form (a=x,b=y) as well as complex forms (a IN (x,y,z).").StringVar(&targetCNIPodLabelSelector)
kingpin.Parse()

blacklistCertArnMap = make(map[string]bool)
for _, s := range blacklistCertARNs {
blacklistCertArnMap[s] = true
}

if sl := strings.SplitN(targetCNIPodSelector, "/", 2); len(sl) != 2 || sl[0] == "" || sl[1] == "" {
return fmt.Errorf("Invalid target-cni-pod-selector format")
if targetAccessMode == aws.TargetAccessModeAWSCNI {
if targetCNINamespace == "" {
return fmt.Errorf("target-cni-namespace is required when target-access-mode is set to %s", aws.TargetAccessModeAWSCNI)
}
// complex selector formats possible, late validation by the k8s client
if targetCNIPodLabelSelector == "" {
return fmt.Errorf("target-cni-pod-labelselector definition cannot be empty when target-access-mode is set to %s", aws.TargetAccessModeAWSCNI)
}
}

if creationTimeout < 1*time.Minute {
@@ -456,10 +464,12 @@ func main() {
if err != nil {
log.Fatal(err)
}
if err = kubeAdapter.NewInclusterConfigClientset(ctx); err != nil {
log.Fatal(err)
if targetAccessMode == aws.TargetAccessModeAWSCNI {
if err = kubeAdapter.NewInclusterConfigClientset(ctx); err != nil {
log.Fatal(err)
}
kubeAdapter.WithTargetCNIPodSelector(targetCNINamespace, targetCNIPodLabelSelector)
}
kubeAdapter.WithTargetCNIPodSelector(targetCNIPodSelector)

certificatesPerALB := maxCertsPerALB
if disableSNISupport {
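The new `target-cni-namespace` and `target-cni-pod-labelselector` flags feed the controller's ingress-pod discovery. A hypothetical sketch of how such a namespace/label-selector pair is typically used with client-go (function and variable names here are illustrative; the controller's actual kubeAdapter code is not part of this diff):

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// cniPodIPs lists the ingress pods matching the namespace and label selector
// and returns their pod IPs, which would then be registered as CNI targets.
func cniPodIPs(ctx context.Context, cs kubernetes.Interface, namespace, labelSelector string) ([]string, error) {
	pods, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
	if err != nil {
		return nil, err
	}
	ips := make([]string, 0, len(pods.Items))
	for _, p := range pods.Items {
		if p.Status.PodIP != "" {
			ips = append(ips, p.Status.PodIP)
		}
	}
	return ips, nil
}
```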
2 changes: 1 addition & 1 deletion go.mod
@@ -1,6 +1,6 @@
module github.com/zalando-incubator/kube-ingress-aws-controller

go 1.13
go 1.16

require (
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 // indirect
