Commit

Deletion of source DNSEntries has to wait for complete deletion of target entries (#407)

* deletion of source DNSEntries has to wait for complete deletion of target entries

* wrap error

Co-authored-by: Marc Vornetran <[email protected]>

---------

Co-authored-by: Marc Vornetran <[email protected]>
MartinWeindel and marc1404 authored Dec 17, 2024
1 parent 02c1a28 commit 0b8df81
Showing 5 changed files with 438 additions and 1 deletion.
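
Note: the heart of the change is in pkg/dns/source/reconciler.go (diff below). After issuing deletes for all slave DNSEntries, the source reconciler now polls until every slave is actually gone, within a 30-second budget, before it continues with the source object's own deletion. What follows is a minimal, self-contained sketch of that wait pattern; the names waitForDeletion, getEntry, and errNotFound are illustrative and not part of the repository's API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotFound stands in for an apimachinery IsNotFound check in this sketch.
var errNotFound = errors.New("not found")

// waitForDeletion polls getEntry for every name until it reports "not found",
// giving up once the shared timeout budget is exhausted.
func waitForDeletion(names []string, getEntry func(string) error, timeout time.Duration) error {
	start := time.Now()
	for _, name := range names {
		for {
			err := getEntry(name)
			if errors.Is(err, errNotFound) {
				break // this entry is gone, move on to the next one
			}
			if err != nil {
				return fmt.Errorf("get dns entry failed %s: %w", name, err)
			}
			time.Sleep(500 * time.Millisecond)
			if time.Since(start) > timeout {
				return fmt.Errorf("deletion of dns entry %s not finished", name)
			}
		}
	}
	return nil
}

func main() {
	// Simulate entries that disappear one poll after the first lookup.
	gone := map[string]bool{}
	getEntry := func(name string) error {
		if gone[name] {
			return errNotFound
		}
		gone[name] = true
		return nil
	}
	if err := waitForDeletion([]string{"e0", "e1"}, getEntry, 30*time.Second); err != nil {
		fmt.Println("wait failed:", err)
	}
}
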
1 change: 1 addition & 0 deletions Makefile
@@ -74,6 +74,7 @@ unittests: $(GINKGO)
.PHONY: new-test-integration
new-test-integration: $(REPORT_COLLECTOR) $(SETUP_ENVTEST)
@bash $(GARDENER_HACK_DIR)/test-integration.sh ./test/integration/compound
@bash $(GARDENER_HACK_DIR)/test-integration.sh ./test/integration/source

.PHONY: test
test: $(GINKGO) unittests new-test-integration
5 changes: 5 additions & 0 deletions pkg/controller/provider/mock/handler.go
@@ -8,6 +8,7 @@ import (
"encoding/json"
"fmt"
"strings"
"time"

"github.com/gardener/controller-manager-library/pkg/logger"
"k8s.io/client-go/util/flowcontrol"
@@ -39,6 +40,7 @@ type MockConfig struct {
Zones []MockZone `json:"zones"`
FailGetZones bool `json:"failGetZones"`
FailDeleteEntry bool `json:"failDeleteEntry"`
LatencyMillis int `json:"latencyMillis"`
}

var _ provider.DNSHandler = &Handler{}
@@ -114,6 +116,9 @@ func (h *Handler) ReportZoneStateConflict(zone provider.DNSHostedZone, err error
func (h *Handler) ExecuteRequests(logger logger.LogContext, zone provider.DNSHostedZone, state provider.DNSZoneState, reqs []*provider.ChangeRequest) error {
err := h.executeRequests(logger, zone, state, reqs)
h.cache.ApplyRequests(logger, err, zone, reqs)
if h.mockConfig.LatencyMillis > 0 {
time.Sleep(time.Duration(h.mockConfig.LatencyMillis) * time.Millisecond)
}
return err
}

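The new LatencyMillis field makes the mock provider sleep after every ExecuteRequests call, so target DNSEntries stay around long enough for the new wait logic in the source reconciler to be observable. A small sketch of how a test can opt into that latency, mirroring the wiring used by the integration test below ("test-run" is a placeholder run ID):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/json"

	"github.com/gardener/external-dns-management/pkg/controller/provider/mock"
)

func main() {
	// Mock provider config with 900ms of artificial latency per ExecuteRequests call.
	mcfg := mock.MockConfig{
		Name: "test-run",
		Zones: []mock.MockZone{
			{ZonePrefix: "test-run:first:", DNSName: "first.example.com"},
		},
		LatencyMillis: 900,
	}
	raw, err := json.Marshal(&mcfg)
	if err != nil {
		panic(err)
	}
	// The marshalled config goes into DNSProviderSpec.ProviderConfig of a "mock-inmemory" provider.
	providerConfig := &runtime.RawExtension{Raw: raw}
	fmt.Println(string(providerConfig.Raw))
}
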
21 changes: 20 additions & 1 deletion pkg/dns/source/reconciler.go
@@ -351,7 +351,8 @@ func (this *sourceReconciler) Delete(logger logger.LogContext, obj resources.Obj

failed := false
logger.Infof("entry source is deleting -> delete all dns entries")
-	for _, s := range this.Slaves().GetByOwner(obj) {
+	slaves := this.Slaves().GetByOwner(obj)
+	for _, s := range slaves {
logger.Infof("delete dns entry %s(%s)", s.ObjectName(), dnsutils.DNSEntry(s).GetDNSName())
err := s.Delete()
if err != nil && !errors.IsNotFound(err) {
@@ -363,6 +364,24 @@ func (this *sourceReconciler) Delete(logger logger.LogContext, obj resources.Obj
return reconcile.Delay(logger, nil)
}

// wait for deletion of all dns entries
start := time.Now()
for _, s := range slaves {
for {
_, err := s.GetResource().Get(s.Data())
if err != nil {
if errors.IsNotFound(err) {
break
}
return reconcile.Delay(logger, fmt.Errorf("get dns entry failed %s: %w", s.ObjectName(), err))
}
time.Sleep(500 * time.Millisecond)
if time.Since(start) > 30*time.Second {
return reconcile.Delay(logger, fmt.Errorf("deletion of dns entry %s not finished", s.ObjectName()))
}
}
}

fb := this.state.GetFeedback(obj.ClusterKey())
if fb != nil {
fb.Deleted(logger, "", "deleting dns entries")
234 changes: 234 additions & 0 deletions test/integration/source/dnsentry_test.go
@@ -0,0 +1,234 @@
// SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
//
// SPDX-License-Identifier: Apache-2.0

package source_test

import (
"context"
"fmt"
"strings"
"time"

"github.com/gardener/controller-manager-library/pkg/controllermanager"
. "github.com/gardener/gardener/pkg/utils/test/matchers"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/gardener/controller-manager-library/pkg/ctxutil"
"github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1"
"github.com/gardener/external-dns-management/pkg/controller/provider/mock"
)

var debug = false

var _ = Describe("DNSEntry source and DNSProvider replication controller tests", func() {
const (
entryCount = 5
)

var (
testRunID string
testRunID2 string
testNamespace1 *corev1.Namespace
testNamespace2 *corev1.Namespace
provider *v1alpha1.DNSProvider
entries []*v1alpha1.DNSEntry

checkMockDatabaseSize = func(expected int) {
dump := mock.TestMock[testRunID].BuildFullDump()
for _, zoneDump := range dump.InMemory {
switch {
case zoneDump.HostedZone.Domain == "first.example.com":
ExpectWithOffset(1, zoneDump.DNSSets).To(HaveLen(expected), "unexpected number of DNS sets in first.example.com")
default:
Fail("unexpected zone domain " + zoneDump.HostedZone.Domain)
}
}
}

checkTargetEntry = func(entry *v1alpha1.DNSEntry) {
Eventually(func(g Gomega) {
g.ExpectWithOffset(1, tc1.client.Get(ctx, client.ObjectKeyFromObject(entry), entry)).To(Succeed())
g.ExpectWithOffset(1, entry.Status.State).To(Equal("Ready"))
g.ExpectWithOffset(1, entry.Status.Targets).To(Equal(entry.Spec.Targets))
g.ExpectWithOffset(1, entry.Status.ObservedGeneration).To(Equal(entry.Generation))

list := &v1alpha1.DNSEntryList{}
g.Expect(tc2.client.List(ctx, list, client.InNamespace(testRunID2))).To(Succeed())
var targetEntry *v1alpha1.DNSEntry
for _, e := range list.Items {
if strings.HasPrefix(e.Name, fmt.Sprintf("%s-dnsentry", entry.Name)) {
targetEntry = &e
break
}
}
g.Expect(targetEntry).NotTo(BeNil())
g.Expect(targetEntry.Spec.DNSName).To(Equal(entry.Spec.DNSName))
g.Expect(targetEntry.Spec.Targets).To(Equal(entry.Spec.Targets))
}).Should(Succeed())
}
)

BeforeEach(func() {
SetDefaultEventuallyTimeout(10 * time.Second)
if debug {
SetDefaultEventuallyTimeout(30 * time.Second)
}

ctxLocal := context.Background()
ctx0 := ctxutil.CancelContext(ctxutil.WaitGroupContext(context.Background(), "main"))
ctx = ctxutil.TickContext(ctx0, controllermanager.DeletionActivity)

By("Create test Namespace")
testNamespace1 = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "source-",
},
}
testNamespace2 = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "source-",
},
}
Expect(tc1.client.Create(ctxLocal, testNamespace1)).To(Succeed())
Expect(tc2.client.Create(ctxLocal, testNamespace2)).To(Succeed())
log.Info("Created Namespace for test", "namespaceName", testNamespace1.Name)
log.Info("Created Namespace for test in second cluster", "namespaceName", testNamespace2.Name)
testRunID = testNamespace1.Name
testRunID2 = testNamespace2.Name

DeferCleanup(func() {
By("Delete test Namespace")
Expect(tc1.client.Delete(ctxLocal, testNamespace1)).To(Or(Succeed(), BeNotFoundError()))
Expect(tc2.client.Delete(ctxLocal, testNamespace2)).To(Or(Succeed(), BeNotFoundError()))
})

By("Start manager")
go func() {
defer GinkgoRecover()
args := []string{
"--kubeconfig", tc1.kubeconfigFile,
"--kubeconfig.id=source-id",
"--target", tc2.kubeconfigFile,
"--target.id=target-id",
"--target.disable-deploy-crds",
"--controllers", "compound,dnsentry-source,dnsprovider-replication",
"--omit-lease",
"--dns-delay", "1s",
"--reschedule-delay", "2s",
"--lock-status-check-period", "5s",
"--pool.size", "2",
"--dns-target-class=gardendns",
"--dns-class=gardendns",
"--target-namespace", testRunID2,
"--disable-namespace-restriction",
}
runControllerManager(ctx, args)
}()

DeferCleanup(func() {
By("Stop manager")
if ctx != nil {
ctxutil.Cancel(ctx)
}
})

mcfg := mock.MockConfig{
Name: testRunID,
Zones: []mock.MockZone{
{ZonePrefix: testRunID + ":first:", DNSName: "first.example.com"},
},
LatencyMillis: 900,
}
bytes, err := json.Marshal(&mcfg)
Expect(err).NotTo(HaveOccurred())

providerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: testRunID,
Name: "mock1-secret",
},
Data: map[string][]byte{},
Type: corev1.SecretTypeOpaque,
}
Expect(tc1.client.Create(ctx, providerSecret)).To(Succeed())
DeferCleanup(func() {
Expect(tc1.client.Delete(ctx, providerSecret)).To(Succeed())
})
provider = &v1alpha1.DNSProvider{
ObjectMeta: metav1.ObjectMeta{
Namespace: testRunID,
Name: "mock1",
},
Spec: v1alpha1.DNSProviderSpec{
Type: "mock-inmemory",
ProviderConfig: &runtime.RawExtension{Raw: bytes},
SecretRef: &corev1.SecretReference{Name: "mock1-secret", Namespace: testRunID},
},
}
Expect(tc1.client.Create(ctx, provider)).To(Succeed())

println("source kubeconfig:", tc1.kubeconfigFile)
println("target kubeconfig:", tc2.kubeconfigFile)

Eventually(func(g Gomega) {
g.Expect(tc1.client.Get(ctx, client.ObjectKeyFromObject(provider), provider)).To(Succeed())
g.Expect(provider.Status.State).To(Equal("Ready"))
}).Should(Succeed())

for i := 0; i < entryCount; i++ {
entries = append(entries, &v1alpha1.DNSEntry{
ObjectMeta: metav1.ObjectMeta{
Namespace: testRunID,
Name: fmt.Sprintf("e%d", i),
},
Spec: v1alpha1.DNSEntrySpec{
DNSName: fmt.Sprintf("e%d.first.example.com", i),
Targets: []string{fmt.Sprintf("1.1.1.%d", i)},
},
})
}
})

It("should create entries on target", func() {
for _, entry := range entries {
Expect(tc1.client.Create(ctx, entry)).To(Succeed())
}

By("check entries")
for _, entry := range entries {
checkTargetEntry(entry)
}

checkMockDatabaseSize(entryCount)

By("delete entries")
for _, entry := range entries {
Expect(tc1.client.Delete(ctx, entry)).To(Succeed())
}

By("wait for deletion")
for _, entry := range entries {
for {
if err := tc1.client.Get(ctx, client.ObjectKeyFromObject(entry), entry); err != nil {
if client.IgnoreNotFound(err) == nil {
break
}
Expect(err).NotTo(HaveOccurred())
}
time.Sleep(100 * time.Millisecond)
}
}

Expect(tc1.client.Delete(ctx, provider)).To(Succeed())

By("check mock database")
checkMockDatabaseSize(0)
})
})