From c3874a223e26396c2ca922edc1df851529aacb39 Mon Sep 17 00:00:00 2001
From: Brian Fox <878612+onematchfox@users.noreply.github.com>
Date: Sat, 27 May 2023 22:18:02 +0200
Subject: [PATCH] fix: implement filtering on cluster `List` API endpoint
 (#13363)

* feat: implement filtering on cluster list endpoint

Signed-off-by: OneMatchFox <878612+onematchfox@users.noreply.github.com>

* docs: add upgrade notes

Signed-off-by: OneMatchFox <878612+onematchfox@users.noreply.github.com>

---------

Signed-off-by: OneMatchFox <878612+onematchfox@users.noreply.github.com>
---
 docs/2.7-2.8.md                            |   5 -
 docs/operator-manual/upgrading/2.7-2.8.md  |  22 +++
 docs/operator-manual/upgrading/overview.md |   1 +
 mkdocs.yml                                 |   1 +
 server/cluster/cluster.go                  |  77 ++++++++++-
 server/cluster/cluster_test.go             | 149 +++++++++++++++++++--
 6 files changed, 235 insertions(+), 20 deletions(-)
 delete mode 100644 docs/2.7-2.8.md
 create mode 100644 docs/operator-manual/upgrading/2.7-2.8.md

diff --git a/docs/2.7-2.8.md b/docs/2.7-2.8.md
deleted file mode 100644
index 32f9e4cf1759c..0000000000000
--- a/docs/2.7-2.8.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# 2.7 to 2.8
-
-## Tini as entrypoint
-
-With the 2.8 release `entrypoint.sh` will be removed from the containers, because starting with 2.7, the implicit entrypoint is set to `tini` in the `Dockerfile` explicitly, and the kubernetes manifests has been updated to use it. Simply updating the containers without updating the deployment manifests will result in pod startup failures, as the old manifests are relying on `entrypoint.sh` instead of `tini`. Please make sure the manifests are updated properly before moving to 2.8.
diff --git a/docs/operator-manual/upgrading/2.7-2.8.md b/docs/operator-manual/upgrading/2.7-2.8.md
new file mode 100644
index 0000000000000..58c5098982246
--- /dev/null
+++ b/docs/operator-manual/upgrading/2.7-2.8.md
@@ -0,0 +1,22 @@
+# v2.7 to v2.8
+
+## Tini as entrypoint
+
+With the 2.8 release, `entrypoint.sh` will be removed from the containers:
+starting with 2.7, the entrypoint is set explicitly to `tini` in the
+`Dockerfile`, and the Kubernetes manifests have been updated to use it.
+Updating the containers without updating the deployment manifests will
+result in pod startup failures, because the old manifests rely on
+`entrypoint.sh` instead of `tini`. Please make sure the manifests are
+updated before moving to 2.8.
+
+## Filtering applied to cluster `List` API endpoint
+
+Prior to `v2.8`, the `List` endpoint on the `ClusterService` did **not**
+filter clusters when responding, despite accepting query parameters. This
+bug has been fixed: query parameters are now taken into account, and only
+the matching clusters are returned.
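+
+For example (assuming a cluster named `production` has been registered), a
+request to `/api/v1/clusters?name=production` now returns only that cluster,
+where it previously returned every cluster visible to the caller.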
diff --git a/docs/operator-manual/upgrading/overview.md b/docs/operator-manual/upgrading/overview.md
index 0c1ede757c324..419fc7bbb1353 100644
--- a/docs/operator-manual/upgrading/overview.md
+++ b/docs/operator-manual/upgrading/overview.md
@@ -37,6 +37,7 @@ kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/
 
+* [v2.7 to v2.8](./2.7-2.8.md)
 * [v2.6 to v2.7](./2.6-2.7.md)
 * [v2.5 to v2.6](./2.5-2.6.md)
 * [v2.4 to v2.5](./2.4-2.5.md)
diff --git a/mkdocs.yml b/mkdocs.yml
index 7d0da86dd10da..8738987a9a3c9 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -115,6 +115,7 @@ nav:
     - operator-manual/server-commands/additional-configuration-method.md
     - Upgrading:
       - operator-manual/upgrading/overview.md
+      - operator-manual/upgrading/2.7-2.8.md
       - operator-manual/upgrading/2.6-2.7.md
       - operator-manual/upgrading/2.5-2.6.md
       - operator-manual/upgrading/2.4-2.5.md
diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go
index c7a5e3b5e4cbf..cfcadd762ed6f 100644
--- a/server/cluster/cluster.go
+++ b/server/cluster/cluster.go
@@ -56,8 +56,21 @@ func (s *Server) List(ctx context.Context, q *cluster.ClusterQuery) (*appv1.Clus
 		return nil, err
 	}
 
+	filteredItems := clusterList.Items
+
+	// Filter clusters by id
+	if filteredItems, err = filterClustersById(filteredItems, q.Id); err != nil {
+		return nil, err
+	}
+
+	// Filter clusters by name
+	filteredItems = filterClustersByName(filteredItems, q.Name)
+
+	// Filter clusters by server
+	filteredItems = filterClustersByServer(filteredItems, q.Server)
+
 	items := make([]appv1.Cluster, 0)
-	for _, clust := range clusterList.Items {
+	for _, clust := range filteredItems {
 		if s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceClusters, rbacpolicy.ActionGet, CreateClusterRBACObject(clust.Project, clust.Server)) {
 			items = append(items, clust)
 		}
@@ -69,8 +82,66 @@ func (s *Server) List(ctx context.Context, q *cluster.ClusterQuery) (*appv1.Clus
 	if err != nil {
 		return nil, err
 	}
-	clusterList.Items = items
-	return clusterList, nil
+
+	// Copy the list before replacing Items so the original list is not mutated.
+	cl := *clusterList
+	cl.Items = items
+
+	return &cl, nil
+}
+
+func filterClustersById(clusters []appv1.Cluster, id *cluster.ClusterID) ([]appv1.Cluster, error) {
+	if id == nil {
+		return clusters, nil
+	}
+
+	var items []appv1.Cluster
+
+	switch id.Type {
+	case "name":
+		items = filterClustersByName(clusters, id.Value)
+	case "name_escaped":
+		nameUnescaped, err := url.QueryUnescape(id.Value)
+		if err != nil {
+			return nil, err
+		}
+		items = filterClustersByName(clusters, nameUnescaped)
+	default:
+		// Any other ID type is treated as a server URL.
+		items = filterClustersByServer(clusters, id.Value)
+	}
+
+	return items, nil
+}
+
+func filterClustersByName(clusters []appv1.Cluster, name string) []appv1.Cluster {
+	if name == "" {
+		return clusters
+	}
+	items := make([]appv1.Cluster, 0)
+	for _, c := range clusters {
+		if c.Name == name {
+			// Only the first matching cluster is returned.
+			items = append(items, c)
+			return items
+		}
+	}
+	return items
+}
+
+func filterClustersByServer(clusters []appv1.Cluster, server string) []appv1.Cluster {
+	if server == "" {
+		return clusters
+	}
+	items := make([]appv1.Cluster, 0)
+	for _, c := range clusters {
+		if c.Server == server {
+			// Only the first matching cluster is returned.
+			items = append(items, c)
+			return items
+		}
+	}
+	return items
 }
 
 // Create creates a cluster
diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go
index c5b07a4ee7055..c29a1b2d77c04 100644
--- a/server/cluster/cluster_test.go
+++ b/server/cluster/cluster_test.go
@@ -4,24 +4,13 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
"reflect" "testing" "time" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/utils/pointer" - "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" clusterapi "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" servercache "github.com/argoproj/argo-cd/v2/server/cache" "github.com/argoproj/argo-cd/v2/test" cacheutil "github.com/argoproj/argo-cd/v2/util/cache" @@ -30,6 +21,16 @@ import ( dbmocks "github.com/argoproj/argo-cd/v2/util/db/mocks" "github.com/argoproj/argo-cd/v2/util/rbac" "github.com/argoproj/argo-cd/v2/util/settings" + "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/pointer" ) func newServerInMemoryCache() *servercache.Cache { @@ -491,3 +492,115 @@ func getClientset(config map[string]string, ns string, objects ...runtime.Object } return fake.NewSimpleClientset(append(objects, &cm, &secret)...) } + +func TestListCluster(t *testing.T) { + db := &dbmocks.ArgoDB{} + + fooCluster := v1alpha1.Cluster{ + Name: "foo", + Server: "https://127.0.0.1", + Namespaces: []string{"default", "kube-system"}, + } + barCluster := v1alpha1.Cluster{ + Name: "bar", + Server: "https://192.168.0.1", + Namespaces: []string{"default", "kube-system"}, + } + bazCluster := v1alpha1.Cluster{ + Name: "test/ing", + Server: "https://testing.com", + Namespaces: []string{"default", "kube-system"}, + } + + mockClusterList := v1alpha1.ClusterList{ + ListMeta: v1.ListMeta{}, + Items: []v1alpha1.Cluster{fooCluster, barCluster, bazCluster}, + } + + db.On("ListClusters", mock.Anything).Return(&mockClusterList, nil) + + s := NewServer(db, newNoopEnforcer(), newServerInMemoryCache(), &kubetest.MockKubectlCmd{}) + + tests := []struct { + name string + q *cluster.ClusterQuery + want *appv1.ClusterList + wantErr bool + }{ + { + name: "filter by name", + q: &clusterapi.ClusterQuery{ + Name: fooCluster.Name, + }, + want: &v1alpha1.ClusterList{ + ListMeta: v1.ListMeta{}, + Items: []v1alpha1.Cluster{fooCluster}, + }, + }, + { + name: "filter by server", + q: &clusterapi.ClusterQuery{ + Server: barCluster.Server, + }, + want: &v1alpha1.ClusterList{ + ListMeta: v1.ListMeta{}, + Items: []v1alpha1.Cluster{barCluster}, + }, + }, + { + name: "filter by id - name", + q: &clusterapi.ClusterQuery{ + Id: &clusterapi.ClusterID{ + Type: "name", + Value: fooCluster.Name, + }, + }, + want: &v1alpha1.ClusterList{ + ListMeta: v1.ListMeta{}, + Items: []v1alpha1.Cluster{fooCluster}, + }, + }, + { + name: "filter by id - name_escaped", + q: &clusterapi.ClusterQuery{ + Id: &clusterapi.ClusterID{ + Type: "name_escaped", + Value: "test%2fing", + }, + }, + want: &v1alpha1.ClusterList{ + ListMeta: v1.ListMeta{}, + Items: []v1alpha1.Cluster{bazCluster}, + }, + }, + { + name: "filter by id - server", + q: 
+				Id: &clusterapi.ClusterID{
+					Type:  "server",
+					Value: barCluster.Server,
+				},
+			},
+			want: &v1alpha1.ClusterList{
+				ListMeta: v1.ListMeta{},
+				Items:    []v1alpha1.Cluster{barCluster},
+			},
+		},
+	}
+	for _, tt := range tests {
+		tt := tt
+
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			got, err := s.List(context.Background(), tt.q)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("Server.List() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("Server.List() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
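+
+// TestFilterClustersById_InvalidEscape is a minimal sketch of a direct unit
+// test for the error path of filterClustersById (a hypothetical addition,
+// not part of the original change): an invalid percent-encoding in a
+// "name_escaped" ID should surface the url.QueryUnescape error.
+func TestFilterClustersById_InvalidEscape(t *testing.T) {
+	clusters := []v1alpha1.Cluster{{Name: "foo", Server: "https://127.0.0.1"}}
+
+	_, err := filterClustersById(clusters, &clusterapi.ClusterID{
+		Type:  "name_escaped",
+		Value: "%zz", // not a valid percent-encoded sequence
+	})
+	assert.Error(t, err)
+}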