diff --git a/cmd/clustertree/cluster-manager/app/manager.go b/cmd/clustertree/cluster-manager/app/manager.go index 87dc0edb2..b5576a35f 100644 --- a/cmd/clustertree/cluster-manager/app/manager.go +++ b/cmd/clustertree/cluster-manager/app/manager.go @@ -213,16 +213,15 @@ func run(ctx context.Context, opts *options.Options) error { } } - // init rootPodController - rootPodReconciler := podcontrollers.RootPodReconciler{ - GlobalLeafManager: globalleafManager, - RootClient: mgr.GetClient(), + rootPodWorkerQueue := podcontrollers.NewRootPodWorkerQueue(&podcontrollers.RootPodWorkerQueueOption{ + Config: config, + RootClient: rootClient, DynamicRootClient: dynamicClient, + GlobalLeafManager: globalleafManager, Options: opts, - } - if err := rootPodReconciler.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting rootPodReconciler %s: %v", podcontrollers.RootPodControllerName, err) - } + }) + + go rootPodWorkerQueue.Run(ctx) if !opts.OnewayStorageControllers { rootPVCController := pvc.RootPVCController{ diff --git a/deploy/crds/kosmos.io_clusters.yaml b/deploy/crds/kosmos.io_clusters.yaml index d96895c13..8a9fc23c9 100644 --- a/deploy/crds/kosmos.io_clusters.yaml +++ b/deploy/crds/kosmos.io_clusters.yaml @@ -111,6 +111,9 @@ spec: type: object clusterTreeOptions: properties: + accressKey: + description: secret? + type: string enable: default: true type: boolean @@ -223,6 +226,11 @@ spec: type: array type: object type: array + leafType: + default: k8s + type: string + secretKey: + type: string type: object imageRepository: type: string diff --git a/go.mod b/go.mod index a1e37983f..8db40e219 100644 --- a/go.mod +++ b/go.mod @@ -9,10 +9,13 @@ require ( github.com/coreos/go-iptables v0.6.0 github.com/docker/docker v24.0.6+incompatible github.com/evanphx/json-patch v4.12.0+incompatible + github.com/fatih/structs v1.1.0 github.com/go-logr/logr v1.2.3 github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.1 + github.com/mitchellh/mapstructure v1.5.0 github.com/olekukonko/tablewriter v0.0.4 github.com/onsi/ginkgo/v2 v2.9.2 github.com/onsi/gomega v1.27.4 @@ -97,7 +100,6 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect diff --git a/go.sum b/go.sum index d3d8936cd..c9c8d6893 100644 --- a/go.sum +++ b/go.sum @@ -743,6 +743,8 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -1117,6 +1119,8 @@ 
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= diff --git a/pkg/apis/kosmos/v1alpha1/cluster_types.go b/pkg/apis/kosmos/v1alpha1/cluster_types.go index 9c1fe5a25..43f06e0cc 100644 --- a/pkg/apis/kosmos/v1alpha1/cluster_types.go +++ b/pkg/apis/kosmos/v1alpha1/cluster_types.go @@ -101,6 +101,17 @@ type ClusterTreeOptions struct { // LeafModels provide an api to arrange the member cluster with some rules to pretend one or more leaf node // +optional LeafModels []LeafModel `json:"leafModels,omitempty"` + + // +kubebuilder:default="k8s" + // +optional + LeafType string `json:"leafType,omitempty"` + + // secret? + // +optional + AccessKey string `json:"accressKey,omitempty"` + + // +optional + SecretKey string `json:"secretKey,omitempty"` } type LeafModel struct { diff --git a/pkg/clustertree/cluster-manager/cluster_controller.go b/pkg/clustertree/cluster-manager/cluster_controller.go index 5e417c811..a3b5e51f5 100644 --- a/pkg/clustertree/cluster-manager/cluster_controller.go +++ b/pkg/clustertree/cluster-manager/cluster_controller.go @@ -32,6 +32,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers" "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/mcs" podcontrollers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod" + leafpodsyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod" "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pv" "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pvc" leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" @@ -123,6 +124,19 @@ func (c *ClusterController) Reconcile(ctx context.Context, request reconcile.Req return controllerruntime.Result{RequeueAfter: RequeueTime}, err } + if cluster.Spec.ClusterTreeOptions.LeafType == string(leafUtils.LeafTypeServerless) { + if !cluster.DeletionTimestamp.IsZero() { + // TODO: + return reconcile.Result{}, nil + } + // TODO: .... 
+ if err := CreateOpenApiNode(ctx, cluster, c.RootClientset, c.Options); err != nil { + return controllerruntime.Result{RequeueAfter: RequeueTime}, err + } + // TODO: clean + return reconcile.Result{}, nil + } + config, err := utils.NewConfigFromBytes(cluster.Spec.Kubeconfig, func(config *rest.Config) { config.QPS = utils.DefaultLeafKubeQPS config.Burst = utils.DefaultLeafKubeBurst @@ -206,7 +220,7 @@ func (c *ClusterController) Reconcile(ctx context.Context, request reconcile.Req c.ManagerCancelFuncs[cluster.Name] = &cancel c.ControllerManagersLock.Unlock() - if err = c.setupControllers(mgr, cluster, nodes, leafDynamic, leafNodeSelectors, leafClient, kosmosClient, config); err != nil { + if err = c.setupControllers(mgr, cluster, nodes, leafDynamic, leafNodeSelectors, leafClient, kosmosClient, config, subContext); err != nil { return reconcile.Result{}, fmt.Errorf("failed to setup cluster %s controllers: %v", cluster.Name, err) } @@ -243,7 +257,8 @@ func (c *ClusterController) setupControllers( leafNodeSelector map[string]kosmosv1alpha1.NodeSelector, leafClientset kubernetes.Interface, kosmosClient kosmosversioned.Interface, - leafRestConfig *rest.Config) error { + leafRestConfig *rest.Config, + subContext context.Context) error { c.GlobalLeafManager.AddLeafResource(&leafUtils.LeafResource{ Client: mgr.GetClient(), DynamicClient: clientDynamic, @@ -255,6 +270,8 @@ func (c *ClusterController) setupControllers( IgnoreLabels: strings.Split("", ","), EnableServiceAccount: true, RestConfig: leafRestConfig, + // LeafType: leafUtils.LeafTypeK8s, + LeafType: leafUtils.LeafTypeK8s, }, cluster, nodes) nodeResourcesController := controllers.NodeResourcesController{ @@ -293,14 +310,12 @@ func (c *ClusterController) setupControllers( } } - leafPodController := podcontrollers.LeafPodReconciler{ - RootClient: c.Root, - Namespace: "", - } + leafPodWorkerQueue := podcontrollers.NewLeafPodWorkerQueue(&leafpodsyncers.LeafPodWorkerQueueOption{ + Config: leafRestConfig, + RootClient: c.RootClientset, + }, leafUtils.LeafTypeK8s) // TODO: - if err := leafPodController.SetupWithManager(mgr); err != nil { - return fmt.Errorf("error starting podUpstreamReconciler %s: %v", podcontrollers.LeafPodControllerName, err) - } + go leafPodWorkerQueue.Run(subContext) if !c.Options.OnewayStorageControllers { err := c.setupStorageControllers(mgr, utils.IsOne2OneMode(cluster), cluster.Name) diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s/syncer.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s/syncer.go new file mode 100644 index 000000000..5c6792943 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s/syncer.go @@ -0,0 +1,80 @@ +package k8s + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + "github.com/kosmos.io/kosmos/pkg/utils/podutils" +) + +type leafPodK8sSyncer struct { + LeafClient *kubernetes.Clientset + RootClient kubernetes.Interface +} + +const ( + LeafPodControllerName = "leaf-pod-controller" + LeafPodRequeueTime = 10 * time.Second +) + +func DeletePodInRootCluster(ctx context.Context, rootnamespacedname runtime.NamespacedName, rootClient kubernetes.Interface) error { + rPod, err := rootClient.CoreV1().Pods(rootnamespacedname.Namespace).Get(ctx, rootnamespacedname.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return 
nil + } else { + return err + } + } + + rPodCopy := rPod.DeepCopy() + + if err := rootClient.CoreV1().Pods(rPodCopy.Namespace).Delete(ctx, rPodCopy.Name, metav1.DeleteOptions{ + GracePeriodSeconds: new(int64), + }); err != nil { + if !errors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (s *leafPodK8sSyncer) Reconcile(ctx context.Context, key runtime.NamespacedName) (runtime.Result, error) { + pod, err := s.LeafClient.CoreV1().Pods(key.Namespace).Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + // delete pod in root + if err := DeletePodInRootCluster(ctx, key, s.RootClient); err != nil { + return runtime.Result{RequeueAfter: LeafPodRequeueTime}, nil + } + return runtime.Result{}, nil + } + + klog.Errorf("get %s error: %v", key, err) + return runtime.Result{RequeueAfter: LeafPodRequeueTime}, nil + } + + podCopy := pod.DeepCopy() + + // if ShouldSkipStatusUpdate(podCopy) { + // return reconcile.Result{}, nil + // } + + if podutils.IsKosmosPod(podCopy) { + podutils.FitObjectMeta(&podCopy.ObjectMeta) + podCopy.ResourceVersion = "0" + if _, err := s.RootClient.CoreV1().Pods(podCopy.Namespace).UpdateStatus(ctx, podCopy, metav1.UpdateOptions{}); err != nil && !errors.IsNotFound(err) { + klog.V(4).Info(fmt.Sprintf("error while updating pod status in kubernetes: %s", err)) + return runtime.Result{RequeueAfter: LeafPodRequeueTime}, nil + } + } + return runtime.Result{}, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s/workerqueue.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s/workerqueue.go new file mode 100644 index 000000000..d959d060d --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s/workerqueue.go @@ -0,0 +1,102 @@ +package k8s + +import ( + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + leafpodsyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + "github.com/kosmos.io/kosmos/pkg/utils" + "github.com/kosmos.io/kosmos/pkg/utils/podutils" +) + +func NewLeafPodK8wWorkerQueue(opts *leafpodsyncers.LeafPodWorkerQueueOption) runtime.Controller { + // create the workqueue + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + + client, err := kubernetes.NewForConfig(opts.Config) + if err != nil { + klog.Fatal(err) + } + + // Create a shared informer factory for Kubernetes pods in the current namespace (if specified) and scheduled to the current node. 
+ podInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( + client, + 5*time.Second, + ) + + podInformer := podInformerFactory.Core().V1().Pods() + + eventFilter := func(obj interface{}) (bool, *corev1.Pod) { + p, ok := obj.(*corev1.Pod) + + if !ok { + klog.Fatal("convert pod error") + return false, p + } + + if len(p.Spec.NodeName) == 0 { + return false, p + } + + if p.GetNamespace() == utils.ReservedNS { + return false, p + } + + return podutils.IsKosmosPod(p), p + } + + _, err = podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if flag, pod := eventFilter(obj); flag { + queue.Add(runtime.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }) + } + }, + UpdateFunc: func(old interface{}, new interface{}) { + if flag, pod := eventFilter(old); flag { + if !cmp.Equal(old.(*corev1.Pod).Status, new.(*corev1.Pod).Status) { + queue.Add(runtime.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }) + } + } + }, + DeleteFunc: func(obj interface{}) { + if flag, pod := eventFilter(obj); flag { + queue.Add(runtime.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }) + } + }, + }) + + if err != nil { + klog.Fatalf("add event handler error: %s", err) + panic(err) + } + + leafClient, err := kubernetes.NewForConfig(opts.Config) + if err != nil { + klog.Fatalf("could not build clientset for cluster %s", err) + panic(err) + } + + leafK8sSyncer := &leafPodK8sSyncer{ + LeafClient: leafClient, + RootClient: opts.RootClient, + } + + return runtime.NewK8sWorkerQueue(queue, podInformer.Informer(), leafK8sSyncer) +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/model/ECIContainer.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/model/ECIContainer.go new file mode 100644 index 000000000..bed2bd6ff --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/model/ECIContainer.go @@ -0,0 +1,30 @@ +package model + +import ( + corev1 "k8s.io/api/core/v1" +) + +type ECIContainer struct { + Name string `json:"name,omitempty"` + Region string `json:"region,omitempty"` + Cpu float64 `json:"cpu,omitempty"` + Memory float64 `json:"memory,omitempty"` + Quantity int32 `json:"quantity,omitempty"` + Volumes []Volume `json:"volumes,omitempty"` + VpcId string `json:"vpcId,omitempty"` + NetworkId string `json:"networkId,omitempty"` + SecurityGroupIds []string `json:"securityGroupIds,omitempty"` + IpId string `json:"ipId,omitempty"` + IpType string `json:"ipType,omitempty"` + ChargeMode string `json:"chargeMode,omitempty"` + BandwidthSize int32 `json:"bandwidthSize,omitempty"` + Ipv4Bandwidth bool `json:"ipv4Bandwidth,omitempty"` + Ipv6Bandwidth bool `json:"ipv6Bandwidth,omitempty"` + Pod corev1.Pod `json:"pod,omitempty"` + EciId string `json:"eciId,omitempty"` +} + +type Volume struct { + ResourceType string `json:"resourceType,omitempty"` + Size int32 `json:"size,omitempty"` +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/syncer.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/syncer.go new file mode 100644 index 000000000..d902558bc --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/syncer.go @@ -0,0 +1,77 @@ +package openapi + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + 
"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + "github.com/kosmos.io/kosmos/pkg/utils/podutils" +) + +type leafPodServerlessSyncer struct { + LeafClient kubernetes.Interface + RootClient kubernetes.Interface +} + +const ( + LeafPodControllerName = "leaf-pod-controller" + LeafPodRequeueTime = 10 * time.Second +) + +func DeletePodInRootCluster(ctx context.Context, rootnamespacedname runtime.NamespacedName, rootClient kubernetes.Interface) error { + rPod, err := rootClient.CoreV1().Pods(rootnamespacedname.Namespace).Get(ctx, rootnamespacedname.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil + } else { + return err + } + } + + rPodCopy := rPod.DeepCopy() + + if err := rootClient.CoreV1().Pods(rPodCopy.Namespace).Delete(ctx, rPodCopy.Name, metav1.DeleteOptions{ + GracePeriodSeconds: new(int64), + }); err != nil { + if !errors.IsNotFound(err) { + return err + } + } + + return nil +} + +func (s *leafPodServerlessSyncer) Reconcile(ctx context.Context, key runtime.NamespacedName) (runtime.Result, error) { + // TODO: invoke openapi + pod, err := s.LeafClient.CoreV1().Pods(key.Namespace).Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + // delete pod in root + if err := DeletePodInRootCluster(ctx, key, s.RootClient); err != nil { + return runtime.Result{RequeueAfter: LeafPodRequeueTime}, nil + } + return runtime.Result{}, nil + } + + klog.Errorf("get %s error: %v", key, err) + return runtime.Result{RequeueAfter: LeafPodRequeueTime}, nil + } + + podCopy := pod.DeepCopy() + + if podutils.IsKosmosPod(podCopy) { + podutils.FitObjectMeta(&podCopy.ObjectMeta) + podCopy.ResourceVersion = "0" + if _, err := s.RootClient.CoreV1().Pods(podCopy.Namespace).Update(ctx, podCopy, metav1.UpdateOptions{}); err != nil && !errors.IsNotFound(err) { + klog.V(4).Info(fmt.Sprintf("error while updating pod status in kubernetes: %s", err)) + return runtime.Result{RequeueAfter: LeafPodRequeueTime}, nil + } + } + return runtime.Result{}, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/workerqueue.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/workerqueue.go new file mode 100644 index 000000000..e73be88f1 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless/workerqueue.go @@ -0,0 +1,24 @@ +package openapi + +import ( + "k8s.io/client-go/util/workqueue" + + leafpodsyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" +) + +func NewLeafPodOpenApiWorkerQueue(opts *leafpodsyncers.LeafPodWorkerQueueOption) runtime.Controller { + // create the workqueue + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + + // TODO: launch go-routing to fetch data + // queue.AddRateLimited(xxxx) + + leafOpenApiSyncer := &leafPodServerlessSyncer{ + // TODO: xxxx + // LeafClient: opts.LeafClient, + RootClient: opts.RootClient, + } + + return runtime.NewOpenApiWorkerQueue(queue, leafOpenApiSyncer) +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/share.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/share.go new file mode 100644 index 000000000..d4f43824e --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/share.go @@ -0,0 +1,11 @@ +package leafpodsyncers + +import ( + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type 
LeafPodWorkerQueueOption struct { + Config *rest.Config + RootClient kubernetes.Interface +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go deleted file mode 100644 index 9f90f5595..000000000 --- a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_controller.go +++ /dev/null @@ -1,172 +0,0 @@ -package pod - -import ( - "context" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kosmos.io/kosmos/pkg/utils" - "github.com/kosmos.io/kosmos/pkg/utils/podutils" -) - -const ( - LeafPodControllerName = "leaf-pod-controller" - LeafPodRequeueTime = 10 * time.Second -) - -type LeafPodReconciler struct { - client.Client - RootClient client.Client - Namespace string -} - -func (r *LeafPodReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - var pod corev1.Pod - if err := r.Get(ctx, request.NamespacedName, &pod); err != nil { - if apierrors.IsNotFound(err) { - // delete pod in root - if err := DeletePodInRootCluster(ctx, request.NamespacedName, r.RootClient); err != nil { - return reconcile.Result{RequeueAfter: LeafPodRequeueTime}, nil - } - return reconcile.Result{}, nil - } - klog.Errorf("get %s error: %v", request.NamespacedName, err) - return reconcile.Result{RequeueAfter: LeafPodRequeueTime}, nil - } - - podCopy := pod.DeepCopy() - - // if ShouldSkipStatusUpdate(podCopy) { - // return reconcile.Result{}, nil - // } - - if podutils.IsKosmosPod(podCopy) { - podutils.FitObjectMeta(&podCopy.ObjectMeta) - podCopy.ResourceVersion = "0" - if err := r.RootClient.Status().Update(ctx, podCopy); err != nil && !apierrors.IsNotFound(err) { - klog.V(4).Info(errors.Wrap(err, "error while updating pod status in kubernetes")) - return reconcile.Result{RequeueAfter: LeafPodRequeueTime}, nil - } - } - return reconcile.Result{}, nil -} - -type rootDeleteOption struct { - GracePeriodSeconds *int64 -} - -func (dopt *rootDeleteOption) ApplyToDelete(opt *client.DeleteOptions) { - opt.GracePeriodSeconds = dopt.GracePeriodSeconds -} - -func NewRootDeleteOption(pod *corev1.Pod) client.DeleteOption { - // TODO - //gracePeriodSeconds := pod.DeletionGracePeriodSeconds - // - //current := metav1.NewTime(time.Now()) - //if pod.DeletionTimestamp.Before(¤t) { - // gracePeriodSeconds = new(int64) - //} - return &rootDeleteOption{ - GracePeriodSeconds: new(int64), - } -} - -func NewLeafDeleteOption(pod *corev1.Pod) client.DeleteOption { - gracePeriodSeconds := new(int64) - if pod.DeletionGracePeriodSeconds != nil { - gracePeriodSeconds = pod.DeletionGracePeriodSeconds - } - - return &rootDeleteOption{ - GracePeriodSeconds: gracePeriodSeconds, - } -} - -func DeletePodInRootCluster(ctx context.Context, rootnamespacedname types.NamespacedName, rootClient client.Client) error { - rPod := corev1.Pod{} - err := rootClient.Get(ctx, rootnamespacedname, &rPod) - - if err != nil { - if apierrors.IsNotFound(err) { - return nil - } else { - return err - } 
- } - - rPodCopy := rPod.DeepCopy() - deleteOption := NewRootDeleteOption(rPodCopy) - - if err := rootClient.Delete(ctx, rPodCopy, deleteOption); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - - return nil -} - -func (r *LeafPodReconciler) SetupWithManager(mgr manager.Manager) error { - if r.Client == nil { - r.Client = mgr.GetClient() - } - - skipFunc := func(obj client.Object) bool { - if obj.GetNamespace() == utils.ReservedNS { - return false - } - - // skip namespace - if len(r.Namespace) > 0 && r.Namespace != obj.GetNamespace() { - return false - } - - p := obj.(*corev1.Pod) - return podutils.IsKosmosPod(p) - } - - return ctrl.NewControllerManagedBy(mgr). - Named(LeafPodControllerName). - WithOptions(controller.Options{}). - For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - // ignore create event - return skipFunc(createEvent.Object) - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { - pod1 := updateEvent.ObjectOld.(*corev1.Pod) - pod2 := updateEvent.ObjectNew.(*corev1.Pod) - if !skipFunc(updateEvent.ObjectNew) { - return false - } - return !cmp.Equal(pod1.Status, pod2.Status) - }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - return skipFunc(deleteEvent.Object) - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - return false - }, - })). - Complete(r) -} - -// func ShouldSkipStatusUpdate(pod *corev1.Pod) bool { -// return pod.Status.Phase == corev1.PodSucceeded || -// pod.Status.Phase == corev1.PodFailed -// } diff --git a/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_workerqueue.go b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_workerqueue.go new file mode 100644 index 000000000..23ee558cc --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/leaf_pod_workerqueue.go @@ -0,0 +1,21 @@ +package pod + +import ( + leafpodsyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod" + k8s "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/k8s" + serverless "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod/serverless" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" +) + +func NewLeafPodWorkerQueue(opts *leafpodsyncers.LeafPodWorkerQueueOption, leafType leafUtils.LeafType) runtime.Controller { + switch leafType { + case leafUtils.LeafTypeK8s: + return k8s.NewLeafPodK8wWorkerQueue(opts) + case leafUtils.LeafTypeServerless: + return serverless.NewLeafPodOpenApiWorkerQueue(opts) + default: + panic("leaf type not supported") + } + // return runtime.Controller{} +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/env_resource_manager.go b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/env_resource_manager.go new file mode 100644 index 000000000..e05e912cc --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/env_resource_manager.go @@ -0,0 +1,79 @@ +package rootpodsyncers + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/dynamic" + + "github.com/kosmos.io/kosmos/pkg/utils" +) + +type envResourceManager struct { + DynamicRootClient dynamic.Interface +} + +// GetConfigMap retrieves the specified config map from the cache. 
+func (rm *envResourceManager) GetConfigMap(name, namespace string) (*corev1.ConfigMap, error) { + // return rm.configMapLister.ConfigMaps(namespace).Get(name) + obj, err := rm.DynamicRootClient.Resource(utils.GVR_CONFIGMAP).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + retObj := &corev1.ConfigMap{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &retObj); err != nil { + return nil, err + } + + return retObj, nil +} + +// GetSecret retrieves the specified secret from Kubernetes. +func (rm *envResourceManager) GetSecret(name, namespace string) (*corev1.Secret, error) { + // return rm.secretLister.Secrets(namespace).Get(name) + obj, err := rm.DynamicRootClient.Resource(utils.GVR_SECRET).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + retObj := &corev1.Secret{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &retObj); err != nil { + return nil, err + } + + return retObj, nil +} + +// ListServices retrieves the list of services from Kubernetes. +func (rm *envResourceManager) ListServices() ([]*corev1.Service, error) { + // return rm.serviceLister.List(labels.Everything()) + objs, err := rm.DynamicRootClient.Resource(utils.GVR_SERVICE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labels.Everything().String(), + }) + + if err != nil { + return nil, err + } + + retObj := make([]*corev1.Service, 0) + + for _, obj := range objs.Items { + tmpObj := &corev1.Service{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &tmpObj); err != nil { + return nil, err + } + retObj = append(retObj, tmpObj) + } + + return retObj, nil +} + +func NewEnvResourceManager(client dynamic.Interface) utils.EnvResourceManager { + return &envResourceManager{ + DynamicRootClient: client, + } +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/storage_handler.go similarity index 73% rename from pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go rename to pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/storage_handler.go index 9e4c06565..378b049d3 100644 --- a/pkg/clustertree/cluster-manager/controllers/pod/storage_handler.go +++ b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/storage_handler.go @@ -1,4 +1,4 @@ -package pod +package rootpodsyncers import ( "context" @@ -15,7 +15,7 @@ import ( ) type StorageHandler interface { - BeforeCreateInLeaf(context.Context, *RootPodReconciler, *leafUtils.LeafResource, *unstructured.Unstructured, *corev1.Pod, *leafUtils.ClusterNode) error + BeforeCreateInLeaf(context.Context, *K8sSyncer, *leafUtils.LeafResource, *unstructured.Unstructured, *corev1.Pod, *leafUtils.ClusterNode) error } func NewStorageHandler(gvr schema.GroupVersionResource) (StorageHandler, error) { @@ -33,14 +33,14 @@ func NewStorageHandler(gvr schema.GroupVersionResource) (StorageHandler, error) type ConfigMapHandler struct { } -func (c *ConfigMapHandler) BeforeCreateInLeaf(context.Context, *RootPodReconciler, *leafUtils.LeafResource, *unstructured.Unstructured, *corev1.Pod, *leafUtils.ClusterNode) error { +func (c *ConfigMapHandler) BeforeCreateInLeaf(context.Context, *K8sSyncer, *leafUtils.LeafResource, *unstructured.Unstructured, *corev1.Pod, *leafUtils.ClusterNode) error { return nil } type SecretHandler struct { 
} -func (s *SecretHandler) BeforeCreateInLeaf(ctx context.Context, r *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, _ *leafUtils.ClusterNode) error { +func (s *SecretHandler) BeforeCreateInLeaf(ctx context.Context, r *K8sSyncer, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, _ *leafUtils.ClusterNode) error { secretObj := &corev1.Secret{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredObj.Object, secretObj) if err != nil { @@ -58,7 +58,7 @@ func (s *SecretHandler) BeforeCreateInLeaf(ctx context.Context, r *RootPodReconc type PVCHandler struct { } -func (v *PVCHandler) BeforeCreateInLeaf(_ context.Context, _ *RootPodReconciler, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error { +func (v *PVCHandler) BeforeCreateInLeaf(_ context.Context, _ *K8sSyncer, lr *leafUtils.LeafResource, unstructuredObj *unstructured.Unstructured, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error { if rootpod == nil || len(rootpod.Spec.NodeName) == 0 { return nil } diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/syncer_handle.go similarity index 60% rename from pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go rename to pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/syncer_handle.go index d557863b1..502794e99 100644 --- a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_controller.go +++ b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s/syncer_handle.go @@ -1,4 +1,4 @@ -package pod +package rootpodsyncers import ( "context" @@ -10,274 +10,235 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" - "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/kosmos.io/kosmos/cmd/clustertree/cluster-manager/app/options" - kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" - "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/extensions/daemonset" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" "github.com/kosmos.io/kosmos/pkg/utils" "github.com/kosmos.io/kosmos/pkg/utils/podutils" ) -const ( - RootPodControllerName = "root-pod-controller" - RootPodRequeueTime = 10 * time.Second -) - -type RootPodReconciler struct { - client.Client - RootClient client.Client - +type K8sSyncer struct { + RootClient kubernetes.Interface + GlobalLeafManager leafUtils.LeafResourceManager + EnvResourceManager utils.EnvResourceManager + Options *options.Options DynamicRootClient dynamic.Interface - envResourceManager utils.EnvResourceManager - - GlobalLeafManager leafUtils.LeafResourceManager - - 
Options *options.Options } -type envResourceManager struct { - DynamicRootClient dynamic.Interface -} - -// GetConfigMap retrieves the specified config map from the cache. -func (rm *envResourceManager) GetConfigMap(name, namespace string) (*corev1.ConfigMap, error) { - // return rm.configMapLister.ConfigMaps(namespace).Get(name) - obj, err := rm.DynamicRootClient.Resource(utils.GVR_CONFIGMAP).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } +func (r *K8sSyncer) DeletePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName, cleanflag bool) error { + klog.V(4).Infof("Deleting pod %v/%+v", rootnamespacedname.Namespace, rootnamespacedname.Name) + // leafPod := &corev1.Pod{} - retObj := &corev1.ConfigMap{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &retObj); err != nil { - return nil, err + cleanRootPodFunc := func() error { + return DeletePodInRootCluster(ctx, rootnamespacedname, r.RootClient) } - return retObj, nil -} + // leafpod, err := lr.Clientset.Get(ctx, rootnamespacedname, leafPod) + leafPod, err := lr.Clientset.CoreV1().Pods(rootnamespacedname.Namespace).Get(ctx, rootnamespacedname.Name, metav1.GetOptions{}) -// GetSecret retrieves the specified secret from Kubernetes. -func (rm *envResourceManager) GetSecret(name, namespace string) (*corev1.Secret, error) { - // return rm.secretLister.Secrets(namespace).Get(name) - obj, err := rm.DynamicRootClient.Resource(utils.GVR_SECRET).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { - return nil, err + if errors.IsNotFound(err) { + if cleanflag { + return cleanRootPodFunc() + } + return nil + } + return err } - retObj := &corev1.Secret{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &retObj); err != nil { - return nil, err + if !podutils.IsKosmosPod(leafPod) { + klog.V(4).Info("Pod is not create by kosmos tree, ignore") + return nil } - return retObj, nil -} - -// ListServices retrieves the list of services from Kubernetes. 
-func (rm *envResourceManager) ListServices() ([]*corev1.Service, error) { - // return rm.serviceLister.List(labels.Everything()) - objs, err := rm.DynamicRootClient.Resource(utils.GVR_SERVICE).List(context.TODO(), metav1.ListOptions{ - LabelSelector: labels.Everything().String(), - }) - + deleteOption := NewLeafDeleteOption(leafPod) + err = lr.Client.Delete(ctx, leafPod, deleteOption) if err != nil { - return nil, err - } - - retObj := make([]*corev1.Service, 0) - - for _, obj := range objs.Items { - tmpObj := &corev1.Service{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &tmpObj); err != nil { - return nil, err + if errors.IsNotFound(err) { + klog.V(4).Infof("Tried to delete pod %s/%s, but it did not exist in the cluster", leafPod.Namespace, leafPod.Name) + if cleanflag { + return cleanRootPodFunc() + } + return nil } - retObj = append(retObj, tmpObj) + return fmt.Errorf("could not delete pod: %v", err) } - - return retObj, nil + klog.V(4).Infof("Delete pod %v/%+v success", leafPod.Namespace, leafPod.Name) + return nil } -func NewEnvResourceManager(client dynamic.Interface) utils.EnvResourceManager { - return &envResourceManager{ - DynamicRootClient: client, +func (r *K8sSyncer) CreatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) error { + if err := podutils.PopulateEnvironmentVariables(ctx, pod, r.EnvResourceManager); err != nil { + // span.SetStatus(err) + return err } -} -func (r *RootPodReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - var cachepod corev1.Pod - if err := r.Get(ctx, request.NamespacedName, &cachepod); err != nil { - if errors.IsNotFound(err) { - // TODO: we cannot get leaf pod when we donnot known the node name of pod, so delete all ... 
- nodeNames := r.GlobalLeafManager.ListNodes() - for _, nodeName := range nodeNames { - lr, err := r.GlobalLeafManager.GetLeafResourceByNodeName(nodeName) - if err != nil { - // wait for leaf resource init - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil - } - if err := r.DeletePodInLeafCluster(ctx, lr, request.NamespacedName, false); err != nil { - klog.Errorf("delete pod in leaf error[1]: %v, %s", err, request.NamespacedName) - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil - } - } - return reconcile.Result{}, nil - } - klog.Errorf("get %s error: %v", request.NamespacedName, err) - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil + clusterNodeInfo := r.GlobalLeafManager.GetClusterNode(pod.Spec.NodeName) + if clusterNodeInfo == nil { + return fmt.Errorf("clusternode info is nil , name: %s", pod.Spec.NodeName) } - rootpod := *(cachepod.DeepCopy()) + nodeSelector := r.GlobalLeafManager.GetClusterNode(pod.Spec.NodeName).LeafNodeSelector - // node filter - if !strings.HasPrefix(rootpod.Spec.NodeName, utils.KosmosNodePrefix) { - // ignore the pod who donnot has the annotations "kosmos-io/owned-by-cluster" - // TODO: use const - nn := types.NamespacedName{ - Namespace: "", - Name: rootpod.Spec.NodeName, - } + basicPod := podutils.FitPod(pod, lr.IgnoreLabels, clusterNodeInfo.LeafMode, nodeSelector) + klog.V(4).Infof("Creating pod %v/%+v", pod.Namespace, pod.Name) - targetNode := &corev1.Node{} - if err := r.RootClient.Get(ctx, nn, targetNode); err != nil { - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil + // create ns + ns := &corev1.Namespace{} + nsKey := types.NamespacedName{ + Name: basicPod.Namespace, + } + if err := lr.Client.Get(ctx, nsKey, ns); err != nil { + if !errors.IsNotFound(err) { + // cannot get ns in root cluster, retry + return err } - - if targetNode.Annotations == nil { - return reconcile.Result{}, nil + klog.V(4).Infof("Namespace %s does not exist for pod %s, creating it", basicPod.Namespace, basicPod.Name) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: basicPod.Namespace, + }, } - clusterName := targetNode.Annotations[utils.KosmosNodeOwnedByClusterAnnotations] - - if len(clusterName) == 0 { - return reconcile.Result{}, nil + if createErr := lr.Client.Create(ctx, ns); createErr != nil { + if !errors.IsAlreadyExists(createErr) { + klog.V(4).Infof("Namespace %s create failed error: %v", basicPod.Namespace, createErr) + return err + } else { + // namespace already existed, skip create + klog.V(4).Infof("Namespace %s already existed: %v", basicPod.Namespace, createErr) + } } } - // TODO: GlobalLeafResourceManager may not inited.... 
- // belongs to the current node - if !r.GlobalLeafManager.HasNode(rootpod.Spec.NodeName) { - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil + if err := r.createVolumes(ctx, lr, basicPod, clusterNodeInfo); err != nil { + klog.Errorf("Creating Volumes error %+v", basicPod) + return err + } else { + klog.V(4).Infof("Creating Volumes successed %+v", basicPod) + } + + r.convertAuth(ctx, lr, basicPod) + + if !r.Options.MultiClusterService { + r.changeToMasterCoreDNS(ctx, basicPod, r.Options) } - lr, err := r.GlobalLeafManager.GetLeafResourceByNodeName(rootpod.Spec.NodeName) + klog.V(4).Infof("Creating pod %+v", basicPod) + + err := lr.Client.Create(ctx, basicPod) if err != nil { - // wait for leaf resource init - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil + return fmt.Errorf("could not create pod: %v", err) } + klog.V(4).Infof("Create pod %v/%+v success", basicPod.Namespace, basicPod.Name) + return nil +} - // skip namespace - if len(lr.Namespace) > 0 && lr.Namespace != rootpod.Namespace { - return reconcile.Result{}, nil +func (r *K8sSyncer) UpdatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootPod *corev1.Pod, leafPod *corev1.Pod) error { + // TODO: update env + // TODO: update config secret pv pvc ... + klog.V(4).Infof("Updating pod %v/%+v", rootPod.Namespace, rootPod.Name) + + if !podutils.IsKosmosPod(leafPod) { + klog.V(4).Info("Pod is not created by kosmos tree, ignore") + return nil + } + // not used + podutils.FitLabels(leafPod.ObjectMeta.Labels, lr.IgnoreLabels) + podCopy := leafPod.DeepCopy() + // util.GetUpdatedPod update PodCopy container image, annotations, labels. + // recover toleration, affinity, tripped ignore labels. + clusterNodeInfo := r.GlobalLeafManager.GetClusterNode(rootPod.Spec.NodeName) + if clusterNodeInfo == nil { + return fmt.Errorf("clusternode info is nil , name: %s", rootPod.Spec.NodeName) } - // delete pod in leaf - if !rootpod.GetDeletionTimestamp().IsZero() { - if err := r.DeletePodInLeafCluster(ctx, lr, request.NamespacedName, true); err != nil { - klog.Errorf("delete pod in leaf error[1]: %v, %s", err, request.NamespacedName) - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil - } - return reconcile.Result{}, nil + nodeSelector := clusterNodeInfo.LeafNodeSelector + + podutils.GetUpdatedPod(podCopy, rootPod, lr.IgnoreLabels, clusterNodeInfo.LeafMode, nodeSelector) + if reflect.DeepEqual(leafPod.Spec, podCopy.Spec) && + reflect.DeepEqual(leafPod.Annotations, podCopy.Annotations) && + reflect.DeepEqual(leafPod.Labels, podCopy.Labels) { + return nil } - leafPod := &corev1.Pod{} - err = lr.Client.Get(ctx, request.NamespacedName, leafPod) + r.convertAuth(ctx, lr, podCopy) - // create pod in leaf - if err != nil { - if errors.IsNotFound(err) { - if err := r.CreatePodInLeafCluster(ctx, lr, &rootpod, r.GlobalLeafManager.GetClusterNode(rootpod.Spec.NodeName).LeafNodeSelector); err != nil { - klog.Errorf("create pod inleaf error, err: %s", err) - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil - } else { - return reconcile.Result{}, nil - } - } else { - klog.Errorf("get pod in leaf error[3]: %v, %s", err, request.NamespacedName) - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil - } + if !r.Options.MultiClusterService { + r.changeToMasterCoreDNS(ctx, podCopy, r.Options) } - // update pod in leaf - if podutils.ShouldEnqueue(leafPod, &rootpod) { - if err := r.UpdatePodInLeafCluster(ctx, lr, &rootpod, leafPod, 
r.GlobalLeafManager.GetClusterNode(rootpod.Spec.NodeName).LeafNodeSelector); err != nil { - return reconcile.Result{RequeueAfter: RootPodRequeueTime}, nil - } + klog.V(4).Infof("Updating pod %+v", podCopy) + + err := lr.Client.Update(ctx, podCopy) + if err != nil { + return fmt.Errorf("could not update pod: %v", err) } + klog.V(4).Infof("Update pod %v/%+v success ", rootPod.Namespace, rootPod.Name) + return nil +} + +func (r *K8sSyncer) GetPodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName) (*corev1.Pod, error) { + leafPod := &corev1.Pod{} + err := lr.Client.Get(ctx, types.NamespacedName{ + Namespace: rootnamespacedname.Namespace, + Name: rootnamespacedname.Name, + }, leafPod) + return leafPod, err +} - return reconcile.Result{}, nil +type rootDeleteOption struct { + GracePeriodSeconds *int64 } -func (r *RootPodReconciler) SetupWithManager(mgr manager.Manager) error { - if r.Client == nil { - r.Client = mgr.GetClient() +func (dopt *rootDeleteOption) ApplyToDelete(opt *client.DeleteOptions) { + opt.GracePeriodSeconds = dopt.GracePeriodSeconds +} + +func NewLeafDeleteOption(pod *corev1.Pod) client.DeleteOption { + gracePeriodSeconds := new(int64) + if pod.DeletionGracePeriodSeconds != nil { + gracePeriodSeconds = pod.DeletionGracePeriodSeconds } - r.envResourceManager = NewEnvResourceManager(r.DynamicRootClient) + return &rootDeleteOption{ + GracePeriodSeconds: gracePeriodSeconds, + } +} - skipFunc := func(obj client.Object) bool { - // skip reservedNS - if obj.GetNamespace() == utils.ReservedNS { - return false - } - // don't create pod if pod has label daemonset.kosmos.io/managed="" - if _, ok := obj.GetLabels()[daemonset.ManagedLabel]; ok { - return false +func DeletePodInRootCluster(ctx context.Context, rootnamespacedname runtime.NamespacedName, rootClient kubernetes.Interface) error { + rPod, err := rootClient.CoreV1().Pods(rootnamespacedname.Namespace).Get(ctx, rootnamespacedname.Name, metav1.GetOptions{}) + + if err != nil { + if errors.IsNotFound(err) { + return nil + } else { + return err } + } - p := obj.(*corev1.Pod) + rPodCopy := rPod.DeepCopy() - // skip daemonset - if p.OwnerReferences != nil && len(p.OwnerReferences) > 0 { - for _, or := range p.OwnerReferences { - if or.Kind == "DaemonSet" { - if p.Annotations != nil { - if _, ok := p.Annotations[utils.KosmosDaemonsetAllowAnnotations]; ok { - return true - } - } - return false - } - } + if err := rootClient.CoreV1().Pods(rPodCopy.Namespace).Delete(ctx, rPodCopy.Name, metav1.DeleteOptions{ + GracePeriodSeconds: new(int64), + }); err != nil { + if !errors.IsNotFound(err) { + return err } - return true } - return ctrl.NewControllerManagedBy(mgr). - Named(RootPodControllerName). - WithOptions(controller.Options{}). - For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return skipFunc(createEvent.Object) - }, - UpdateFunc: func(updateEvent event.UpdateEvent) bool { - return skipFunc(updateEvent.ObjectNew) - }, - DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - return skipFunc(deleteEvent.Object) - }, - GenericFunc: func(genericEvent event.GenericEvent) bool { - // TODO - return false - }, - })). 
- Complete(r) + return nil } -func (r *RootPodReconciler) createStorageInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, gvr schema.GroupVersionResource, resourcenames []string, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error { +func (r *K8sSyncer) createStorageInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, gvr schema.GroupVersionResource, resourcenames []string, rootpod *corev1.Pod, cn *leafUtils.ClusterNode) error { ns := rootpod.Namespace storageHandler, err := NewStorageHandler(gvr) if err != nil { @@ -332,7 +293,7 @@ func (r *RootPodReconciler) createStorageInLeafCluster(ctx context.Context, lr * return nil } -func (r *RootPodReconciler) createSAInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, sa string, ns string) (*corev1.ServiceAccount, error) { +func (r *K8sSyncer) createSAInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, sa string, ns string) (*corev1.ServiceAccount, error) { saKey := types.NamespacedName{ Namespace: ns, Name: sa, @@ -362,13 +323,12 @@ func (r *RootPodReconciler) createSAInLeafCluster(ctx context.Context, lr *leafU return newSA, nil } -func (r *RootPodReconciler) createSATokenInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, saName string, ns string) (*corev1.Secret, error) { +func (r *K8sSyncer) createSATokenInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, saName string, ns string) (*corev1.Secret, error) { satokenKey := types.NamespacedName{ Namespace: ns, Name: saName, } - sa := &corev1.ServiceAccount{} - err := r.RootClient.Get(ctx, satokenKey, sa) + sa, err := r.RootClient.CoreV1().ServiceAccounts(satokenKey.Namespace).Get(ctx, satokenKey.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("could not find sa %s in master cluster: %v", saName, err) } @@ -397,8 +357,8 @@ func (r *RootPodReconciler) createSATokenInLeafCluster(ctx context.Context, lr * Name: secretName, } - masterSecret := &corev1.Secret{} - err = r.RootClient.Get(ctx, secretKey, masterSecret) + masterSecret, err := r.RootClient.CoreV1().Secrets(secretKey.Namespace).Get(ctx, secretKey.Name, metav1.GetOptions{}) + if err != nil { return nil, fmt.Errorf("could not find secret %s in master cluster: %v", secretName, err) } @@ -421,7 +381,7 @@ func (r *RootPodReconciler) createSATokenInLeafCluster(ctx context.Context, lr * return newSE, nil } -func (r *RootPodReconciler) createCAInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, ns string) (*corev1.ConfigMap, error) { +func (r *K8sSyncer) createCAInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, ns string) (*corev1.ConfigMap, error) { masterCAConfigmapKey := types.NamespacedName{ Namespace: ns, Name: utils.MasterRooTCAName, @@ -437,14 +397,8 @@ func (r *RootPodReconciler) createCAInLeafCluster(ctx context.Context, lr *leafU return masterCA, nil } - ca := &corev1.ConfigMap{} + ca, err := r.RootClient.CoreV1().ConfigMaps(ns).Get(ctx, utils.RooTCAConfigMapName, metav1.GetOptions{}) - rootCAConfigmapKey := types.NamespacedName{ - Namespace: ns, - Name: utils.RooTCAConfigMapName, - } - - err = r.Client.Get(ctx, rootCAConfigmapKey, ca) if err != nil { return nil, fmt.Errorf("could not find configmap %s in master cluster: %v", ca, err) } @@ -463,14 +417,13 @@ func (r *RootPodReconciler) createCAInLeafCluster(ctx context.Context, lr *leafU // changeToMasterCoreDNS point the dns of the pod to the master cluster, so that the pod can access any service. // The master cluster holds all the services in the multi-cluster. 
-func (r *RootPodReconciler) changeToMasterCoreDNS(ctx context.Context, pod *corev1.Pod, opts *options.Options) { +func (r *K8sSyncer) changeToMasterCoreDNS(ctx context.Context, pod *corev1.Pod, opts *options.Options) { if pod.Spec.DNSPolicy != corev1.DNSClusterFirst && pod.Spec.DNSPolicy != corev1.DNSClusterFirstWithHostNet { return } ns := pod.Namespace - svc := &corev1.Service{} - err := r.RootClient.Get(ctx, types.NamespacedName{Namespace: opts.RootCoreDNSServiceNamespace, Name: opts.RootCoreDNSServiceName}, svc) + svc, err := r.RootClient.CoreV1().Services(opts.RootCoreDNSServiceNamespace).Get(ctx, opts.RootCoreDNSServiceName, metav1.GetOptions{}) if err != nil { return } @@ -492,7 +445,7 @@ func (r *RootPodReconciler) changeToMasterCoreDNS(ctx context.Context, pod *core } } -func (r *RootPodReconciler) convertAuth(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) { +func (r *K8sSyncer) convertAuth(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) { if pod.Spec.AutomountServiceAccountToken == nil || *pod.Spec.AutomountServiceAccountToken { falseValue := false pod.Spec.AutomountServiceAccountToken = &falseValue @@ -548,7 +501,7 @@ func (r *RootPodReconciler) convertAuth(ctx context.Context, lr *leafUtils.LeafR } } -func (r *RootPodReconciler) createServiceAccountInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, secret *corev1.Secret) error { +func (r *K8sSyncer) createServiceAccountInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, secret *corev1.Secret) error { if !lr.EnableServiceAccount { return nil } @@ -618,7 +571,7 @@ func (r *RootPodReconciler) createServiceAccountInLeafCluster(ctx context.Contex return nil } -func (r *RootPodReconciler) createVolumes(ctx context.Context, lr *leafUtils.LeafResource, basicPod *corev1.Pod, clusterNodeInfo *leafUtils.ClusterNode) error { +func (r *K8sSyncer) createVolumes(ctx context.Context, lr *leafUtils.LeafResource, basicPod *corev1.Pod, clusterNodeInfo *leafUtils.ClusterNode) error { // create secret configmap pvc secretNames, imagePullSecrets := podutils.GetSecrets(basicPod) configMaps := podutils.GetConfigmaps(basicPod) @@ -698,150 +651,3 @@ func (r *RootPodReconciler) createVolumes(ctx context.Context, lr *leafUtils.Lea return nil } - -func (r *RootPodReconciler) CreatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod, nodeSelector kosmosv1alpha1.NodeSelector) error { - if err := podutils.PopulateEnvironmentVariables(ctx, pod, r.envResourceManager); err != nil { - // span.SetStatus(err) - return err - } - - clusterNodeInfo := r.GlobalLeafManager.GetClusterNode(pod.Spec.NodeName) - if clusterNodeInfo == nil { - return fmt.Errorf("clusternode info is nil , name: %s", pod.Spec.NodeName) - } - - basicPod := podutils.FitPod(pod, lr.IgnoreLabels, clusterNodeInfo.LeafMode, nodeSelector) - klog.V(4).Infof("Creating pod %v/%+v", pod.Namespace, pod.Name) - - // create ns - ns := &corev1.Namespace{} - nsKey := types.NamespacedName{ - Name: basicPod.Namespace, - } - if err := lr.Client.Get(ctx, nsKey, ns); err != nil { - if !errors.IsNotFound(err) { - // cannot get ns in root cluster, retry - return err - } - klog.V(4).Infof("Namespace %s does not exist for pod %s, creating it", basicPod.Namespace, basicPod.Name) - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: basicPod.Namespace, - }, - } - - if createErr := lr.Client.Create(ctx, ns); createErr != nil { - if !errors.IsAlreadyExists(createErr) { - klog.V(4).Infof("Namespace %s create failed 
error: %v", basicPod.Namespace, createErr) - return err - } else { - // namespace already existed, skip create - klog.V(4).Info("Namespace %s already existed: %v", basicPod.Namespace, createErr) - } - } - } - - if err := r.createVolumes(ctx, lr, basicPod, clusterNodeInfo); err != nil { - klog.Errorf("Creating Volumes error %+v", basicPod) - return err - } else { - klog.V(4).Infof("Creating Volumes successed %+v", basicPod) - } - - r.convertAuth(ctx, lr, basicPod) - - if !r.Options.MultiClusterService { - r.changeToMasterCoreDNS(ctx, basicPod, r.Options) - } - - klog.V(4).Infof("Creating pod %+v", basicPod) - - err := lr.Client.Create(ctx, basicPod) - if err != nil { - return fmt.Errorf("could not create pod: %v", err) - } - klog.V(4).Infof("Create pod %v/%+v success", basicPod.Namespace, basicPod.Name) - return nil -} - -func (r *RootPodReconciler) UpdatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootPod *corev1.Pod, leafPod *corev1.Pod, nodeSelector kosmosv1alpha1.NodeSelector) error { - // TODO: update env - // TODO: update config secret pv pvc ... - klog.V(4).Infof("Updating pod %v/%+v", rootPod.Namespace, rootPod.Name) - - if !podutils.IsKosmosPod(leafPod) { - klog.V(4).Info("Pod is not created by kosmos tree, ignore") - return nil - } - // not used - podutils.FitLabels(leafPod.ObjectMeta.Labels, lr.IgnoreLabels) - podCopy := leafPod.DeepCopy() - // util.GetUpdatedPod update PodCopy container image, annotations, labels. - // recover toleration, affinity, tripped ignore labels. - clusterNodeInfo := r.GlobalLeafManager.GetClusterNode(rootPod.Spec.NodeName) - if clusterNodeInfo == nil { - return fmt.Errorf("clusternode info is nil , name: %s", rootPod.Spec.NodeName) - } - podutils.GetUpdatedPod(podCopy, rootPod, lr.IgnoreLabels, clusterNodeInfo.LeafMode, nodeSelector) - if reflect.DeepEqual(leafPod.Spec, podCopy.Spec) && - reflect.DeepEqual(leafPod.Annotations, podCopy.Annotations) && - reflect.DeepEqual(leafPod.Labels, podCopy.Labels) { - return nil - } - - r.convertAuth(ctx, lr, podCopy) - - if !r.Options.MultiClusterService { - r.changeToMasterCoreDNS(ctx, podCopy, r.Options) - } - - klog.V(4).Infof("Updating pod %+v", podCopy) - - err := lr.Client.Update(ctx, podCopy) - if err != nil { - return fmt.Errorf("could not update pod: %v", err) - } - klog.V(4).Infof("Update pod %v/%+v success ", rootPod.Namespace, rootPod.Name) - return nil -} - -func (r *RootPodReconciler) DeletePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname types.NamespacedName, cleanflag bool) error { - klog.V(4).Infof("Deleting pod %v/%+v", rootnamespacedname.Namespace, rootnamespacedname.Name) - leafPod := &corev1.Pod{} - - cleanRootPodFunc := func() error { - return DeletePodInRootCluster(ctx, rootnamespacedname, r.Client) - } - - err := lr.Client.Get(ctx, rootnamespacedname, leafPod) - - if err != nil { - if errors.IsNotFound(err) { - if cleanflag { - return cleanRootPodFunc() - } - return nil - } - return err - } - - if !podutils.IsKosmosPod(leafPod) { - klog.V(4).Info("Pod is not create by kosmos tree, ignore") - return nil - } - - deleteOption := NewLeafDeleteOption(leafPod) - err = lr.Client.Delete(ctx, leafPod, deleteOption) - if err != nil { - if errors.IsNotFound(err) { - klog.V(4).Infof("Tried to delete pod %s/%s, but it did not exist in the cluster", leafPod.Namespace, leafPod.Name) - if cleanflag { - return cleanRootPodFunc() - } - return nil - } - return fmt.Errorf("could not delete pod: %v", err) - } - klog.V(4).Infof("Delete pod %v/%+v success", 
leafPod.Namespace, leafPod.Name) - return nil -} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root-pod/serverless/syncer_handle.go b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/serverless/syncer_handle.go new file mode 100644 index 000000000..6955d4eea --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/serverless/syncer_handle.go @@ -0,0 +1,30 @@ +package rootpodsyncers + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" +) + +type ServerlessSyncer struct { +} + +func (r *ServerlessSyncer) DeletePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName, cleanflag bool) error { + return fmt.Errorf("not implemented") +} + +func (r *ServerlessSyncer) CreatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) error { + return fmt.Errorf("not implemented") +} + +func (r *ServerlessSyncer) UpdatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootpod *corev1.Pod, leafpod *corev1.Pod) error { + return fmt.Errorf("not implemented") +} + +func (r *ServerlessSyncer) GetPodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName) (*corev1.Pod, error) { + return lr.ServerlessClient.ListPods(rootnamespacedname.Name) +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root-pod/syncer.go b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/syncer.go new file mode 100644 index 000000000..cda23bb4e --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/syncer.go @@ -0,0 +1,188 @@ +package rootpodsyncers + +import ( + "context" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + "github.com/kosmos.io/kosmos/cmd/clustertree/cluster-manager/app/options" + rootpodk8ssyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s" + rootpodopenapisyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/root-pod/serverless" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" + "github.com/kosmos.io/kosmos/pkg/utils" + "github.com/kosmos.io/kosmos/pkg/utils/podutils" +) + +const ( + RootPodControllerName = "root-pod-controller" + RootPodRequeueTime = 10 * time.Second +) + +type RootPodSyncer struct { + Client kubernetes.Interface + GlobalLeafManager leafUtils.LeafResourceManager + EnvResourceManager utils.EnvResourceManager + Options *options.Options + DynamicRootClient dynamic.Interface +} + +func (r *RootPodSyncer) GetSyncer(lr *leafUtils.LeafResource) (RootPodSyncerHandle, error) { + var RootPodSyncer RootPodSyncerHandle + switch lr.GetLeafType() { + case leafUtils.LeafTypeK8s: + RootPodSyncer = &rootpodk8ssyncers.K8sSyncer{ + RootClient: r.Client, + GlobalLeafManager: r.GlobalLeafManager, + EnvResourceManager: r.EnvResourceManager, + Options: r.Options, + DynamicRootClient: r.DynamicRootClient, + } + case leafUtils.LeafTypeServerless: + RootPodSyncer = &rootpodopenapisyncers.ServerlessSyncer{} + } + if RootPodSyncer == nil { + return nil, fmt.Errorf("not implement, 
DeletePodInLeafCluster") + } else { + return RootPodSyncer, nil + } +} + +func (r *RootPodSyncer) GetPodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName) (*corev1.Pod, error) { + if syncer, err := r.GetSyncer(lr); err != nil { + return nil, err + } else { + return syncer.GetPodInLeafCluster(ctx, lr, rootnamespacedname) + } +} + +func (r *RootPodSyncer) DeletePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName, cleanflag bool) error { + if syncer, err := r.GetSyncer(lr); err != nil { + return err + } else { + return syncer.DeletePodInLeafCluster(ctx, lr, rootnamespacedname, cleanflag) + } +} + +func (r *RootPodSyncer) CreatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) error { + if syncer, err := r.GetSyncer(lr); err != nil { + return err + } else { + return syncer.CreatePodInLeafCluster(ctx, lr, pod) + } +} +func (r *RootPodSyncer) UpdatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootpod *corev1.Pod, leafpod *corev1.Pod) error { + if syncer, err := r.GetSyncer(lr); err != nil { + return err + } else { + return syncer.UpdatePodInLeafCluster(ctx, lr, rootpod, leafpod) + } +} + +func (r *RootPodSyncer) Reconcile(ctx context.Context, key runtime.NamespacedName) (runtime.Result, error) { + cachepod, err := r.Client.CoreV1().Pods(key.Namespace).Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + // TODO: we cannot get leaf pod when we donnot known the node name of pod, so delete all ... + nodeNames := r.GlobalLeafManager.ListNodes() + for _, nodeName := range nodeNames { + lr, err := r.GlobalLeafManager.GetLeafResourceByNodeName(nodeName) + if err != nil { + // wait for leaf resource init + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + if err := r.DeletePodInLeafCluster(ctx, lr, key, false); err != nil { + klog.Errorf("delete pod in leaf error[1]: %v, %s", err, key) + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + } + return runtime.Result{}, nil + } + klog.Errorf("get %s error: %v", key, err) + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + + rootpod := *(cachepod.DeepCopy()) + + // node filter + if len(rootpod.Spec.NodeName) == 0 { + return runtime.Result{}, nil + } + if !strings.HasPrefix(rootpod.Spec.NodeName, utils.KosmosNodePrefix) { + // ignore the pod who donnot has the annotations "kosmos-io/owned-by-cluster" + targetNode, err := r.Client.CoreV1().Nodes().Get(ctx, rootpod.Spec.NodeName, metav1.GetOptions{}) + if err != nil { + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + + if targetNode.Annotations == nil { + return runtime.Result{}, nil + } + + clusterName := targetNode.Annotations[utils.KosmosNodeOwnedByClusterAnnotations] + + if len(clusterName) == 0 { + return runtime.Result{}, nil + } + } + + // TODO: GlobalLeafResourceManager may not inited.... 
+ // belongs to the current node + if !r.GlobalLeafManager.HasNode(rootpod.Spec.NodeName) { + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + + lr, err := r.GlobalLeafManager.GetLeafResourceByNodeName(rootpod.Spec.NodeName) + if err != nil { + // wait for leaf resource init + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + + // skip namespace + if len(lr.Namespace) > 0 && lr.Namespace != rootpod.Namespace { + return runtime.Result{}, nil + } + + // delete pod in leaf + if !rootpod.GetDeletionTimestamp().IsZero() { + if err := r.DeletePodInLeafCluster(ctx, lr, key, true); err != nil { + klog.Errorf("delete pod in leaf error[1]: %v, %s", err, key) + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + return runtime.Result{}, nil + } + + leafPod, err := r.GetPodInLeafCluster(ctx, lr, key) + + // create pod in leaf + if err != nil { + if errors.IsNotFound(err) { + if err := r.CreatePodInLeafCluster(ctx, lr, &rootpod); err != nil { + klog.Errorf("create pod inleaf error, err: %s", err) + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } else { + return runtime.Result{}, nil + } + } else { + klog.Errorf("get pod in leaf error[3]: %v, %s", err, key) + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + } + + // update pod in leaf + if podutils.ShouldEnqueue(leafPod, &rootpod) { + if err := r.UpdatePodInLeafCluster(ctx, lr, &rootpod, leafPod); err != nil { + return runtime.Result{RequeueAfter: RootPodRequeueTime}, nil + } + } + + return runtime.Result{}, nil +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root-pod/syncer_handle.go b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/syncer_handle.go new file mode 100644 index 000000000..4bfa50777 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/root-pod/syncer_handle.go @@ -0,0 +1,17 @@ +package rootpodsyncers + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" +) + +type RootPodSyncerHandle interface { + DeletePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName, cleanflag bool) error + CreatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, pod *corev1.Pod) error + UpdatePodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootpod *corev1.Pod, leafpod *corev1.Pod) error + GetPodInLeafCluster(ctx context.Context, lr *leafUtils.LeafResource, rootnamespacedname runtime.NamespacedName) (*corev1.Pod, error) +} diff --git a/pkg/clustertree/cluster-manager/controllers/pod/root_pod_workerqueue.go b/pkg/clustertree/cluster-manager/controllers/pod/root_pod_workerqueue.go new file mode 100644 index 000000000..ab0462e09 --- /dev/null +++ b/pkg/clustertree/cluster-manager/controllers/pod/root_pod_workerqueue.go @@ -0,0 +1,127 @@ +package pod + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + // "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + "github.com/kosmos.io/kosmos/cmd/clustertree/cluster-manager/app/options" + rootpodsyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/root-pod" + rootpodk8ssyncers 
"github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/root-pod/k8s" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/extensions/daemonset" + "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/runtime" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" + "github.com/kosmos.io/kosmos/pkg/utils" +) + +type RootPodWorkerQueueOption struct { + Config *rest.Config + RootClient kubernetes.Interface + DynamicRootClient dynamic.Interface + GlobalLeafManager leafUtils.LeafResourceManager + Options *options.Options +} + +func NewRootPodWorkerQueue(opts *RootPodWorkerQueueOption) runtime.Controller { + // create the workqueue + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + + client, err := kubernetes.NewForConfig(opts.Config) + if err != nil { + klog.Fatal(err) + } + + // Create a shared informer factory for Kubernetes pods in the current namespace (if specified) and scheduled to the current node. + podInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( + client, + 5*time.Second, + ) + + podInformer := podInformerFactory.Core().V1().Pods() + + eventFilter := func(obj interface{}) (bool, *corev1.Pod) { + p, ok := obj.(*corev1.Pod) + + if !ok { + klog.Fatal("convert pod error") + return false, p + } + + // skip reservedNS + if p.GetNamespace() == utils.ReservedNS { + return false, nil + } + // don't create pod if pod has label daemonset.kosmos.io/managed="" + if _, ok := p.GetLabels()[daemonset.ManagedLabel]; ok { + return false, nil + } + + // p := obj.(*corev1.Pod) + + // skip daemonset + if p.OwnerReferences != nil && len(p.OwnerReferences) > 0 { + for _, or := range p.OwnerReferences { + if or.Kind == "DaemonSet" { + if p.Annotations != nil { + if _, ok := p.Annotations[utils.KosmosDaemonsetAllowAnnotations]; ok { + return true, p + } + } + return false, nil + } + } + } + return true, p + } + + _, err = podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if flag, pod := eventFilter(obj); flag { + queue.Add(runtime.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }) + } + }, + UpdateFunc: func(old interface{}, new interface{}) { + if flag, pod := eventFilter(old); flag { + queue.Add(runtime.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }) + } + }, + DeleteFunc: func(obj interface{}) { + if flag, pod := eventFilter(obj); flag { + queue.Add(runtime.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }) + } + }, + }) + + if err != nil { + klog.Fatalf("add event handler error: %s", err) + panic(err) + } + + envResourceManager := rootpodk8ssyncers.NewEnvResourceManager(opts.DynamicRootClient) + rootK8sK8sSyncer := &rootpodsyncers.RootPodSyncer{ + Client: opts.RootClient, + GlobalLeafManager: opts.GlobalLeafManager, + EnvResourceManager: envResourceManager, + Options: opts.Options, + DynamicRootClient: opts.DynamicRootClient, + } + + return runtime.NewK8sWorkerQueue(queue, podInformer.Informer(), rootK8sK8sSyncer) +} diff --git a/pkg/clustertree/cluster-manager/runtime/controller.go b/pkg/clustertree/cluster-manager/runtime/controller.go new file mode 100644 index 000000000..809c23d49 --- /dev/null +++ b/pkg/clustertree/cluster-manager/runtime/controller.go @@ -0,0 +1,123 @@ +package runtime + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + 
"k8s.io/client-go/util/workqueue" + "k8s.io/klog" +) + +const ( + K8S int = iota + OPENAPI +) + +type Func func(key NamespacedName) (Result, error) + +type Reconciler interface { + Reconcile(ctx context.Context, key NamespacedName) (Result, error) +} + +type Controller struct { + // indexer cache.Indexer + queue workqueue.RateLimitingInterface + informer cache.Controller + kind int + Do Reconciler + Workers int +} + +func NewOpenApiWorkerQueue(queue workqueue.RateLimitingInterface, r Reconciler) Controller { + return Controller{ + queue: queue, + kind: OPENAPI, + Do: r, + Workers: 1, + } +} + +func NewK8sWorkerQueue(queue workqueue.RateLimitingInterface, informer cache.SharedIndexInformer, r Reconciler) Controller { + return Controller{ + queue: queue, + kind: K8S, + informer: informer, + Do: r, + Workers: 1, + } +} + +func (c *Controller) runWorker(ctx context.Context) { + for c.processNextItem(ctx) { + } +} +func (c *Controller) processNextItem(ctx context.Context) bool { + // Wait until there is a new item in the working queue + key, quit := c.queue.Get() + if quit { + return false + } + // Tell the queue that we are done with processing this key. + defer c.queue.Done(key) + + // Invoke the method containing the business logic + result, err := c.Reconcile(ctx, key.(NamespacedName)) + // Handle the error if something went wrong during the execution of the business logic + c.handleErr(result, err, key) + return true +} + +func (c *Controller) Run(ctx context.Context) { + defer runtime.HandleCrash() + + stopCh := ctx.Done() + + // Let the workers stop when we are done + defer c.queue.ShutDown() + klog.Info("Starting Pod controller") + + if c.kind == K8S { + go c.informer.Run(stopCh) + + // Wait for all involved caches to be synced, before processing items from the queue is started + if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) { + runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return + } + } + + for i := 0; i < c.Workers; i++ { + go wait.Until(func() { c.runWorker(ctx) }, time.Second, stopCh) + } + + <-stopCh + klog.Info("Stopping Pod controller") +} + +func (c *Controller) handleErr(result Result, err error, key interface{}) { + if !result.Requeue && result.RequeueAfter == 0 { + // Forget about the #AddRateLimited history of the key on every successful synchronization. + // This ensures that future processing of updates for this key is not delayed because of + // an outdated error history. 
+ c.queue.Forget(key) + return + } + + if err != nil { + klog.Fatal(err, key) + } + + if result.RequeueAfter != 0 { + c.queue.AddAfter(key, result.RequeueAfter) + } else { + c.queue.AddRateLimited(key) + } +} + +func (c *Controller) Reconcile(ctx context.Context, key NamespacedName) (Result, error) { + return c.Do.Reconcile(ctx, key) +} diff --git a/pkg/clustertree/cluster-manager/runtime/namespaced-name.go b/pkg/clustertree/cluster-manager/runtime/namespaced-name.go new file mode 100644 index 000000000..a41a699cd --- /dev/null +++ b/pkg/clustertree/cluster-manager/runtime/namespaced-name.go @@ -0,0 +1,15 @@ +package runtime + +type NamespacedName struct { + Namespace string + Name string +} + +const ( + Separator = '/' +) + +// String returns the general purpose string representation +func (n NamespacedName) String() string { + return n.Namespace + string(Separator) + n.Name +} diff --git a/pkg/clustertree/cluster-manager/runtime/result.go b/pkg/clustertree/cluster-manager/runtime/result.go new file mode 100644 index 000000000..f5b11ffc7 --- /dev/null +++ b/pkg/clustertree/cluster-manager/runtime/result.go @@ -0,0 +1,16 @@ +package runtime + +import "time" + +type Result struct { + Requeue bool + + RequeueAfter time.Duration +} + +func (r *Result) IsZero() bool { + if r == nil { + return true + } + return *r == Result{} +} diff --git a/pkg/clustertree/cluster-manager/serverless_node.go b/pkg/clustertree/cluster-manager/serverless_node.go new file mode 100644 index 000000000..348b9ef91 --- /dev/null +++ b/pkg/clustertree/cluster-manager/serverless_node.go @@ -0,0 +1,287 @@ +package clusterManager + +import ( + "context" + "fmt" + "sync" + "time" + + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" + "k8s.io/klog" + "k8s.io/utils/pointer" + + "github.com/kosmos.io/kosmos/cmd/clustertree/cluster-manager/app/options" + kosmosv1alpha1 "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + podcontrollers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod" + leafpodsyncers "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/controllers/pod/leaf-pod" + leafUtils "github.com/kosmos.io/kosmos/pkg/clustertree/cluster-manager/utils" + "github.com/kosmos.io/kosmos/pkg/utils" +) + +const ( + NodeLeaseControllerName = "node-lease-controller" + + DefaultLeaseDuration = 40 + DefaultRenewIntervalFraction = 0.25 + + DefaultNodeStatusUpdateInterval = 1 * time.Minute +) + +func CreateOpenApiNode(ctx context.Context, cluster *kosmosv1alpha1.Cluster, rootClientset kubernetes.Interface, opts *options.Options) error { + // create node + nodeNameInRoot := fmt.Sprintf("%s%s", utils.KosmosNodePrefix, cluster.Name) + nodeInRoot, err := createNode(ctx, rootClientset, cluster.Name, nodeNameInRoot, "v1.21.5-eki.0", opts.ListenPort) + if err != nil { + return err + } + + nodes := []*corev1.Node{nodeInRoot} + // lease / resources + nodelease := NewNodeLeaseController(nodes, rootClientset) + + go func() { + if err := nodelease.Start(ctx); err != nil { + klog.Fatal(err) + } + }() + + // pod + if cluster.Spec.ClusterTreeOptions == nil { + return fmt.Errorf("clusterTreeOptions is nil") + } + ak := cluster.Spec.ClusterTreeOptions.AccessKey + sk := cluster.Spec.ClusterTreeOptions.SecretKey + + if len(ak) == 0 || len(sk) == 0 { + return 
fmt.Errorf("ak/sk is nil") + } + + leafUtils.GetGlobalLeafResourceManager().AddLeafResource(&leafUtils.LeafResource{ + LeafType: leafUtils.LeafTypeServerless, + ServerlessClient: leafUtils.NewServerlessClient(ak, sk), + }, cluster, nodes) + + leafPodWorkerQueue := podcontrollers.NewLeafPodWorkerQueue(&leafpodsyncers.LeafPodWorkerQueueOption{ + // Config: leafRestConfig, + RootClient: rootClientset, + }, leafUtils.LeafTypeServerless) // TODO: + + go leafPodWorkerQueue.Run(ctx) + + return nil +} + +func createNode(ctx context.Context, clientset kubernetes.Interface, clusterName, nodeName, gitVersion string, listenPort int32) (*corev1.Node, error) { + nodeInRoot, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if err != nil { + if !errors.IsNotFound(err) { + return nil, err + } + + nodeInRoot = utils.BuildNodeTemplate(nodeName) + nodeAnnotations := nodeInRoot.GetAnnotations() + if nodeAnnotations == nil { + nodeAnnotations = make(map[string]string, 1) + } + nodeAnnotations[utils.KosmosNodeOwnedByClusterAnnotations] = clusterName + nodeInRoot.SetAnnotations(nodeAnnotations) + + nodeInRoot.Status.NodeInfo.KubeletVersion = gitVersion + nodeInRoot.Status.DaemonEndpoints = corev1.NodeDaemonEndpoints{ + KubeletEndpoint: corev1.DaemonEndpoint{ + Port: listenPort, + }, + } + + nodeInRoot, err = clientset.CoreV1().Nodes().Create(ctx, nodeInRoot, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } + return nodeInRoot, nil +} + +func NewNodeLeaseController(nodes []*corev1.Node, rootClient kubernetes.Interface) *NodeLeaseController { + c := &NodeLeaseController{ + rootClient: rootClient, + nodes: nodes, + leaseInterval: getRenewInterval(), + statusInterval: DefaultNodeStatusUpdateInterval, + } + return c +} + +type NodeLeaseController struct { + nodes []*corev1.Node + nodeLock sync.Mutex + rootClient kubernetes.Interface + leaseInterval time.Duration + statusInterval time.Duration +} + +func (c *NodeLeaseController) Start(ctx context.Context) error { + go wait.UntilWithContext(ctx, c.syncLease, c.leaseInterval) + go wait.UntilWithContext(ctx, c.syncNodeStatus, c.statusInterval) + <-ctx.Done() + return nil +} + +func (c *NodeLeaseController) syncNodeStatus(ctx context.Context) { + nodes := make([]*corev1.Node, 0) + c.nodeLock.Lock() + for _, nodeIndex := range c.nodes { + nodeCopy := nodeIndex.DeepCopy() + nodes = append(nodes, nodeCopy) + } + c.nodeLock.Unlock() + + err := c.updateNodeStatus(ctx, nodes) + if err != nil { + klog.Errorf(err.Error()) + } +} + +func (c *NodeLeaseController) updateNodeStatus(ctx context.Context, n []*corev1.Node) error { + copynode, err := c.rootClient.CoreV1().Nodes().Get(ctx, "ylc-vm-001", metav1.GetOptions{}) + for _, node := range n { + node.Status = copynode.DeepCopy().Status + // remove address + node.Status.Addresses = []corev1.NodeAddress{} + _, err := c.rootClient.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}) + if err != nil { + klog.Errorf("Could not update node status in root cluster,Error: %v", err) + } + } + if err != nil { + klog.Errorf("Could not update node status in root cluster,Error: %v", err) + } + return nil +} + +func (c *NodeLeaseController) syncLease(ctx context.Context) { + nodes := make([]*corev1.Node, 0) + c.nodeLock.Lock() + for _, nodeIndex := range c.nodes { + nodeCopy := nodeIndex.DeepCopy() + nodes = append(nodes, nodeCopy) + } + c.nodeLock.Unlock() + + // TODO: ping openapi + // _, err := c.leafClient.Discovery().ServerVersion() + // if err != nil { + // klog.Errorf("failed to ping 
leaf cluster") + // return + // } + + err := c.createLeaseIfNotExists(ctx, nodes) + if err != nil { + return + } + + err = c.updateLeaseWithRetry(ctx, nodes) + if err != nil { + klog.Errorf("lease has failed, and the maximum number of retries has been reached, %v", err) + return + } + + klog.V(5).Infof("Successfully updated lease") +} + +func (c *NodeLeaseController) createLeaseIfNotExists(ctx context.Context, nodes []*corev1.Node) error { + for _, node := range nodes { + // namespaceName := types.NamespacedName{ + // Namespace: corev1.NamespaceNodeLease, + // Name: node.Name, + // } + _, err := c.rootClient.CoordinationV1().Leases(corev1.NamespaceNodeLease).Get(ctx, node.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + leaseToCreate := c.newLease(node) + _, err := c.rootClient.CoordinationV1().Leases(leaseToCreate.Namespace).Create(ctx, leaseToCreate, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("create lease %s failed", node.Name) + return err + } + } else { + klog.Errorf("get lease %s failed, err: %s", node.Name, err) + return err + } + } + } + return nil +} + +func (c *NodeLeaseController) updateLeaseWithRetry(ctx context.Context, nodes []*corev1.Node) error { + for _, node := range nodes { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + lease := &coordinationv1.Lease{} + namespaceName := types.NamespacedName{ + Namespace: corev1.NamespaceNodeLease, + Name: node.Name, + } + if tmp, err := c.rootClient.CoordinationV1().Leases(namespaceName.Namespace).Get(ctx, namespaceName.Name, metav1.GetOptions{}); err != nil { + klog.Warningf("get lease %s failed with err %v", node.Name, err) + return err + } else { + lease = tmp + } + + lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} + lease.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, + Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind, + Name: node.Name, + UID: node.UID, + }, + } + _, err := c.rootClient.CoordinationV1().Leases(namespaceName.Namespace).Update(ctx, lease, metav1.UpdateOptions{}) + if err != nil { + klog.Warningf("update lease %s failed with err %v", node.Name, err) + return err + } + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func (c *NodeLeaseController) newLease(node *corev1.Node) *coordinationv1.Lease { + lease := &coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + Namespace: corev1.NamespaceNodeLease, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, + Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind, + Name: node.Name, + UID: node.UID, + }, + }, + }, + Spec: coordinationv1.LeaseSpec{ + HolderIdentity: pointer.String(node.Name), + LeaseDurationSeconds: pointer.Int32(DefaultLeaseDuration), + RenewTime: &metav1.MicroTime{Time: time.Now()}, + }, + } + return lease +} + +func getRenewInterval() time.Duration { + interval := DefaultLeaseDuration * DefaultRenewIntervalFraction + intervalDuration := time.Second * time.Duration(int(interval)) + return intervalDuration +} diff --git a/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go b/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go index 781135568..3443f2c18 100644 --- a/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go +++ b/pkg/clustertree/cluster-manager/utils/leaf_resource_manager.go @@ -29,6 +29,13 @@ const ( Party ) +type LeafType string + +const ( + LeafTypeK8s LeafType = 
"k8s" + LeafTypeServerless LeafType = "serverless" +) + type ClusterNode struct { NodeName string LeafMode LeafMode @@ -46,6 +53,12 @@ type LeafResource struct { EnableServiceAccount bool Nodes []ClusterNode RestConfig *rest.Config + LeafType LeafType + ServerlessClient *ServerlessClient +} + +func (lr *LeafResource) GetLeafType() LeafType { + return lr.LeafType } type LeafResourceManager interface { diff --git a/pkg/clustertree/cluster-manager/utils/serverless.go b/pkg/clustertree/cluster-manager/utils/serverless.go new file mode 100644 index 000000000..0ea3e85ec --- /dev/null +++ b/pkg/clustertree/cluster-manager/utils/serverless.go @@ -0,0 +1,61 @@ +package utils + +import ( + "fmt" + + "github.com/mitchellh/mapstructure" + corev1 "k8s.io/api/core/v1" + "k8s.io/klog" + + "github.com/kosmos.io/kosmos/pkg/utils/openapi" +) + +type ServerlessClient struct { + openapi.ApiClient +} + +func NewServerlessClient(accessKey, secretKey string) *ServerlessClient { + apiClient := openapi.NewApiClient(openapi.ApiClientConfig{ + AccessKey: accessKey, + SecretKey: secretKey, + }) + return &ServerlessClient{ + apiClient, + } +} + +// TODO: 会丢一部分数据 +func (s *ServerlessClient) ListPods(eciName string) (*corev1.Pod, error) { + if len(eciName) == 0 { + return nil, fmt.Errorf("eciName is nil") + } + + queryParams := make(map[string]string) + queryParams["eciName"] = eciName + headerParams := make(map[string]string) + response, err := s.Get("/api/web/eci-backend-service/containergroup", openapi.OpenApiParams{ + QueryParams: queryParams, + HeaderParams: headerParams, + }, nil) + + if err != nil { + klog.Fatal(err) + } + + body := response.Body + + record, ok := body["content"].([]map[string]interface{}) + if !ok { + return nil, fmt.Errorf("cannot convert response content to array, eciName: %s", eciName) + } + + if len(record) == 0 { + return nil, fmt.Errorf("cannot get pod, eciName: %s", eciName) + } + + pod := corev1.Pod{} + + err = mapstructure.Decode(record[0], &pod) + + return &pod, err +} diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 1b04c12b5..3393dc129 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -684,6 +684,25 @@ func schema_pkg_apis_kosmos_v1alpha1_ClusterTreeOptions(ref common.ReferenceCall }, }, }, + "leafType": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "accressKey": { + SchemaProps: spec.SchemaProps{ + Description: "secret?", + Type: []string{"string"}, + Format: "", + }, + }, + "secretKey": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, diff --git a/pkg/utils/openapi/client.go b/pkg/utils/openapi/client.go new file mode 100644 index 000000000..839ead71d --- /dev/null +++ b/pkg/utils/openapi/client.go @@ -0,0 +1,237 @@ +package openapi + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" +) + +const ( + GET = "GET" + POST = "POST" + DELETE = "DELETE" + PUT = "PUT" + PATCH = "PATCH" + HEAD = "HEAD" +) + +var METHOD_MAP = map[string]string{ + GET: GET, + POST: POST, + DELETE: DELETE, + PUT: PUT, + PATCH: PATCH, + HEAD: HEAD, +} + +type QueryParams map[string]string +type HeaderParams map[string]string +type BodyParams map[string]interface{} + +type OpenApiResponse struct { + RequestId string `json:"requestId,omitempty"` + State string `json:"state,omitempty"` + ErrorCode string `json:"errorCode,omitempty"` + 
ErrorMessage string `json:"errorMessage,omitempty"` + ErrorDetail string `json:"errorDetail,omitempty"` + Body interface{} `json:"body,omitempty"` +} + +func (r *OpenApiResponse) GetError() string { + return fmt.Sprintf("code: %s, message: %s, detail: %s", r.ErrorCode, r.ErrorMessage, r.ErrorDetail) +} + +type ApiClient struct { + config ApiClientConfig + client http.Client +} + +type ApiClientConfig struct { + Url string + AccessKey string + SecretKey string +} + +type OpenApiParams struct { + HeaderParams map[string]string + QueryParams map[string]string + BodyParams BodyParams + PathParams map[string]string +} + +// nolint +func NewApiClient(config ApiClientConfig) ApiClient { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := http.Client{ + Timeout: 40 * time.Second, + Transport: tr, + } + + if config.Url == "" { + config.Url = DefaultEndpoint + } + + return ApiClient{ + config: config, + client: client, + } +} + +func (c *ApiClient) exec(request *http.Request, headerParams map[string]string) (*OpenApiResponse, error) { + request.Header.Set("Connection", "Keep-Alive") + request.Header.Set("User-Agent", "OpenAPI/2.0/Golang") + request.Header.Set("Accept-Encoding", "gzip") + request.Header.Set("Content-Type", "application/json; charset=utf-8") + for v, k := range headerParams { + request.Header.Set(v, k) + } + + response, err := c.client.Do(request) + + if err != nil { + return nil, err + } + + body, err := io.ReadAll(response.Body) + + if err != nil { + if response.Body != nil { + _ = response.Body.Close() + } + return nil, err + } + + data := &OpenApiResponse{} + + err = json.Unmarshal(body, data) + if err != nil { + return nil, err + } + + return data, nil +} + +func (c *ApiClient) Get(urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + return c.Request(GET, urlTemplate, params, r) +} + +func (c *ApiClient) Post(urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + return c.Request(POST, urlTemplate, params, r) +} + +func (c *ApiClient) Delete(urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + return c.Request(DELETE, urlTemplate, params, r) +} + +func (c *ApiClient) Put(urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + return c.Request(PUT, urlTemplate, params, r) +} + +func (c *ApiClient) Head(urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + return c.Request(HEAD, urlTemplate, params, r) +} + +func (c *ApiClient) Path(urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + return c.Request(PATCH, urlTemplate, params, r) +} + +// conver urlTemplate string to url +func ConvertUrl(urlTemplate string, pathParams map[string]string) string { + latestUrl := "" + if len(pathParams) == 0 { + latestUrl = urlTemplate + } else { + latestUrl = os.Expand(urlTemplate, func(key string) string { + if v, ok := pathParams[key]; ok { + return v + } else { + // to remind you, I won't replace the placeholders + return fmt.Sprintf("${%s}", key) + } + }) + } + return latestUrl +} + +func ValidateMethod(method string) error { + if _, ok := METHOD_MAP[method]; !ok { + return errors.New("method not supported") + } + return nil +} + +// do request +// +// @param method GET POST DELETE PUT PATCH HEAD +// +// @param urlTemplate url template +// +// @param params OpenApiParams include head query body param +// +// @param r convert response to type `r` +// +// 
@return OpenApiResponse whole response +func (c *ApiClient) Request(method string, urlTemplate string, params OpenApiParams, r interface{}) (*OpenApiResponse, error) { + queryParams, headerParams, bodyParams, pathParams := params.QueryParams, params.HeaderParams, params.BodyParams, params.PathParams + + // validate + if err := ValidateMethod(method); err != nil { + return nil, err + } + + latestUrl := ConvertUrl(urlTemplate, pathParams) + + path, err := Sign(queryParams, latestUrl, method, c.config.AccessKey, c.config.SecretKey) + if err != nil { + return nil, err + } + + var body io.Reader + + if len(bodyParams) > 0 { + jsonData, err := json.Marshal(bodyParams) + if err != nil { + return nil, err + } + body = bytes.NewBuffer(jsonData) + } + + request, err := http.NewRequest(method, c.config.Url+path, body) + + if err != nil { + return nil, err + } + + response, err := c.exec(request, headerParams) + + if err != nil { + return nil, err + } + + if response.State == "OK" { + if r == nil { + return response, nil + } + jsonData, err := json.Marshal(response.Body) + if err != nil { + return response, err + } + + err = json.Unmarshal([]byte(jsonData), &r) + if err != nil { + return response, err + } + } else { + return response, errors.New(response.GetError()) + } + return response, nil + +} diff --git a/pkg/utils/openapi/signature.go b/pkg/utils/openapi/signature.go new file mode 100644 index 000000000..62cc5d9d6 --- /dev/null +++ b/pkg/utils/openapi/signature.go @@ -0,0 +1,137 @@ +package openapi + +// nolint +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "net/url" + "sort" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + AccessKey = "AccessKey" + TIMESTAMP = "Timestamp" + Version = "Version" + TimestampFormat = "2006-01-02T15:04:05Z" + // TimestampFormat = "2017-01-11T15:15:11Z" + Signature = "Signature" + SecretKeyPrefix = "BC_SIGNATURE&" + SignatureMethod = "SignatureMethod" + SignatureMethodValue = "HmacSHA1" + SignatureVersion = "SignatureVersion" + SignatureVersionValue = "V2.0" + SignatureNonce = "SignatureNonce" + LineSeparator = "\n" + ParameterSeparator = "&" + QueryStartSymbol = "?" 
+ QuerySeparator = "=" + VersionValue = "2016-12-05" +) +const ( + HighMask = 0xf0 + LowMask = 0x0f +) + +var HexCodeTable = []string{ + "0", "1", "2", "3", + "4", "5", "6", "7", + "8", "9", "a", "b", + "c", "d", "e", "f", +} + +func Sign(queryparams QueryParams, path string, method string, accessKey string, secretKey string) (string, error) { + params := make(map[string]string) + for key, value := range queryparams { + params[key] = value + } + // params[Version] = VersionValue + params[AccessKey] = accessKey + now := time.Now() + params[TIMESTAMP] = now.Format(TimestampFormat) + params[SignatureMethod] = SignatureMethodValue + params[SignatureVersion] = SignatureVersionValue + params[SignatureNonce] = nonce() + // params[SignatureNonce] = "9d81ffbeaaf7477390db5df577bb3299" + // 9d81ffbeaaf7477390db5df577bb3299 + keys := make([]string, len(params)) + index := 0 + for key := range params { + keys[index] = key + index++ + } + sort.Strings(keys) + builder := strings.Builder{} + pos := 0 + paramsLen := len(keys) + for _, key := range keys { + value := params[key] + builder.WriteString(PercentEncode(key)) + builder.WriteString(QuerySeparator) + builder.WriteString(PercentEncode(value)) + if pos != paramsLen-1 { + builder.WriteString(ParameterSeparator) + pos++ + } + } + canonicalQueryString := builder.String() + + hashString := convertToHexString(sha256Encode(canonicalQueryString)) + + unescapedPath, err := url.QueryUnescape(path) + if nil != err { + return "", err + } + builder.Reset() + builder.WriteString(strings.ToUpper(method)) + builder.WriteString(LineSeparator) + builder.WriteString(PercentEncode(unescapedPath)) + builder.WriteString(LineSeparator) + builder.WriteString(hashString) + stringToSign := builder.String() + + signature := convertToHexString(hmacSha1(stringToSign, SecretKeyPrefix+secretKey)) + + builder.Reset() + builder.WriteString(unescapedPath) + builder.WriteString(QueryStartSymbol) + builder.WriteString(canonicalQueryString) + builder.WriteString(ParameterSeparator) + builder.WriteString(Signature) + builder.WriteString(QuerySeparator) + builder.WriteString(PercentEncode(signature)) + return builder.String(), nil +} + +func hmacSha1(text string, keyStr string) []byte { + key := []byte(keyStr) + mac := hmac.New(sha1.New, key) + mac.Write([]byte(text)) + return mac.Sum(nil) +} + +func convertToHexString(data []byte) string { + if data == nil { + return "" + } + builder := strings.Builder{} + for _, d := range data { + builder.WriteString(HexCodeTable[(HighMask&d)>>4]) + builder.WriteString(HexCodeTable[LowMask&d]) + } + return builder.String() +} + +func sha256Encode(text string) []byte { + h := sha256.New() + h.Write([]byte(text)) + return h.Sum(nil) +} + +func nonce() string { + return strings.ReplaceAll(uuid.New().String(), "-", "") +} diff --git a/pkg/utils/openapi/string_util.go b/pkg/utils/openapi/string_util.go new file mode 100644 index 000000000..fa18156c7 --- /dev/null +++ b/pkg/utils/openapi/string_util.go @@ -0,0 +1,35 @@ +package openapi + +import ( + "encoding/json" + "net/url" + "strings" +) + +const ( + DefaultEndpoint = "https://ecloud.10086.cn" +) + +func PercentEncode(urlStr string) string { + urlStr = url.QueryEscape(urlStr) + + urlStr = strings.ReplaceAll(urlStr, "+", "%20") + + urlStr = strings.ReplaceAll(urlStr, "*", "%2A") + + urlStr = strings.ReplaceAll(urlStr, "%7E", "~") + + return urlStr +} + +func Beautify(i interface{}) string { + resp, _ := json.MarshalIndent(i, "", " ") + + return string(resp) +} + +func ToJsonString(i interface{}) string { 
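+ // Best-effort serialization for logging: a marshal error is ignored and yields an empty string.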
+ resp, _ := json.Marshal(i) + + return string(resp) +} diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/fatih/structs/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml new file mode 100644 index 000000000..a08df7981 --- /dev/null +++ b/vendor/github.com/fatih/structs/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.7.x + - 1.8.x + - 1.9.x + - tip +sudo: false +before_install: +- go get github.com/axw/gocov/gocov +- go get github.com/mattn/goveralls +- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +script: +- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE new file mode 100644 index 000000000..34504e4b3 --- /dev/null +++ b/vendor/github.com/fatih/structs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md new file mode 100644 index 000000000..a75eabf37 --- /dev/null +++ b/vendor/github.com/fatih/structs/README.md @@ -0,0 +1,163 @@ +# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) + +Structs contains various utilities to work with Go (Golang) structs. It was +initially used by me to convert a struct into a `map[string]interface{}`. With +time I've added other utilities for structs. It's basically a high level +package based on primitives from the reflect package. Feel free to add new +functions or improve the existing code. 
+ +## Install + +```bash +go get github.com/fatih/structs +``` + +## Usage and Examples + +Just like the standard lib `strings`, `bytes` and co packages, `structs` has +many global functions to manipulate or organize your struct data. Lets define +and declare a struct: + +```go +type Server struct { + Name string `json:"name,omitempty"` + ID int + Enabled bool + users []string // not exported + http.Server // embedded +} + +server := &Server{ + Name: "gopher", + ID: 123456, + Enabled: true, +} +``` + +```go +// Convert a struct to a map[string]interface{} +// => {"Name":"gopher", "ID":123456, "Enabled":true} +m := structs.Map(server) + +// Convert the values of a struct to a []interface{} +// => ["gopher", 123456, true] +v := structs.Values(server) + +// Convert the names of a struct to a []string +// (see "Names methods" for more info about fields) +n := structs.Names(server) + +// Convert the values of a struct to a []*Field +// (see "Field methods" for more info about fields) +f := structs.Fields(server) + +// Return the struct name => "Server" +n := structs.Name(server) + +// Check if any field of a struct is initialized or not. +h := structs.HasZero(server) + +// Check if all fields of a struct is initialized or not. +z := structs.IsZero(server) + +// Check if server is a struct or a pointer to struct +i := structs.IsStruct(server) +``` + +### Struct methods + +The structs functions can be also used as independent methods by creating a new +`*structs.Struct`. This is handy if you want to have more control over the +structs (such as retrieving a single Field). + +```go +// Create a new struct type: +s := structs.New(server) + +m := s.Map() // Get a map[string]interface{} +v := s.Values() // Get a []interface{} +f := s.Fields() // Get a []*Field +n := s.Names() // Get a []string +f := s.Field(name) // Get a *Field based on the given field name +f, ok := s.FieldOk(name) // Get a *Field based on the given field name +n := s.Name() // Get the struct name +h := s.HasZero() // Check if any field is uninitialized +z := s.IsZero() // Check if all fields are uninitialized +``` + +### Field methods + +We can easily examine a single Field for more detail. Below you can see how we +get and interact with various field methods: + + +```go +s := structs.New(server) + +// Get the Field struct for the "Name" field +name := s.Field("Name") + +// Get the underlying value, value => "gopher" +value := name.Value().(string) + +// Set the field's value +name.Set("another gopher") + +// Get the field's kind, kind => "string" +name.Kind() + +// Check if the field is exported or not +if name.IsExported() { + fmt.Println("Name field is exported") +} + +// Check if the value is a zero value, such as "" for string, 0 for int +if !name.IsZero() { + fmt.Println("Name is initialized") +} + +// Check if the field is an anonymous (embedded) field +if !name.IsEmbedded() { + fmt.Println("Name is not an embedded field") +} + +// Get the Field's tag value for tag name "json", tag value => "name,omitempty" +tagValue := name.Tag("json") +``` + +Nested structs are supported too: + +```go +addrField := s.Field("Server").Field("Addr") + +// Get the value for addr +a := addrField.Value().(string) + +// Or get all fields +httpServer := s.Field("Server").Fields() +``` + +We can also get a slice of Fields from the Struct type to iterate over all +fields. 
This is handy if you wish to examine all fields: + +```go +s := structs.New(server) + +for _, f := range s.Fields() { + fmt.Printf("field name: %+v\n", f.Name()) + + if f.IsExported() { + fmt.Printf("value : %+v\n", f.Value()) + fmt.Printf("is zero : %+v\n", f.IsZero()) + } +} +``` + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * [Cihangir Savas](https://github.com/cihangir) + +## License + +The MIT License (MIT) - see LICENSE.md for more details diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go new file mode 100644 index 000000000..e69783230 --- /dev/null +++ b/vendor/github.com/fatih/structs/field.go @@ -0,0 +1,141 @@ +package structs + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + errNotExported = errors.New("field is not exported") + errNotSettable = errors.New("field is not settable") +) + +// Field represents a single struct field that encapsulates high level +// functions around the field. +type Field struct { + value reflect.Value + field reflect.StructField + defaultTag string +} + +// Tag returns the value associated with key in the tag string. If there is no +// such key in the tag, Tag returns the empty string. +func (f *Field) Tag(key string) string { + return f.field.Tag.Get(key) +} + +// Value returns the underlying value of the field. It panics if the field +// is not exported. +func (f *Field) Value() interface{} { + return f.value.Interface() +} + +// IsEmbedded returns true if the given field is an anonymous field (embedded) +func (f *Field) IsEmbedded() bool { + return f.field.Anonymous +} + +// IsExported returns true if the given field is exported. +func (f *Field) IsExported() bool { + return f.field.PkgPath == "" +} + +// IsZero returns true if the given field is not initialized (has a zero value). +// It panics if the field is not exported. +func (f *Field) IsZero() bool { + zero := reflect.Zero(f.value.Type()).Interface() + current := f.Value() + + return reflect.DeepEqual(current, zero) +} + +// Name returns the name of the given field +func (f *Field) Name() string { + return f.field.Name +} + +// Kind returns the fields kind, such as "string", "map", "bool", etc .. +func (f *Field) Kind() reflect.Kind { + return f.value.Kind() +} + +// Set sets the field to given value v. It returns an error if the field is not +// settable (not addressable or not exported) or if the given value's type +// doesn't match the fields type. +func (f *Field) Set(val interface{}) error { + // we can't set unexported fields, so be sure this field is exported + if !f.IsExported() { + return errNotExported + } + + // do we get here? not sure... + if !f.value.CanSet() { + return errNotSettable + } + + given := reflect.ValueOf(val) + + if f.value.Kind() != given.Kind() { + return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind()) + } + + f.value.Set(given) + return nil +} + +// Zero sets the field to its zero value. It returns an error if the field is not +// settable (not addressable or not exported). +func (f *Field) Zero() error { + zero := reflect.Zero(f.value.Type()).Interface() + return f.Set(zero) +} + +// Fields returns a slice of Fields. This is particular handy to get the fields +// of a nested struct . A struct tag with the content of "-" ignores the +// checking of that particular field. Example: +// +// // Field is ignored by this package. 
+// Field *http.Request `structs:"-"` +// +// It panics if field is not exported or if field's kind is not struct +func (f *Field) Fields() []*Field { + return getFields(f.value, f.defaultTag) +} + +// Field returns the field from a nested struct. It panics if the nested struct +// is not exported or if the field was not found. +func (f *Field) Field(name string) *Field { + field, ok := f.FieldOk(name) + if !ok { + panic("field not found") + } + + return field +} + +// FieldOk returns the field from a nested struct. The boolean returns whether +// the field was found (true) or not (false). +func (f *Field) FieldOk(name string) (*Field, bool) { + value := &f.value + // value must be settable so we need to make sure it holds the address of the + // variable and not a copy, so we can pass the pointer to strctVal instead of a + // copy (which is not assigned to any variable, hence not settable). + // see "https://blog.golang.org/laws-of-reflection#TOC_8." + if f.value.Kind() != reflect.Ptr { + a := f.value.Addr() + value = &a + } + v := strctVal(value.Interface()) + t := v.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: v.FieldByName(name), + }, true +} diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go new file mode 100644 index 000000000..3a8770652 --- /dev/null +++ b/vendor/github.com/fatih/structs/structs.go @@ -0,0 +1,584 @@ +// Package structs contains various utilities functions to work with structs. +package structs + +import ( + "fmt" + + "reflect" +) + +var ( + // DefaultTagName is the default tag name for struct fields which provides + // a more granular to tweak certain structs. Lookup the necessary functions + // for more info. + DefaultTagName = "structs" // struct's field default tag name +) + +// Struct encapsulates a struct type to provide several high level functions +// around the struct. +type Struct struct { + raw interface{} + value reflect.Value + TagName string +} + +// New returns a new *Struct with the struct s. It panics if the s's kind is +// not struct. +func New(s interface{}) *Struct { + return &Struct{ + raw: s, + value: strctVal(s), + TagName: DefaultTagName, + } +} + +// Map converts the given struct to a map[string]interface{}, where the keys +// of the map are the field names and the values of the map the associated +// values of the fields. The default key string is the struct field name but +// can be changed in the struct field's tag value. The "structs" key in the +// struct's field tag value is the key name. Example: +// +// // Field appears in map as key "myName". +// Name string `structs:"myName"` +// +// A tag value with the content of "-" ignores that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A tag value with the content of "string" uses the stringer to get the value. Example: +// +// // The value will be output of Animal's String() func. +// // Map will panic if Animal does not implement String(). +// Field *Animal `structs:"field,string"` +// +// A tag value with the option of "flatten" used in a struct field is to flatten its fields +// in the output map. Example: +// +// // The FieldStruct's fields will be flattened into the output map. +// FieldStruct time.Time `structs:",flatten"` +// +// A tag value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. 
+// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field if +// the field value is empty. Example: +// +// // Field appears in map as key "myName", but the field is +// // skipped if empty. +// Field string `structs:"myName,omitempty"` +// +// // Field appears in map as key "Field" (the default), but +// // the field is skipped if empty. +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. +func (s *Struct) Map() map[string]interface{} { + out := make(map[string]interface{}) + s.FillMap(out) + return out +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func (s *Struct) FillMap(out map[string]interface{}) { + if out == nil { + return + } + + fields := s.structFields() + + for _, field := range fields { + name := field.Name + val := s.value.FieldByName(name) + isSubStruct := false + var finalVal interface{} + + tagName, tagOpts := parseTag(field.Tag.Get(s.TagName)) + if tagName != "" { + name = tagName + } + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if !tagOpts.Has("omitnested") { + finalVal = s.nested(val) + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Map, reflect.Struct: + isSubStruct = true + } + } else { + finalVal = val.Interface() + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + out[name] = s.String() + } + continue + } + + if isSubStruct && (tagOpts.Has("flatten")) { + for k := range finalVal.(map[string]interface{}) { + out[k] = finalVal.(map[string]interface{})[k] + } + } else { + out[name] = finalVal + } + } +} + +// Values converts the given s struct's field values to a []interface{}. A +// struct tag with the content of "-" ignores the that particular field. +// Example: +// +// // Field is ignored by this package. +// Field int `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Fields is not processed further by this package. +// Field time.Time `structs:",omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// A tag value with the option of "omitempty" ignores that particular field and +// is not added to the values if the field value is empty. Example: +// +// // Field is skipped if empty +// Field string `structs:",omitempty"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. 
+func (s *Struct) Values() []interface{} { + fields := s.structFields() + + var t []interface{} + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + // if the value is a zero value and the field is marked as omitempty do + // not include + if tagOpts.Has("omitempty") { + zero := reflect.Zero(val.Type()).Interface() + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + continue + } + } + + if tagOpts.Has("string") { + s, ok := val.Interface().(fmt.Stringer) + if ok { + t = append(t, s.String()) + } + continue + } + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + // look out for embedded structs, and convert them to a + // []interface{} to be added to the final values slice + t = append(t, Values(val.Interface())...) + } else { + t = append(t, val.Interface()) + } + } + + return t +} + +// Fields returns a slice of Fields. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Fields() []*Field { + return getFields(s.value, s.TagName) +} + +// Names returns a slice of field names. A struct tag with the content of "-" +// ignores the checking of that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// It panics if s's kind is not struct. +func (s *Struct) Names() []string { + fields := getFields(s.value, s.TagName) + + names := make([]string, len(fields)) + + for i, field := range fields { + names[i] = field.Name() + } + + return names +} + +func getFields(v reflect.Value, tagName string) []*Field { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + t := v.Type() + + var fields []*Field + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if tag := field.Tag.Get(tagName); tag == "-" { + continue + } + + f := &Field{ + field: field, + value: v.FieldByName(field.Name), + } + + fields = append(fields, f) + + } + + return fields +} + +// Field returns a new Field struct that provides several high level functions +// around a single struct field entity. It panics if the field is not found. +func (s *Struct) Field(name string) *Field { + f, ok := s.FieldOk(name) + if !ok { + panic("field not found") + } + + return f +} + +// FieldOk returns a new Field struct that provides several high level functions +// around a single struct field entity. The boolean returns true if the field +// was found. +func (s *Struct) FieldOk(name string) (*Field, bool) { + t := s.value.Type() + + field, ok := t.FieldByName(name) + if !ok { + return nil, false + } + + return &Field{ + field: field, + value: s.value.FieldByName(name), + defaultTag: s.TagName, + }, true +} + +// IsZero returns true if all fields in a struct is a zero value (not +// initialized) A struct tag with the content of "-" ignores the checking of +// that particular field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. 
+func (s *Struct) IsZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := IsZero(val.Interface()) + if !ok { + return false + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if !reflect.DeepEqual(current, zero) { + return false + } + } + + return true +} + +// HasZero returns true if a field in a struct is not initialized (zero value). +// A struct tag with the content of "-" ignores the checking of that particular +// field. Example: +// +// // Field is ignored by this package. +// Field bool `structs:"-"` +// +// A value with the option of "omitnested" stops iterating further if the type +// is a struct. Example: +// +// // Field is not processed further by this package. +// Field time.Time `structs:"myName,omitnested"` +// Field *http.Request `structs:",omitnested"` +// +// Note that only exported fields of a struct can be accessed, non exported +// fields will be neglected. It panics if s's kind is not struct. +func (s *Struct) HasZero() bool { + fields := s.structFields() + + for _, field := range fields { + val := s.value.FieldByName(field.Name) + + _, tagOpts := parseTag(field.Tag.Get(s.TagName)) + + if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") { + ok := HasZero(val.Interface()) + if ok { + return true + } + + continue + } + + // zero value of the given field, such as "" for string, 0 for int + zero := reflect.Zero(val.Type()).Interface() + + // current value of the given field + current := val.Interface() + + if reflect.DeepEqual(current, zero) { + return true + } + } + + return false +} + +// Name returns the structs's type name within its package. For more info refer +// to Name() function. +func (s *Struct) Name() string { + return s.value.Type().Name() +} + +// structFields returns the exported struct fields for a given s struct. This +// is a convenient helper method to avoid duplicate code in some of the +// functions. +func (s *Struct) structFields() []reflect.StructField { + t := s.value.Type() + + var f []reflect.StructField + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // we can't access the value of unexported fields + if field.PkgPath != "" { + continue + } + + // don't check if it's omitted + if tag := field.Tag.Get(s.TagName); tag == "-" { + continue + } + + f = append(f, field) + } + + return f +} + +func strctVal(s interface{}) reflect.Value { + v := reflect.ValueOf(s) + + // if pointer get the underlying element≤ + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + if v.Kind() != reflect.Struct { + panic("not struct") + } + + return v +} + +// Map converts the given struct to a map[string]interface{}. For more info +// refer to Struct types Map() method. It panics if s's kind is not struct. +func Map(s interface{}) map[string]interface{} { + return New(s).Map() +} + +// FillMap is the same as Map. Instead of returning the output, it fills the +// given map. +func FillMap(s interface{}, out map[string]interface{}) { + New(s).FillMap(out) +} + +// Values converts the given struct to a []interface{}. For more info refer to +// Struct types Values() method. It panics if s's kind is not struct. 
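For orientation, a minimal sketch of how the package-level helpers above (Map and Names) are typically called; the Server type, its fields, and its tag values are illustrative assumptions and are not part of the vendored file.

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

// Server is an illustrative struct; "structs" is the package's default tag
// name, and the omitempty option is the one documented above.
type Server struct {
	Name    string `structs:"name"`
	Port    int    `structs:"port"`
	Enabled bool   `structs:"enabled,omitempty"` // dropped from the map when it is the zero value (false)
}

func main() {
	s := Server{Name: "leaf", Port: 8080}

	// Map converts the struct to a map[string]interface{}, honoring the tags.
	m := structs.Map(s)
	fmt.Println(m) // map[name:leaf port:8080]  ("enabled" omitted: zero value)

	// Names lists the exported Go field names.
	fmt.Println(structs.Names(s)) // [Name Port Enabled]
}
```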
+func Values(s interface{}) []interface{} { + return New(s).Values() +} + +// Fields returns a slice of *Field. For more info refer to Struct types +// Fields() method. It panics if s's kind is not struct. +func Fields(s interface{}) []*Field { + return New(s).Fields() +} + +// Names returns a slice of field names. For more info refer to Struct types +// Names() method. It panics if s's kind is not struct. +func Names(s interface{}) []string { + return New(s).Names() +} + +// IsZero returns true if all fields is equal to a zero value. For more info +// refer to Struct types IsZero() method. It panics if s's kind is not struct. +func IsZero(s interface{}) bool { + return New(s).IsZero() +} + +// HasZero returns true if any field is equal to a zero value. For more info +// refer to Struct types HasZero() method. It panics if s's kind is not struct. +func HasZero(s interface{}) bool { + return New(s).HasZero() +} + +// IsStruct returns true if the given variable is a struct or a pointer to +// struct. +func IsStruct(s interface{}) bool { + v := reflect.ValueOf(s) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // uninitialized zero value of a struct + if v.Kind() == reflect.Invalid { + return false + } + + return v.Kind() == reflect.Struct +} + +// Name returns the structs's type name within its package. It returns an +// empty string for unnamed types. It panics if s's kind is not struct. +func Name(s interface{}) string { + return New(s).Name() +} + +// nested retrieves recursively all types for the given value and returns the +// nested value. +func (s *Struct) nested(val reflect.Value) interface{} { + var finalVal interface{} + + v := reflect.ValueOf(val.Interface()) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + n := New(val.Interface()) + n.TagName = s.TagName + m := n.Map() + + // do not add the converted value if there are no exported fields, ie: + // time.Time + if len(m) == 0 { + finalVal = val.Interface() + } else { + finalVal = m + } + case reflect.Map: + // get the element type of the map + mapElem := val.Type() + switch val.Type().Kind() { + case reflect.Ptr, reflect.Array, reflect.Map, + reflect.Slice, reflect.Chan: + mapElem = val.Type().Elem() + if mapElem.Kind() == reflect.Ptr { + mapElem = mapElem.Elem() + } + } + + // only iterate over struct types, ie: map[string]StructType, + // map[string][]StructType, + if mapElem.Kind() == reflect.Struct || + (mapElem.Kind() == reflect.Slice && + mapElem.Elem().Kind() == reflect.Struct) { + m := make(map[string]interface{}, val.Len()) + for _, k := range val.MapKeys() { + m[k.String()] = s.nested(val.MapIndex(k)) + } + finalVal = m + break + } + + // TODO(arslan): should this be optional? + finalVal = val.Interface() + case reflect.Slice, reflect.Array: + if val.Type().Kind() == reflect.Interface { + finalVal = val.Interface() + break + } + + // TODO(arslan): should this be optional? + // do not iterate of non struct types, just pass the value. Ie: []int, + // []string, co... We only iterate further if it's a struct. 
+ // i.e []foo or []*foo + if val.Type().Elem().Kind() != reflect.Struct && + !(val.Type().Elem().Kind() == reflect.Ptr && + val.Type().Elem().Elem().Kind() == reflect.Struct) { + finalVal = val.Interface() + break + } + + slices := make([]interface{}, val.Len()) + for x := 0; x < val.Len(); x++ { + slices[x] = s.nested(val.Index(x)) + } + finalVal = slices + default: + finalVal = val.Interface() + } + + return finalVal +} diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go new file mode 100644 index 000000000..136a31eba --- /dev/null +++ b/vendor/github.com/fatih/structs/tags.go @@ -0,0 +1,32 @@ +package structs + +import "strings" + +// tagOptions contains a slice of tag options +type tagOptions []string + +// Has returns true if the given option is available in tagOptions +func (t tagOptions) Has(opt string) bool { + for _, tagOpt := range t { + if tagOpt == opt { + return true + } + } + + return false +} + +// parseTag splits a struct field's tag into its name and a list of options +// which comes after a name. A tag is in the form of: "name,option1,option2". +// The name can be neglectected. +func parseTag(tag string) (string, tagOptions) { + // tag is one of followings: + // "" + // "name" + // "name,opt" + // "name,opt,opt2" + // ",opt" + + res := strings.Split(tag, ",") + return res[0], res[1:] +} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 000000000..c75823490 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,96 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. 
[GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..0018dc7d9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. 
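As a concrete illustration of the flow described above (not part of the upstream README), the sketch below decodes a `map[string]interface{}` into a typed struct; the Person type and its fields are assumed purely for the example.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person is an illustrative target type; "mapstructure" is the library's
// default struct tag name.
type Person struct {
	Name string `mapstructure:"name"`
	Age  int    `mapstructure:"age"`
}

func main() {
	// Data as it might arrive from JSON, YAML, or another generic source.
	input := map[string]interface{}{
		"name": "Mitchell",
		"age":  91,
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:Mitchell Age:91}
}
```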
+ +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..3a754ca72 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. 
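The composition described above is normally wired into a DecoderConfig through its DecodeHook field; the following is a hedged sketch of that pattern, with the Config type and the sample input assumed for illustration only.

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// Config is an illustrative target; both fields rely on decode hooks because
// the input carries them as plain strings.
type Config struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	input := map[string]interface{}{
		"timeout": "30s",
		"tags":    "a,b,c",
	}

	var c Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &c,
		// Hooks run in order; the output of one feeds the next. A hook that
		// does not apply to a given value passes the data through unchanged.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(c.Timeout, c.Tags) // 30s [a b c]
}
```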
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..1efb22ac3 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1540 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. 
+// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. 
+// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). 
+ ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. 
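A minimal sketch of driving these options through NewDecoder rather than the plain Decode helper; the Options type, its tags, and the sample input are assumptions made for illustration only.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Options is an illustrative target type.
type Options struct {
	Replicas int  `mapstructure:"replicas"`
	Enable   bool `mapstructure:"enable"`
}

func main() {
	// "replicas" arrives as a string and "debug" has no matching field;
	// WeaklyTypedInput converts the former, Metadata records the latter.
	input := map[string]interface{}{
		"replicas": "3",
		"enable":   true,
		"debug":    true,
	}

	var (
		opts Options
		md   mapstructure.Metadata
	)
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:           &opts,
		Metadata:         &md,
		WeaklyTypedInput: true, // "3" -> 3, per the conversion list above
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v unused=%v\n", opts, md.Unused) // {Replicas:3 Enable:true} unused=[debug]
}
```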
+func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. 
+ if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot 
parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. 
+ v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. 
+ + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. 
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c0d437cbe..3f906c525 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -209,6 +209,9 @@ github.com/exponent-io/jsonpath # github.com/fatih/camelcase v1.0.0 ## explicit github.com/fatih/camelcase +# github.com/fatih/structs v1.1.0 +## explicit +github.com/fatih/structs # github.com/felixge/httpsnoop v1.0.3 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -399,6 
+402,9 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mitchellh/go-wordwrap v1.0.0 ## explicit github.com/mitchellh/go-wordwrap +# github.com/mitchellh/mapstructure v1.5.0 +## explicit; go 1.14 +github.com/mitchellh/mapstructure # github.com/moby/locker v1.0.1 ## explicit; go 1.13 github.com/moby/locker
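The hunks above vendor github.com/mitchellh/mapstructure v1.5.0 (alongside github.com/fatih/structs v1.1.0, per modules.txt). As a reading aid only — this is not code from the PR, and the `leafConfig`/`credentials` types and map keys below are hypothetical — here is a minimal, self-contained sketch of how the decoder whose `decodeStructFromMap` path appears above is typically driven, exercising the `,squash` and `,remain` tags and `WeaklyTypedInput`:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Hypothetical types for illustration; not part of the Kosmos API.
type credentials struct {
	AccessKey string `mapstructure:"accessKey"`
	SecretKey string `mapstructure:"secretKey"`
}

type leafConfig struct {
	LeafType    string                 `mapstructure:"leafType"`
	Credentials credentials            `mapstructure:",squash"` // fields are read from the parent map
	Rest        map[string]interface{} `mapstructure:",remain"` // collects keys with no matching field
}

func main() {
	raw := map[string]interface{}{
		"leafType":  "serverless",
		"accessKey": "ak-example",
		"secretKey": 12345, // a number; WeaklyTypedInput renders it as "12345"
		"regionId":  "cn-north-1",
	}

	var cfg leafConfig
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:           &cfg,
		WeaklyTypedInput: true,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(raw); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", cfg)
	// {LeafType:serverless Credentials:{AccessKey:ak-example SecretKey:12345} Rest:map[regionId:cn-north-1]}
}
```

Note that with a `,remain` field present, keys matching no struct field land in that map rather than in `Metadata.Unused`; that is exactly the branch handled near the end of `decodeStructFromMap` above, where the unused-key set is cleared before the `ErrorUnused` check.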
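The companion dependency, github.com/fatih/structs, covers the opposite direction (struct to map). Again a hypothetical sketch rather than PR code, assuming only the library's `structs.Map` helper and its default `structs` tag:

```go
package main

import (
	"fmt"

	"github.com/fatih/structs"
)

// Placeholder type; the field names are illustrative only.
type endpoint struct {
	Host string `structs:"host"`
	Port int    `structs:"port"`
}

func main() {
	// structs.Map walks the exported fields and returns a
	// map[string]interface{}, using the `structs` tag for key names.
	m := structs.Map(endpoint{Host: "10.0.0.1", Port: 6443})
	fmt.Println(m) // map[host:10.0.0.1 port:6443]
}
```

mapstructure itself also handles struct-to-struct decoding (see `decodeStruct` above, which round-trips through a map as an intermediary), so structs is presumably pulled in where a plain map view of a struct is needed rather than a typed decode.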