Fix release scripts and RBAC #682

Merged · 4 commits · Jul 22, 2024
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -17,6 +17,7 @@ Changelog for Cass Operator, new PRs should update the `main / unreleased` section
* [ENHANCEMENT] [#184](https://github.com/k8ssandra/cass-operator/issues/349) Add CassandraDatacenter.Status fields as metrics also
* [ENHANCEMENT] [#199](https://github.com/k8ssandra/cass-operator/issues/199) If .spec.readOnlyRootFilesystem is set, run the cassandra container with readOnlyRootFilesystem. Also, modify the default SecurityContext to mention runAsNonRoot: true
* [ENHANCEMENT] [#595](https://github.com/k8ssandra/cass-operator/issues/595) Update Vector to 0.39.0 and enforce the TOML file format in the starting command
* [BUGFIX] [#681](https://github.com/k8ssandra/cass-operator/issues/681) Remove nodes rights from the operator, as they are not required

## v1.21.1

10 changes: 10 additions & 0 deletions config/manifests/bases/cass-operator.clusterserviceversion.yaml
@@ -10,6 +10,16 @@ metadata:
      Simple provisioning, turn-key operations, and automated remediation of Apache Cassandra clusters
    repository: https://github.com/k8ssandra/cass-operator
    support: DataStax
    features.operators.openshift.io/disconnected: "false"
    features.operators.openshift.io/fips-compliant: "false"
    features.operators.openshift.io/proxy-aware: "false"
    features.operators.openshift.io/tls-profiles: "false"
    features.operators.openshift.io/token-auth-aws: "false"
    features.operators.openshift.io/token-auth-azure: "false"
    features.operators.openshift.io/token-auth-gcp: "false"
    features.operators.openshift.io/cnf: "false"
    features.operators.openshift.io/cni: "false"
    features.operators.openshift.io/csi: "false"
  name: cass-operator.v0.0.0
  namespace: placeholder
spec:
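The added features.operators.openshift.io annotations are OperatorHub metadata describing which OpenShift features the bundle supports. A quick way to spot-check the result locally, sketched with the same yq v4 CLI the release tooling already uses:

    yq '.metadata.annotations' config/manifests/bases/cass-operator.clusterserviceversion.yaml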
1 change: 0 additions & 1 deletion config/rbac/role.yaml
@@ -7,7 +7,6 @@ rules:
- apiGroups:
  - ""
  resources:
  - nodes
  - persistentvolumes
  verbs:
  - get
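With nodes dropped from this rule, a deployed operator should no longer be able to read Node objects. A quick sanity check, sketched with kubectl; the namespace and service account name below are assumptions that depend on how the operator was installed:

    kubectl auth can-i get nodes \
      --as=system:serviceaccount:cass-operator:cass-operator-controller-manager
    # expected answer once the updated ClusterRole is applied: no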
@@ -60,7 +60,7 @@ var (
// +kubebuilder:rbac:groups=apps,namespace=cass-operator,resources=deployments/finalizers,verbs=update
// +kubebuilder:rbac:groups=core,namespace=cass-operator,resources=pods;endpoints;services;configmaps;secrets;persistentvolumeclaims;events,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,namespace=cass-operator,resources=namespaces,verbs=get
// +kubebuilder:rbac:groups=core,resources=persistentvolumes;nodes,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch
// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch
// +kubebuilder:rbac:groups=policy,namespace=cass-operator,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete

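The kubebuilder marker above is the source from which config/rbac/role.yaml is generated, so the two changes in this PR must stay in sync. A sketch of the regeneration check, assuming the repository's standard kubebuilder-style make manifests target:

    make manifests
    git diff --exit-code config/rbac/role.yaml  # fails if the committed role drifts from the markers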
166 changes: 0 additions & 166 deletions pkg/utils/k8s_utils.go
@@ -5,9 +5,6 @@ import (
"os"
"strings"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)

@@ -44,129 +41,6 @@ func IntersectionStringSet(a, b StringSet) StringSet {
	return result
}

// k8s Node helper functions
func GetNodeNameSet(nodes []*corev1.Node) StringSet {
	result := StringSet{}
	for _, node := range nodes {
		result[node.Name] = true
	}
	return result
}

func hasTaint(node *corev1.Node, taintKey, value string, effect corev1.TaintEffect) bool {
	for _, taint := range node.Spec.Taints {
		if taint.Key == taintKey && taint.Effect == effect {
			if taint.Value == value {
				return true
			}
		}
	}
	return false
}

func FilterNodesWithFn(nodes []*corev1.Node, fn func(*corev1.Node) bool) []*corev1.Node {
	result := []*corev1.Node{}
	for _, node := range nodes {
		if fn(node) {
			result = append(result, node)
		}
	}
	return result
}

func FilterNodesWithTaintKeyValueEffect(nodes []*corev1.Node, taintKey, value string, effect corev1.TaintEffect) []*corev1.Node {
	return FilterNodesWithFn(nodes, func(node *corev1.Node) bool {
		return hasTaint(node, taintKey, value, effect)
	})
}

// k8s Pod helper functions
func IsPodUnschedulable(pod *corev1.Pod) bool {
	for _, condition := range pod.Status.Conditions {
		if condition.Reason == corev1.PodReasonUnschedulable &&
			condition.Type == corev1.PodScheduled &&
			condition.Status == corev1.ConditionFalse {
			return true
		}
	}
	return false
}

func GetPodNameSet(pods []*corev1.Pod) StringSet {
	names := StringSet{}
	for _, pod := range pods {
		names[pod.Name] = true
	}

	return names
}

func GetPodNodeNameSet(pods []*corev1.Pod) StringSet {
	names := StringSet{}
	for _, pod := range pods {
		names[pod.Spec.NodeName] = true
	}
	return names
}

func FilterPodsWithFn(pods []*corev1.Pod, fn func(*corev1.Pod) bool) []*corev1.Pod {
	result := []*corev1.Pod{}
	for _, pod := range pods {
		if fn(pod) {
			result = append(result, pod)
		}
	}
	return result
}

func FilterPodsWithNodeInNameSet(pods []*corev1.Pod, nameSet StringSet) []*corev1.Pod {
	return FilterPodsWithFn(pods, func(pod *corev1.Pod) bool {
		return nameSet[pod.Spec.NodeName]
	})
}

func FilterPodsWithAnnotationKey(pods []*corev1.Pod, key string) []*corev1.Pod {
	return FilterPodsWithFn(pods, func(pod *corev1.Pod) bool {
		annos := pod.ObjectMeta.Annotations
		if annos != nil {
			_, ok := annos[key]
			return ok
		}
		return false
	})
}

func FilterPodsWithLabel(pods []*corev1.Pod, label, value string) []*corev1.Pod {
	return FilterPodsWithFn(pods, func(pod *corev1.Pod) bool {
		labels := pod.Labels
		if labels != nil {
			labelValue, ok := labels[label]
			return ok && labelValue == value
		}
		return false
	})
}

// k8s PVC helpers
func FilterPVCsWithFn(pvcs []*corev1.PersistentVolumeClaim, fn func(*corev1.PersistentVolumeClaim) bool) []*corev1.PersistentVolumeClaim {
	result := []*corev1.PersistentVolumeClaim{}
	for _, pvc := range pvcs {
		if fn(pvc) {
			result = append(result, pvc)
		}
	}
	return result
}

func GetPVCSelectedNodeName(pvc *corev1.PersistentVolumeClaim) string {
	annos := pvc.Annotations
	if annos == nil {
		annos = map[string]string{}
	}
	pvcNode := annos["volume.kubernetes.io/selected-node"]
	return pvcNode
}

//
// Migrated from operator-sdk, these are internal in newer versions
//
@@ -228,43 +102,3 @@ func GetOperatorNamespace() (string, error) {
func isRunModeLocal() bool {
	return os.Getenv(ForceRunModeEnv) == string(LocalRunMode)
}

// GetGVKsFromAddToScheme takes in the runtime scheme and filters out all generic apimachinery meta types.
// It returns just the GVK specific to this scheme.
func GetGVKsFromAddToScheme(addToSchemeFunc func(*runtime.Scheme) error) ([]schema.GroupVersionKind, error) {
	s := runtime.NewScheme()
	err := addToSchemeFunc(s)
	if err != nil {
		return nil, err
	}
	schemeAllKnownTypes := s.AllKnownTypes()
	ownGVKs := []schema.GroupVersionKind{}
	for gvk := range schemeAllKnownTypes {
		if !isKubeMetaKind(gvk.Kind) {
			ownGVKs = append(ownGVKs, gvk)
		}
	}

	return ownGVKs, nil
}

func isKubeMetaKind(kind string) bool {
	if strings.HasSuffix(kind, "List") ||
		kind == "PatchOptions" ||
		kind == "GetOptions" ||
		kind == "DeleteOptions" ||
		kind == "ExportOptions" ||
		kind == "APIVersions" ||
		kind == "APIGroupList" ||
		kind == "APIResourceList" ||
		kind == "UpdateOptions" ||
		kind == "CreateOptions" ||
		kind == "Status" ||
		kind == "WatchEvent" ||
		kind == "ListOptions" ||
		kind == "APIGroup" {
		return true
	}

	return false
}
85 changes: 62 additions & 23 deletions scripts/release-helm-chart.sh
@@ -6,55 +6,94 @@ if [[ ! $0 == scripts/* ]]; then
fi

# This script assumes the k8ssandra repository is checked out at ../k8ssandra, on the main branch
if [ "$#" -ne 1 ]; then
  echo "Usage: scripts/release-helm-chart.sh version"
if [ "$#" -le 1 ]; then
  echo "Usage: scripts/release-helm-chart.sh version legacy"
  echo "Script assumes you are in the correct branch / tag and that k8ssandra repository"
  echo "has been checked out to ../k8ssandra/"
  echo "has been checked out to ../k8ssandra/. If legacy is set, the script will generate"
  echo "CRDs to the chart/crds directory"
  exit
fi

# Includes here to get all the updates even if we swap to an older branch
. scripts/lib.sh

LEGACY=false
if [[ $2 == "legacy" ]]; then
  LEGACY=true
fi

# This should work with BSD/MacOS mktemp and GNU one
CRD_TMP_DIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'crd')

VERSION=$1
CHART_HOME=../k8ssandra/charts/cass-operator
CRD_TARGET_PATH=$CRD_TMP_DIR
TEMPLATE_HOME=$CHART_HOME/templates
CRD_TARGET_PATH=$TEMPLATE_HOME

# Checkout tag
git checkout v$VERSION

# Create CRDs
kustomize build config/crd --output $CRD_TARGET_PATH
kustomize build config/crd --output $CRD_TMP_DIR

# Rename generated CRDs to shorter format
for f in $CRD_TARGET_PATH/*; do
for f in $CRD_TMP_DIR/*; do
  TARGET_FILENAME=$(yq '.spec.names.plural' $f).yaml
  mv $f $CRD_TARGET_PATH/$TARGET_FILENAME
  mv $f $CRD_TMP_DIR/$TARGET_FILENAME
done

# Add Helm conditionals to the end and beginning of CRDs before applying them to the templates path
echo "Updating CRDs in" $TEMPLATE_HOME
CRD_FILE_NAME=$TEMPLATE_HOME/crds.yaml
echo '{{- if .Values.manageCrds }}' > $CRD_FILE_NAME

declare -a files
files=($CRD_TARGET_PATH/*)
for i in ${!files[@]}; do
  echo "Processing " ${files[$i]}
  yq -i '.metadata.annotations."helm.sh/resource-policy" = "keep"' ${files[$i]}
  cat ${files[$i]} >> $CRD_FILE_NAME
  if [[ $i -lt ${#files[@]}-1 ]]; then
    echo "---" >> $CRD_FILE_NAME
  fi
done
echo '{{- end }}' >> $CRD_FILE_NAME
if [ "$LEGACY" == true ]; then
  echo "Updating CRDs for legacy CRD handling in Helm chart"

  # Update CRDs for legacy Helm chart
  CRD_TARGET_PATH=$CHART_HOME/crds
  cp -r $CRD_TMP_DIR/* $CRD_TARGET_PATH
else
  # Add Helm conditionals to the end and beginning of CRDs before applying them to the templates path
  echo "Updating CRDs in" $TEMPLATE_HOME
  CRD_FILE_NAME=$CRD_TARGET_PATH/crds.yaml
  echo '{{- if .Values.manageCrds }}' > $CRD_FILE_NAME

  declare -a files
  files=($CRD_TMP_DIR/*)
  for i in ${!files[@]}; do
    echo "Processing " ${files[$i]}
    yq -i '.metadata.annotations."helm.sh/resource-policy" = "keep"' ${files[$i]}
    cat ${files[$i]} >> $CRD_FILE_NAME
    if [[ $i -lt ${#files[@]}-1 ]]; then
      echo "---" >> $CRD_FILE_NAME
    fi
  done
  echo '{{- end }}' >> $CRD_FILE_NAME
fi

rm -fr $CRD_TMP_DIR

# Update role.yaml
echo "Updating role.yaml"
ROLE_FILE_NAME=$TEMPLATE_HOME/role.yaml
cat <<'EOF' > $ROLE_FILE_NAME
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "k8ssandra-common.fullname" . }}
  labels: {{ include "k8ssandra-common.labels" . | indent 4 }}
{{- if .Values.global.clusterScoped }}
EOF
yq -N eval-all '.rules = (.rules as $item ireduce ([]; . *+ $item)) | select(di == 0) | with_entries(select(.key | test("rules")))' config/rbac/role.yaml >> $ROLE_FILE_NAME
echo '{{- else }}' >> $ROLE_FILE_NAME
yq -N 'select(di == 0) | with_entries(select(.key | test("rules")))' config/rbac/role.yaml >> $ROLE_FILE_NAME
cat <<'EOF' >> $ROLE_FILE_NAME
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ template "k8ssandra-common.fullname" . }}
  labels: {{ include "k8ssandra-common.labels" . | indent 4 }}
EOF
yq -N 'select(di == 1) | with_entries(select(.key | test("rules")))' config/rbac/role.yaml >> $ROLE_FILE_NAME
echo '{{- end }}' >> $ROLE_FILE_NAME

# Update version of the Chart.yaml automatically (to next minor one)
CURRENT_VERSION=$(yq '.version' $CHART_HOME/Chart.yaml)
next_minor_version
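For reference, a sketch of invoking the reworked release script; the version tag is hypothetical and ../k8ssandra must be checked out as described in the usage text:

    scripts/release-helm-chart.sh 1.22.0 legacy

With the second argument set to legacy, the plain CRDs are copied into the chart's crds/ directory; otherwise they are wrapped in the .Values.manageCrds conditional and written to the chart's templates/crds.yaml.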