Skip to content

Commit

Permalink
fix tidb-operator crash when user modify sts upgrade strategy imprope…
Browse files Browse the repository at this point in the history
…rly (#912) (#969)

* fix tidb-operator crash when user modify sts upgrade strategy incorrectly
  • Loading branch information
sre-bot authored and onlymellb committed Sep 30, 2019
1 parent 90697df commit 0fc9cbc
Show file tree
Hide file tree
Showing 8 changed files with 205 additions and 72 deletions.
11 changes: 3 additions & 8 deletions pkg/manager/member/pd_scaler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func TestPDScalerScaleOut(t *testing.T) {

oldSet := newStatefulSetForPDScale()
newSet := oldSet.DeepCopy()
newSet.Spec.Replicas = int32Pointer(7)
newSet.Spec.Replicas = controller.Int32Ptr(7)

scaler, _, pvcIndexer, pvcControl := newFakePDScaler()

Expand Down Expand Up @@ -252,7 +252,7 @@ func TestPDScalerScaleIn(t *testing.T) {

oldSet := newStatefulSetForPDScale()
newSet := oldSet.DeepCopy()
newSet.Spec.Replicas = int32Pointer(3)
newSet.Spec.Replicas = controller.Int32Ptr(3)

scaler, pdControl, pvcIndexer, pvcControl := newFakePDScaler()

Expand Down Expand Up @@ -374,7 +374,7 @@ func newStatefulSetForPDScale() *apps.StatefulSet {
Namespace: metav1.NamespaceDefault,
},
Spec: apps.StatefulSetSpec{
Replicas: int32Pointer(5),
Replicas: controller.Int32Ptr(5),
},
}
return set
Expand All @@ -389,11 +389,6 @@ func newPVCForStatefulSet(set *apps.StatefulSet, memberType v1alpha1.MemberType)
}
}

// int32Pointer returns a pointer to num converted to int32, for use in
// Kubernetes spec fields that take *int32 (e.g. StatefulSet replicas).
func int32Pointer(num int) *int32 {
	p := new(int32)
	*p = int32(num)
	return p
}

func normalPDMember(tc *v1alpha1.TidbCluster) {
tcName := tc.GetName()
tc.Status.PD.Members = map[string]v1alpha1.PDMember{
Expand Down
10 changes: 10 additions & 0 deletions pkg/manager/member/pd_upgrader.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,16 @@ func (pu *pdUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Sta
return nil
}

if oldSet.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType || oldSet.Spec.UpdateStrategy.RollingUpdate == nil {
	// Manually bypass tidb-operator to modify statefulset directly, such as modify pd statefulset's RollingUpdate strategy to OnDelete strategy,
// or set RollingUpdate to nil, skip tidb-operator's rolling update logic in order to speed up the upgrade in the test environment occasionally.
// If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading pd.
// Therefore, in the production environment, we should try to avoid modifying the pd statefulset update strategy directly.
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
glog.Warningf("tidbcluster: [%s/%s] pd statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
return nil
}

setUpgradePartition(newSet, *oldSet.Spec.UpdateStrategy.RollingUpdate.Partition)
for i := tc.Status.PD.StatefulSet.Replicas - 1; i >= 0; i-- {
podName := pdPodName(tcName, i)
Expand Down
61 changes: 51 additions & 10 deletions pkg/manager/member/pd_upgrader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
}
SetLastAppliedConfigAnnotation(oldSet)

newSet.Spec.UpdateStrategy.RollingUpdate.Partition = func() *int32 { i := int32(3); return &i }()
newSet.Spec.UpdateStrategy.RollingUpdate.Partition = controller.Int32Ptr(3)

err := upgrader.Upgrade(tc, oldSet, newSet)
test.errExpectFn(g, err)
Expand All @@ -93,13 +93,54 @@ func TestPDUpgraderUpgrade(t *testing.T) {
tc.Status.PD.Synced = true
},
changePods: nil,
changeOldSet: nil,
transferLeaderErr: false,
errExpectFn: func(g *GomegaWithT, err error) {
g.Expect(err).NotTo(HaveOccurred())
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(1); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
},
},
{
name: "modify oldSet update strategy to OnDelete",
changeFn: func(tc *v1alpha1.TidbCluster) {
tc.Status.PD.Synced = true
},
changePods: nil,
changeOldSet: func(set *apps.StatefulSet) {
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.OnDeleteStatefulSetStrategyType,
}
},
transferLeaderErr: false,
errExpectFn: func(g *GomegaWithT, err error) {
g.Expect(err).NotTo(HaveOccurred())
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType}))
},
},
{
name: "set oldSet's RollingUpdate strategy to nil",
changeFn: func(tc *v1alpha1.TidbCluster) {
tc.Status.PD.Synced = true
},
changePods: nil,
changeOldSet: func(set *apps.StatefulSet) {
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
}
},
transferLeaderErr: false,
errExpectFn: func(g *GomegaWithT, err error) {
g.Expect(err).NotTo(HaveOccurred())
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}))
},
},
{
Expand All @@ -117,7 +158,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(3); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(3)))
},
},
{
Expand All @@ -133,7 +174,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(3); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(3)))
},
},
{
Expand All @@ -149,7 +190,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(2); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(2)))
},
},
{
Expand All @@ -165,7 +206,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(2); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(2)))
},
},
{
Expand All @@ -180,7 +221,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.NormalPhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(3); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(3)))
},
},
{
Expand All @@ -196,7 +237,7 @@ func TestPDUpgraderUpgrade(t *testing.T) {
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(func() *int32 { i := int32(2); return &i }()))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(2)))
},
},
}
Expand Down Expand Up @@ -226,7 +267,7 @@ func newStatefulSetForPDUpgrader() *apps.StatefulSet {
Namespace: metav1.NamespaceDefault,
},
Spec: apps.StatefulSetSpec{
Replicas: int32Pointer(3),
Replicas: controller.Int32Ptr(3),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
Expand All @@ -239,7 +280,7 @@ func newStatefulSetForPDUpgrader() *apps.StatefulSet {
},
UpdateStrategy: apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{Partition: func() *int32 { i := int32(2); return &i }()},
RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{Partition: controller.Int32Ptr(2)},
},
},
Status: apps.StatefulSetStatus{
Expand Down
10 changes: 10 additions & 0 deletions pkg/manager/member/tidb_upgrader.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,16 @@ func (tdu *tidbUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stateful
return nil
}

if oldSet.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType || oldSet.Spec.UpdateStrategy.RollingUpdate == nil {
// Manually bypass tidb-operator to modify statefulset directly, such as modify tidb statefulset's RollingUpdate strategy to OnDelete strategy,
// or set RollingUpdate to nil, skip tidb-operator's rolling update logic in order to speed up the upgrade in the test environment occasionally.
// If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading tidb.
// Therefore, in the production environment, we should try to avoid modifying the tidb statefulset update strategy directly.
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
glog.Warningf("tidbcluster: [%s/%s] tidb statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
return nil
}

setUpgradePartition(newSet, *oldSet.Spec.UpdateStrategy.RollingUpdate.Partition)
for i := tc.Status.TiDB.StatefulSet.Replicas - 1; i >= 0; i-- {
podName := tidbPodName(tcName, i)
Expand Down
64 changes: 54 additions & 10 deletions pkg/manager/member/tidb_upgrader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
getLastAppliedConfigErr bool
resignDDLOwnerError bool
errorExpect bool
changeOldSet func(set *apps.StatefulSet)
expectFn func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet)
}

Expand All @@ -61,6 +62,10 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
}

oldSet := newStatefulSetForTiDBUpgrader()
if test.changeOldSet != nil {
test.changeOldSet(oldSet)
}

newSet := oldSet.DeepCopy()
if test.getLastAppliedConfigErr {
oldSet.SetAnnotations(map[string]string{LastAppliedConfigAnnotation: "fake apply config"})
Expand All @@ -85,7 +90,42 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
},
getLastAppliedConfigErr: false,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(0); return &i }())))
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(0)))
},
},
{
name: "modify oldSet update strategy to OnDelete",
changeFn: func(tc *v1alpha1.TidbCluster) {
tc.Status.PD.Phase = v1alpha1.NormalPhase
tc.Status.TiKV.Phase = v1alpha1.NormalPhase
},
getLastAppliedConfigErr: false,
changeOldSet: func(set *apps.StatefulSet) {
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.OnDeleteStatefulSetStrategyType,
}
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType}))
},
},
{
name: "set oldSet's RollingUpdate strategy to nil",
changeFn: func(tc *v1alpha1.TidbCluster) {
tc.Status.PD.Phase = v1alpha1.NormalPhase
tc.Status.TiKV.Phase = v1alpha1.NormalPhase
},
changeOldSet: func(set *apps.StatefulSet) {
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
}
},
getLastAppliedConfigErr: false,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy).To(Equal(apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}))
},
},
{
Expand All @@ -96,7 +136,7 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
},
getLastAppliedConfigErr: false,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(1); return &i }())))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
},
},
{
Expand All @@ -107,7 +147,7 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
},
getLastAppliedConfigErr: false,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(1); return &i }())))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
},
},
{
Expand All @@ -119,7 +159,8 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
},
getLastAppliedConfigErr: false,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(1); return &i }())))
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
},
},
{
Expand All @@ -131,7 +172,7 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
getLastAppliedConfigErr: true,
errorExpect: true,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(1); return &i }())))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
},
},
{
Expand All @@ -147,7 +188,8 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
getLastAppliedConfigErr: false,
errorExpect: true,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(1); return &i }())))
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
},
},
{
Expand All @@ -160,7 +202,8 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
resignDDLOwnerError: true,
errorExpect: true,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(1); return &i }())))
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(1)))
g.Expect(tc.Status.TiDB.ResignDDLOwnerRetryCount).To(Equal(int32(1)))
},
},
Expand All @@ -175,7 +218,8 @@ func TestTiDBUpgrader_Upgrade(t *testing.T) {
resignDDLOwnerError: true,
errorExpect: false,
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal((func() *int32 { i := int32(0); return &i }())))
g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(0)))
g.Expect(tc.Status.TiDB.ResignDDLOwnerRetryCount).To(Equal(int32(0)))
},
},
Expand All @@ -201,7 +245,7 @@ func newStatefulSetForTiDBUpgrader() *apps.StatefulSet {
Namespace: metav1.NamespaceDefault,
},
Spec: apps.StatefulSetSpec{
Replicas: int32Pointer(2),
Replicas: controller.Int32Ptr(2),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
Expand All @@ -214,7 +258,7 @@ func newStatefulSetForTiDBUpgrader() *apps.StatefulSet {
},
UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
Partition: int32Pointer(1),
Partition: controller.Int32Ptr(1),
},
},
},
Expand Down
4 changes: 2 additions & 2 deletions pkg/manager/member/tikv_scaler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func TestTiKVScalerScaleOut(t *testing.T) {

oldSet := newStatefulSetForPDScale()
newSet := oldSet.DeepCopy()
newSet.Spec.Replicas = int32Pointer(7)
newSet.Spec.Replicas = controller.Int32Ptr(7)

scaler, _, pvcIndexer, _, pvcControl := newFakeTiKVScaler()

Expand Down Expand Up @@ -171,7 +171,7 @@ func TestTiKVScalerScaleIn(t *testing.T) {

oldSet := newStatefulSetForPDScale()
newSet := oldSet.DeepCopy()
newSet.Spec.Replicas = int32Pointer(3)
newSet.Spec.Replicas = controller.Int32Ptr(3)

pod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
Expand Down
10 changes: 10 additions & 0 deletions pkg/manager/member/tikv_upgrader.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,16 @@ func (tku *tikvUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stateful
return nil
}

if oldSet.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType || oldSet.Spec.UpdateStrategy.RollingUpdate == nil {
// Manually bypass tidb-operator to modify statefulset directly, such as modify tikv statefulset's RollingUpdate strategy to OnDelete strategy,
// or set RollingUpdate to nil, skip tidb-operator's rolling update logic in order to speed up the upgrade in the test environment occasionally.
// If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading tikv.
// Therefore, in the production environment, we should try to avoid modifying the tikv statefulset update strategy directly.
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
glog.Warningf("tidbcluster: [%s/%s] tikv statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
return nil
}

setUpgradePartition(newSet, *oldSet.Spec.UpdateStrategy.RollingUpdate.Partition)
for i := tc.Status.TiKV.StatefulSet.Replicas - 1; i >= 0; i-- {
store := tku.getStoreByOrdinal(tc, i)
Expand Down
Loading

0 comments on commit 0fc9cbc

Please sign in to comment.