-
Notifications
You must be signed in to change notification settings - Fork 50
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add notready status upload part for one2cluster in UpdateRootNodeStatus #778
base: main
Are you sure you want to change the base?
Conversation
if err != nil && healthStatus == http.StatusNotFound { | ||
healthStatus, err := healthEndpointCheck(leafClient, "/healthz") | ||
if err != nil { | ||
klog.Errorf("Failed to healthEndpointCheck healthStatus is %v, err is : %v ", healthStatus, err) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
klog.Errorf("Failed to healthEndpointCheck healthStatus is %v, err is : %v ", healthStatus, err) | |
klog.Errorf("Health check failed. Current health status: %v, error: %v", healthStatus, err) | |
return false, false | ||
} | ||
if healthStatus != http.StatusOK { | ||
klog.Infof("Member cluster %v isn't healthy", h.Cluster.Name) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
klog.Infof("Member cluster %v isn't healthy", h.Cluster.Name) | |
klog.Warnf("Member cluster %v isn't healthy", h.Cluster.Name) |
Type: corev1.NodeReady, | ||
Status: corev1.ConditionFalse, | ||
Reason: "LeafNodesNotReady", | ||
Message: "All leaf nodes are not ready.", |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Message: "All leaf nodes are not ready.", | |
Message: "All leaf cluster's master nodes are not ready.", |
@@ -150,7 +151,15 @@ func (h ClassificationHandler) UpdateRootNodeStatus(ctx context.Context, nodesIn | |||
Effect: utils.KosmosNodeTaintEffect, | |||
}) | |||
} else { | |||
rootCopy.Status.Conditions = utils.NodeConditions() | |||
//rootCopy.Status.Conditions = utils.NodeConditions() | |||
online, healthy := h.getClusterHealthStatus(h.LeafClientset) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
online, healthy := h.getClusterHealthStatus(h.LeafClientset) | |
// TODO A node's status should be set to false only after it has been determined that it is offline for three consecutive timing cycles. | |
online, healthy := h.getClusterHealthStatus(h.LeafClientset) |
return []corev1.NodeCondition{ | ||
{ | ||
Type: corev1.NodeReady, | ||
Status: corev1.ConditionFalse, |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Status: corev1.ConditionFalse, | |
Status: corev1.ConditionTrue, |
An unhealthy member cluster does not necessarily mean that its nodes are in a not-ready state.
Signed-off-by: luchunling <[email protected]>
99cbabb
to
c4173c8
Compare
What type of PR is this?
/kind feature
What does this PR do?
Add notready status upload part for one2cluster in UpdateRootNodeStatus
Which issue(s) does this PR fix?
Fixes: When the main cluster cannot connect to the sub-cluster, the root node is marked not-ready.
When connected to the sub-cluster, if all of its master nodes are not ready, the root node is marked not-ready.
Special notes for your reviewer:
Does this PR introduce a user-facing change?