service container: less verbose error logs #17058

Merged 1 commit on Jan 12, 2023
8 changes: 6 additions & 2 deletions libpod/service.go
@@ -135,7 +135,9 @@ func (p *Pod) maybeStopServiceContainer() error {
         }
         logrus.Debugf("Stopping service container %s", serviceCtr.ID())
         if err := serviceCtr.Stop(); err != nil {
-            logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+            if !errors.Is(err, define.ErrCtrStopped) {
+                logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+            }
         }
     })
     return nil
@@ -239,7 +241,9 @@ func (p *Pod) maybeRemoveServiceContainer() error {
         timeout := uint(0)
         logrus.Debugf("Removing service container %s", serviceCtr.ID())
         if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
-            logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+            if !errors.Is(err, define.ErrNoSuchCtr) {
+                logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+            }
         }
     })
     return nil
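For context, a minimal standalone sketch (not part of the PR) of the errors.Is pattern used above. It substitutes a local sentinel for Podman's define.ErrCtrStopped / define.ErrNoSuchCtr, and shows why the check also matches errors that lower layers have wrapped with additional context:

package main

import (
    "errors"
    "fmt"
)

// Local stand-in for a sentinel such as define.ErrCtrStopped.
var errCtrStopped = errors.New("container is stopped")

// stopContainer simulates a lower layer that wraps the sentinel with context.
func stopContainer() error {
    return fmt.Errorf("stopping container abc123: %w", errCtrStopped)
}

func main() {
    if err := stopContainer(); err != nil {
        // errors.Is walks the wrap chain, so the expected "already stopped"
        // case is silently ignored; any other failure is still reported.
        if !errors.Is(err, errCtrStopped) {
            fmt.Printf("Stopping service container: %v\n", err)
        }
    }
}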
67 changes: 67 additions & 0 deletions test/system/700-play.bats
@@ -531,3 +531,70 @@ spec:
    run_podman pod rm -a -f
    run_podman rm -a -f
}

@test "podman kube play - multi-pod YAML" {
    skip_if_remote "service containers only work locally"
    skip_if_journald_unavailable

    # Create the YAML file
    yaml_source="$PODMAN_TMPDIR/test.yaml"
    cat >$yaml_source <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: pod1
  name: pod1
spec:
  containers:
  - command:
    - top
    image: $IMAGE
    name: ctr1
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: pod2
  name: pod2
spec:
  containers:
  - command:
    - top
    image: $IMAGE
    name: ctr2
EOF
    # Run `play kube` in the background as it will wait for the service
    # container to exit.
    timeout --foreground -v --kill=10 60 \
        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &

    # The name of the service container is predictable: the first 12 characters
    # of the hash of the YAML file followed by the "-service" suffix
    yaml_sha=$(sha256sum $yaml_source)
    service_container="${yaml_sha:0:12}-service"
    # Wait for the containers to be running
    container_1=pod1-ctr1
    container_2=pod2-ctr2
    for i in $(seq 1 20); do
        run_podman "?" container wait $container_1 $container_2 $service_container --condition="running"
        if [[ $status == 0 ]]; then
            break
        fi
        sleep 0.5
        # Just for debugging
        run_podman ps -a
    done
    if [[ $status != 0 ]]; then
        die "container $container_1, $container_2 and/or $service_container did not start"
    fi

    # Stop the pods, make sure that no ugly error logs show up and that the
    # service container will implicitly get stopped as well
    run_podman pod stop pod1 pod2
    assert "$output" !~ "Stopping"
    _ensure_container_running $service_container false

    run_podman kube down $yaml_source
}
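The service-container name used by the test is derived exactly as the test comment describes: the first 12 characters of the SHA-256 hash of the YAML file plus a "-service" suffix. Below is a small standalone sketch of that computation; the file name is only an example, and it mirrors the test's `${yaml_sha:0:12}-service` expansion rather than Podman's internal code:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "os"
)

func main() {
    // Example input; the test writes its YAML to $PODMAN_TMPDIR/test.yaml.
    data, err := os.ReadFile("test.yaml")
    if err != nil {
        panic(err)
    }
    sum := sha256.Sum256(data)
    // First 12 hex characters of the digest plus the "-service" suffix,
    // matching what the test computes with sha256sum.
    fmt.Println(hex.EncodeToString(sum[:])[:12] + "-service")
}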