From 3a5c37caaf5254ff4d454a4b94e333a37df3f4ef Mon Sep 17 00:00:00 2001 From: Bilal Akhtar Date: Mon, 2 Nov 2020 09:30:39 -0500 Subject: [PATCH] util, server: Add GetCgroupCPU, use it to reflect cpu usage Enhances the util/cgroups package to also support getting CPU limits from the process' current cgroup. The cpu limit / share for the current cgroup is denoted in two separate variables: the cpu period, and the cpu quota. When (quota / period) = numCPUs, this cgroup has access to all the CPUs on the system. This PR also updates SampleEnvironment to call this method and adjust the cpu usage % accordingly. Release note (bug fix): Improve accuracy of reported CPU usage when running in containers. --- pkg/server/status/runtime.go | 5 +- pkg/util/cgroups/cgroups.go | 244 +++++++++++++++++++++++++++++-- pkg/util/cgroups/cgroups_test.go | 235 +++++++++++++++++++++++++++++ 3 files changed, 467 insertions(+), 17 deletions(-) diff --git a/pkg/server/status/runtime.go b/pkg/server/status/runtime.go index 45d1e95b1ad7..e2e79e24bb2c 100644 --- a/pkg/server/status/runtime.go +++ b/pkg/server/status/runtime.go @@ -19,6 +19,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/build" + "github.com/cockroachdb/cockroach/pkg/util/cgroups" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" @@ -440,6 +441,8 @@ func (rsr *RuntimeStatSampler) SampleEnvironment( if err := cpuTime.Get(pid); err != nil { log.Errorf(ctx, "unable to get cpu usage: %v", err) } + cgroupCPU, _ := cgroups.GetCgroupCPU() + cpuShare := cgroupCPU.CPUShares() fds := gosigar.ProcFDUsage{} if err := fds.Get(pid); err != nil { @@ -500,7 +503,7 @@ func (rsr *RuntimeStatSampler) SampleEnvironment( stime := int64(cpuTime.Sys) * 1e6 uPerc := float64(utime-rsr.last.utime) / dur sPerc := float64(stime-rsr.last.stime) / dur - combinedNormalizedPerc := (sPerc + uPerc) / float64(runtime.NumCPU()) + combinedNormalizedPerc := (sPerc + uPerc) / cpuShare gcPausePercent := float64(uint64(gc.PauseTotal)-rsr.last.gcPauseTime) / dur rsr.last.now = now rsr.last.utime = utime diff --git a/pkg/util/cgroups/cgroups.go b/pkg/util/cgroups/cgroups.go index 83d9713d985d..b549c858d7ab 100644 --- a/pkg/util/cgroups/cgroups.go +++ b/pkg/util/cgroups/cgroups.go @@ -18,14 +18,22 @@ import ( "math" "os" "path/filepath" + "runtime" "strconv" + "strings" "github.com/cockroachdb/errors" ) const ( - cgroupV1MemLimitFilename = "memory.stat" - cgroupV2MemLimitFilename = "memory.max" + cgroupV1MemLimitFilename = "memory.stat" + cgroupV2MemLimitFilename = "memory.max" + cgroupV1CPUQuotaFilename = "cpu.cfs_quota_us" + cgroupV1CPUPeriodFilename = "cpu.cfs_period_us" + cgroupV1CPUSysUsageFilename = "cpuacct.usage_sys" + cgroupV1CPUUserUsageFilename = "cpuacct.usage_user" + cgroupV2CPUMaxFilename = "cpu.max" + cgroupV2CPUStatFilename = "cpu.stat" ) // GetMemoryLimit attempts to retrieve the cgroup memory limit for the current @@ -38,7 +46,7 @@ func GetMemoryLimit() (limit int64, warnings string, err error) { // cgroup memory limit detection path implemented here as // /proc/self/cgroup file -> /proc/self/mountinfo mounts -> cgroup version -> version specific limit check func getCgroupMem(root string) (limit int64, warnings string, err error) { - path, err := detectMemCntrlPath(filepath.Join(root, "/proc/self/cgroup")) + path, err := detectCntrlPath(filepath.Join(root, "/proc/self/cgroup"), "memory") if err != nil { return 0, "", err } @@ -48,16 +56,16 @@ func getCgroupMem(root string) 
(limit int64, warnings string, err error) {
 		return 0, "no cgroup memory controller detected", nil
 	}
 
-	mount, ver, err := getCgroupDetails(filepath.Join(root, "/proc/self/mountinfo"), path)
+	mount, ver, err := getCgroupDetails(filepath.Join(root, "/proc/self/mountinfo"), path, "memory")
 	if err != nil {
 		return 0, "", err
 	}
 
 	switch ver {
 	case 1:
-		limit, warnings, err = detectLimitInV1(filepath.Join(root, mount))
+		limit, warnings, err = detectMemLimitInV1(filepath.Join(root, mount))
 	case 2:
-		limit, warnings, err = detectLimitInV2(filepath.Join(root, mount, path))
+		limit, warnings, err = detectMemLimitInV2(filepath.Join(root, mount, path))
 	default:
 		limit, err = 0, fmt.Errorf("detected unknown cgroup version index: %d", ver)
 	}
@@ -65,8 +73,136 @@ func getCgroupMem(root string) (limit int64, warnings string, err error) {
 	return limit, warnings, err
 }
 
+func readFile(filepath string) (res []byte, err error) {
+	var f *os.File
+	f, err = os.Open(filepath)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		err = errors.CombineErrors(err, f.Close())
+	}()
+	res, err = ioutil.ReadAll(f)
+	return res, err
+}
+
+func cgroupFileToUint64(filepath, desc string) (res uint64, err error) {
+	contents, err := readFile(filepath)
+	if err != nil {
+		return 0, errors.Wrapf(err, "error when reading %s from cgroup v1 at %s", desc, filepath)
+	}
+	res, err = strconv.ParseUint(string(bytes.TrimSpace(contents)), 10, 64)
+	if err != nil {
+		return 0, errors.Wrapf(err, "error when parsing %s from cgroup v1 at %s", desc, filepath)
+	}
+	return res, err
+}
+
+func cgroupFileToInt64(filepath, desc string) (res int64, err error) {
+	contents, err := readFile(filepath)
+	if err != nil {
+		return 0, errors.Wrapf(err, "error when reading %s from cgroup v1 at %s", desc, filepath)
+	}
+	res, err = strconv.ParseInt(string(bytes.TrimSpace(contents)), 10, 64)
+	if err != nil {
+		return 0, errors.Wrapf(err, "error when parsing %s from cgroup v1 at %s", desc, filepath)
+	}
+	return res, nil
+}
+
+func detectCPUQuotaInV1(cRoot string) (period, quota int64, err error) {
+	quotaFilePath := filepath.Join(cRoot, cgroupV1CPUQuotaFilename)
+	periodFilePath := filepath.Join(cRoot, cgroupV1CPUPeriodFilename)
+	quota, err = cgroupFileToInt64(quotaFilePath, "cpu quota")
+	if err != nil {
+		return 0, 0, err
+	}
+	period, err = cgroupFileToInt64(periodFilePath, "cpu period")
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return period, quota, err
+}
+
+func detectCPUUsageInV1(cRoot string) (stime, utime uint64, err error) {
+	sysFilePath := filepath.Join(cRoot, cgroupV1CPUSysUsageFilename)
+	userFilePath := filepath.Join(cRoot, cgroupV1CPUUserUsageFilename)
+	stime, err = cgroupFileToUint64(sysFilePath, "cpu system time")
+	if err != nil {
+		return 0, 0, err
+	}
+	utime, err = cgroupFileToUint64(userFilePath, "cpu user time")
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return stime, utime, err
+}
+
+func detectCPUQuotaInV2(cRoot string) (period, quota int64, err error) {
+	maxFilePath := filepath.Join(cRoot, cgroupV2CPUMaxFilename)
+	contents, err := readFile(maxFilePath)
+	if err != nil {
+		return 0, 0, errors.Wrapf(err, "error when reading cpu quota from cgroup v2 at %s", maxFilePath)
+	}
+	fields := strings.Fields(string(contents))
+	if len(fields) > 2 || len(fields) == 0 {
+		return 0, 0, errors.Errorf("unexpected format when reading cpu quota from cgroup v2 at %s: %s", maxFilePath, contents)
+	}
+	if fields[0] == "max" {
+		// Negative quota denotes no limit.
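+		// (cgroup v2 reports both settings through the single cpu.max
+		// file, whose format is "$MAX $PERIOD"; $MAX is the literal
+		// string "max" when no bandwidth limit is set, e.g. "max 100000".)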
+		quota = -1
+	} else {
+		quota, err = strconv.ParseInt(fields[0], 10, 64)
+		if err != nil {
+			return 0, 0, errors.Wrapf(err, "error when reading cpu quota from cgroup v2 at %s", maxFilePath)
+		}
+	}
+	if len(fields) == 2 {
+		period, err = strconv.ParseInt(fields[1], 10, 64)
+		if err != nil {
+			return 0, 0, errors.Wrapf(err, "error when reading cpu period from cgroup v2 at %s", maxFilePath)
+		}
+	}
+	return period, quota, nil
+}
+
+func detectCPUUsageInV2(cRoot string) (stime, utime uint64, err error) {
+	statFilePath := filepath.Join(cRoot, cgroupV2CPUStatFilename)
+	var stat *os.File
+	stat, err = os.Open(statFilePath)
+	if err != nil {
+		return 0, 0, errors.Wrapf(err, "can't read cpu usage from cgroup v2 at %s", statFilePath)
+	}
+	defer func() {
+		err = errors.CombineErrors(err, stat.Close())
+	}()
+
+	scanner := bufio.NewScanner(stat)
+	for scanner.Scan() {
+		fields := bytes.Fields(scanner.Bytes())
+		if len(fields) != 2 || (string(fields[0]) != "user_usec" && string(fields[0]) != "system_usec") {
+			continue
+		}
+		keyField := string(fields[0])
+
+		trimmed := string(bytes.TrimSpace(fields[1]))
+		usageVar := &stime
+		if keyField == "user_usec" {
+			usageVar = &utime
+		}
+		*usageVar, err = strconv.ParseUint(trimmed, 10, 64)
+		if err != nil {
+			return 0, 0, errors.Wrapf(err, "can't read cpu usage %s from cgroup v2 at %s", keyField, statFilePath)
+		}
+	}
+
+	return stime, utime, err
+}
+
-// Finds memory limit for cgroup V1 via looking in [contoller mount path]/memory.stat
-func detectLimitInV1(cRoot string) (limit int64, warnings string, err error) {
+// Finds memory limit for cgroup V1 via looking in [controller mount path]/memory.stat
+func detectMemLimitInV1(cRoot string) (limit int64, warnings string, err error) {
 	statFilePath := filepath.Join(cRoot, cgroupV1MemLimitFilename)
 	stat, err := os.Open(statFilePath)
 	if err != nil {
@@ -98,7 +234,7 @@ func detectLimitInV1(cRoot string) (limit int64, warnings string, err error) {
 
 // Finds memory limit for cgroup V2 via looking into [controller mount path]/[leaf path]/memory.max
 // TODO(vladdy): this implementation was based on podman+criu environment. It may cover not
 // all the cases when v2 becomes more widely used in container world.
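+// (In cgroup v2, memory.max holds either a limit in bytes or the literal
+// string "max" when no limit is set.)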
-func detectLimitInV2(cRoot string) (limit int64, warnings string, err error) {
+func detectMemLimitInV2(cRoot string) (limit int64, warnings string, err error) {
 	limitFilePath := filepath.Join(cRoot, cgroupV2MemLimitFilename)
 
 	var buf []byte
@@ -120,10 +256,10 @@ func detectLimitInV2(cRoot string) (limit int64, warnings string, err error) {
 
-// The controller is defined via either type `memory` for cgroup v1 or via empty type for cgroup v2,
-// where the type is the second field in /proc/[pid]/cgroup file
-func detectMemCntrlPath(cgroupFilePath string) (string, error) {
+// The controller is defined via either its type (e.g. `memory` or `cpu,cpuacct`) for cgroup v1,
+// or via empty type for cgroup v2, where the type is the second field in /proc/[pid]/cgroup file
+func detectCntrlPath(cgroupFilePath string, controller string) (string, error) {
 	cgroup, err := os.Open(cgroupFilePath)
 	if err != nil {
-		return "", errors.Wrapf(err, "failed to read memory cgroup from cgroups file: %s", cgroupFilePath)
+		return "", errors.Wrapf(err, "failed to read %s cgroup from cgroups file: %s", controller, cgroupFilePath)
 	}
 	defer func() { _ = cgroup.Close() }()
 
@@ -142,7 +278,7 @@ func detectMemCntrlPath(cgroupFilePath string) (string, error) {
 		// but no known container solutions support it afaik
 		if f0 == "0" && f1 == "" {
 			unifiedPathIfFound = string(fields[2])
-		} else if f1 == "memory" {
+		} else if f1 == controller {
 			return string(fields[2]), nil
 		}
 	}
@@ -152,7 +288,7 @@ func detectMemCntrlPath(cgroupFilePath string) (string, error) {
 
-// Reads /proc/[pid]/mountinfo for cgoup or cgroup2 mount which defines the used version.
+// Reads /proc/[pid]/mountinfo for cgroup or cgroup2 mount which defines the used version.
 // See http://man7.org/linux/man-pages/man5/proc.5.html for `mountinfo` format.
-func getCgroupDetails(mountinfoPath string, cRoot string) (string, int, error) {
+func getCgroupDetails(mountinfoPath string, cRoot string, controller string) (string, int, error) {
 	info, err := os.Open(mountinfoPath)
 	if err != nil {
 		return "", 0, errors.Wrapf(err, "failed to read mounts info from file: %s", mountinfoPath)
@@ -168,7 +304,7 @@ func getCgroupDetails(mountinfoPath string, cRoot string) (string, int, error) {
 			continue
 		}
 
-		ver, ok := detectCgroupVersion(fields)
+		ver, ok := detectCgroupVersion(fields, controller)
 		if ok && (ver == 1 && string(fields[3]) == cRoot) || ver == 2 {
 			return string(fields[4]), ver, nil
 		}
@@ -178,7 +314,7 @@ func getCgroupDetails(mountinfoPath string, cRoot string) (string, int, error) {
 }
 
-// Return version of cgroup mount for memory controller if found
-func detectCgroupVersion(fields [][]byte) (_ int, found bool) {
+// Return version of cgroup mount for the given controller if found
+func detectCgroupVersion(fields [][]byte, controller string) (_ int, found bool) {
 	if len(fields) < 10 {
 		return 0, false
 	}
@@ -201,9 +337,9 @@ func detectCgroupVersion(fields [][]byte) (_ int, found bool) {
 
 	pos++
 
-	// Check for memory controller specifically in cgroup v1 (it is listed in super options field),
+	// Check for controller specifically in cgroup v1 (it is listed in super options field),
 	// as the limit can't be found if it is not enforced
-	if bytes.Equal(fields[pos], []byte("cgroup")) && bytes.Contains(fields[pos+2], []byte("memory")) {
+	if bytes.Equal(fields[pos], []byte("cgroup")) && bytes.Contains(fields[pos+2], []byte(controller)) {
 		return 1, true
 	} else if bytes.Equal(fields[pos], []byte("cgroup2")) {
 		return 2, true
@@ -211,3 +347,79 @@ func detectCgroupVersion(fields [][]byte) (_ int, found bool) {
 
 	return 0, false
 }
+
+// CPUUsage contains CPU usage and quotas for an entire cgroup.
+type CPUUsage struct {
+	// System time and user time taken by this cgroup or process. In nanoseconds.
+	Stime, Utime uint64
+	// CPU period and quota for this cgroup or process, in microseconds.
+	// This cgroup has access to up to (quota/period) proportion of CPU
+	// resources on the system. For instance, if there are 4 CPUs,
+	// quota = 150000, period = 100000, this cgroup can use around 1.5 CPUs,
+	// or 37.5% of total scheduler time. If quota is -1, it's unlimited.
+	Period, Quota int64
+	// NumCPU is the number of CPUs in the system. It is always populated,
+	// even when the process is not running inside a cgroup.
+	NumCPU int
+}
+
+// CPUShares returns the number of CPUs this cgroup can be expected to
+// max out. If there's no limit, NumCPU is returned.
+func (c CPUUsage) CPUShares() float64 {
+	if c.Period <= 0 || c.Quota <= 0 {
+		return float64(c.NumCPU)
+	}
+	return float64(c.Quota) / float64(c.Period)
+}
+
+// GetCgroupCPU returns the CPU usage and quota for the current cgroup.
+func GetCgroupCPU() (CPUUsage, error) {
+	cpuusage, err := getCgroupCPU("/")
+	cpuusage.NumCPU = runtime.NumCPU()
+	return cpuusage, err
+}
+
+// Helper function for GetCgroupCPU. Root is always "/", except in tests.
+func getCgroupCPU(root string) (CPUUsage, error) {
+	path, err := detectCntrlPath(filepath.Join(root, "/proc/self/cgroup"), "cpu,cpuacct")
+	if err != nil {
+		return CPUUsage{}, err
+	}
+
+	// No CPU controller detected
+	if path == "" {
+		return CPUUsage{}, errors.New("no cpu controller detected")
+	}
+
+	mount, ver, err := getCgroupDetails(filepath.Join(root, "/proc/self/mountinfo"), path, "cpu,cpuacct")
+	if err != nil {
+		return CPUUsage{}, err
+	}
+
+	var res CPUUsage
+
+	switch ver {
+	case 1:
+		res.Period, res.Quota, err = detectCPUQuotaInV1(filepath.Join(root, mount))
+		if err != nil {
+			return res, err
+		}
+		res.Stime, res.Utime, err = detectCPUUsageInV1(filepath.Join(root, mount))
+		if err != nil {
+			return res, err
+		}
+	case 2:
+		res.Period, res.Quota, err = detectCPUQuotaInV2(filepath.Join(root, mount, path))
+		if err != nil {
+			return res, err
+		}
+		res.Stime, res.Utime, err = detectCPUUsageInV2(filepath.Join(root, mount, path))
+		if err != nil {
+			return res, err
+		}
+	default:
+		return CPUUsage{}, fmt.Errorf("detected unknown cgroup version index: %d", ver)
+	}
+
+	return res, nil
+}
diff --git a/pkg/util/cgroups/cgroups_test.go b/pkg/util/cgroups/cgroups_test.go
index 19e47cbc064e..87e116c5a1ac 100644
--- a/pkg/util/cgroups/cgroups_test.go
+++ b/pkg/util/cgroups/cgroups_test.go
@@ -115,6 +115,140 @@ func TestCgroupsGetMemory(t *testing.T) {
 	}
 }
 
+func TestCgroupsGetCPU(t *testing.T) {
+	for _, tc := range []struct {
+		name   string
+		paths  map[string]string
+		errMsg string
+		period int64
+		quota  int64
+		user   uint64
+		system uint64
+	}{
+		{
+			name:   "fails to find cgroup version when cgroup file is not present",
+			errMsg: "failed to read cpu,cpuacct cgroup from cgroups file:",
+		},
+		{
+			name: "doesn't detect limit for cgroup v1 without cpu controller",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v1CgroupWithoutCPUController,
+				"/proc/self/mountinfo": v1MountsWithoutCPUController,
+			},
+			errMsg: "no cpu controller detected",
+		},
+		{
+			name: "fails to find mount details when mountinfo is not present",
+			paths: map[string]string{
+				"/proc/self/cgroup": v1CgroupWithCPUController,
+			},
+			errMsg: "failed to read mounts info from file:",
+		},
+		{
+			name: "fails to find cgroup v1 version when there is no cpu mount",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v1CgroupWithCPUController,
+				"/proc/self/mountinfo": v1MountsWithoutCPUController,
+			},
+			errMsg: "failed to detect cgroup root mount and version",
+		},
+		{
+			name: "fetches the cpu quota and usage for cgroup v1",
+			paths: map[string]string{
+				"/proc/self/cgroup":                              v1CgroupWithCPUController,
+				"/proc/self/mountinfo":                           v1MountsWithCPUController,
+				"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us":    "12345",
+				"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us":   "67890",
+				"/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage_sys":   "123",
+				"/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage_user":  "456",
+			},
+			quota:  int64(12345),
+			period: int64(67890),
+			system: uint64(123),
+			user:   uint64(456),
+		},
+		{
+			name: "fetches the cpu quota for cgroup v1 even if usage nonexistent",
+			paths: map[string]string{
+				"/proc/self/cgroup":                             v1CgroupWithCPUController,
+				"/proc/self/mountinfo":                          v1MountsWithCPUController,
+				"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us":   "-1",
+				"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us":  "67890",
+			},
+			quota:  int64(-1),
+			period: int64(67890),
+			errMsg: "error when reading cpu system time from cgroup v1",
+		},
+		{
+			name: "fails when the stat file is missing for cgroup v2",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v2CgroupWithMemoryController,
+				"/proc/self/mountinfo": v2Mounts,
+			},
+			errMsg: "error when reading cpu quota from cgroup v2",
+		},
+		{
+			name: "fails when unable to parse limit for cgroup v2",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v2CgroupWithMemoryController,
+				"/proc/self/mountinfo": v2Mounts,
+				"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "foo bar\n",
+			},
+			errMsg: "error when reading cpu quota from cgroup v2 at",
+		},
+		{
+			name: "fetches the cpu quota and usage for cgroup v2",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v2CgroupWithMemoryController,
+				"/proc/self/mountinfo": v2Mounts,
+				"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max":  "100 1000\n",
+				"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.stat": "user_usec 100\nsystem_usec 200",
+			},
+			quota:  int64(100),
+			period: int64(1000),
+			user:   uint64(100),
+			system: uint64(200),
+		},
+		{
+			name: "recognizes `max` as the cpu quota for cgroup v2",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v2CgroupWithMemoryController,
+				"/proc/self/mountinfo": v2Mounts,
+				"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max":  "max 1000\n",
+				"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.stat": "user_usec 100\nsystem_usec 200",
+			},
+			quota:  int64(-1),
+			period: int64(1000),
+			user:   uint64(100),
+			system: uint64(200),
+		},
+		{
+			name: "fetches the cpu quota for cgroup v2 even if usage nonexistent",
+			paths: map[string]string{
+				"/proc/self/cgroup":    v2CgroupWithMemoryController,
+				"/proc/self/mountinfo": v2Mounts,
+				"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "100 1000\n",
+			},
+			quota:  int64(100),
+			period: int64(1000),
+			errMsg: "can't read cpu usage from cgroup v2",
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			dir := createFiles(t, tc.paths)
+			defer func() { _ = os.RemoveAll(dir) }()
+
+			cpuusage, err := getCgroupCPU(dir)
+			require.True(t, testutils.IsError(err, tc.errMsg),
+				"%v %v", err, tc.errMsg)
+			require.Equal(t, tc.quota, cpuusage.Quota)
+			require.Equal(t, tc.period, cpuusage.Period)
+			require.Equal(t, tc.system, cpuusage.Stime)
+			require.Equal(t, tc.user, cpuusage.Utime)
+		})
+	}
+}
+
 func createFiles(t *testing.T,
paths map[string]string) (dir string) { dir, err := ioutil.TempDir("", "") require.NoError(t, err) @@ -150,6 +284,29 @@ const ( 3:hugetlb:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 2:freezer:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 1:name=systemd:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +` + v1CgroupWithCPUController = `11:blkio:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +10:devices:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +9:perf_event:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +8:cpu,cpuacct:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +7:pids:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +6:cpuset:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +5:memory:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +4:net_cls,net_prio:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +3:hugetlb:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +2:freezer:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +1:name=systemd:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +` + v1CgroupWithoutCPUController = `10:blkio:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +9:devices:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +8:perf_event:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +7:pids:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +6:cpuset:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +5:memory:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +4:net_cls,net_prio:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +3:hugetlb:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +2:freezer:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 +1:name=systemd:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 ` v2CgroupWithMemoryController = 
`0::/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope ` @@ -232,6 +389,84 @@ const ( 222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro +` + v1MountsWithCPUController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work +626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro +703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +705 703 0:23 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd +711 703 0:25 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer +726 703 0:26 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb +727 703 0:27 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio +733 703 0:28 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime master:13 - cgroup cgroup rw,memory +734 703 0:29 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset +735 703 0:30 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids +736 703 0:31 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct +737 703 0:32 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event +740 703 0:33 
/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices +742 703 0:34 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio +744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered +814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw +815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw +368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw +375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw +376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw +381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw +397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw +213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro +216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro +` + v1MountsWithoutCPUController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work +626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro +703 702 0:99 / /sys/fs/cgroup 
ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +705 703 0:23 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd +711 703 0:25 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer +726 703 0:26 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb +727 703 0:27 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio +734 703 0:29 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset +735 703 0:30 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids +737 703 0:32 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event +740 703 0:33 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices +742 703 0:34 /kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio +744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered +814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw +815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota +818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw +368 626 
0:79 /bus /proc/bus ro,relatime - proc proc rw +375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw +376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw +381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw +397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw +213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro +216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755 +224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro ` v2Mounts = `371 344 0:35 / / rw,relatime - overlay overlay rw,context="system_u:object_r:container_file_t:s0:c200,c321",lowerdir=/var/lib/containers/storage/overlay/l/SPNDOAU3AZNJMNKU3F5THCA36R,upperdir=/var/lib/containers/storage/overlay/7dcd88f815bded7b833fb5dc0f25de897250bcfa828624c0d78393689d0bc312/diff,workdir=/var/lib/containers/storage/overlay/7dcd88f815bded7b833fb5dc0f25de897250bcfa828624c0d78393689d0bc312/work