From c97defb4ce8664e839fde5e3d86e43791c04a05f Mon Sep 17 00:00:00 2001 From: nicklesimba Date: Thu, 9 Jun 2022 13:01:03 -0500 Subject: [PATCH 1/2] Vendoring changes to include go-cron scheduling Signed-off-by: nicklesimba --- go.mod | 9 +- go.sum | 5 + vendor/github.com/go-co-op/gocron/.gitignore | 19 + .../github.com/go-co-op/gocron/.golangci.yaml | 49 + .../go-co-op/gocron/CODE_OF_CONDUCT.md | 73 + .../go-co-op/gocron/CONTRIBUTING.md | 40 + vendor/github.com/go-co-op/gocron/LICENSE | 21 + vendor/github.com/go-co-op/gocron/Makefile | 33 + vendor/github.com/go-co-op/gocron/README.md | 132 ++ vendor/github.com/go-co-op/gocron/SECURITY.md | 15 + vendor/github.com/go-co-op/gocron/executor.go | 111 ++ vendor/github.com/go-co-op/gocron/gocron.go | 101 ++ vendor/github.com/go-co-op/gocron/job.go | 381 ++++++ .../github.com/go-co-op/gocron/scheduler.go | 1205 +++++++++++++++++ .../github.com/go-co-op/gocron/timeHelper.go | 25 + vendor/github.com/robfig/cron/v3/.gitignore | 22 + vendor/github.com/robfig/cron/v3/.travis.yml | 1 + vendor/github.com/robfig/cron/v3/LICENSE | 21 + vendor/github.com/robfig/cron/v3/README.md | 125 ++ vendor/github.com/robfig/cron/v3/chain.go | 92 ++ .../robfig/cron/v3/constantdelay.go | 27 + vendor/github.com/robfig/cron/v3/cron.go | 355 +++++ vendor/github.com/robfig/cron/v3/doc.go | 231 ++++ vendor/github.com/robfig/cron/v3/logger.go | 86 ++ vendor/github.com/robfig/cron/v3/option.go | 45 + vendor/github.com/robfig/cron/v3/parser.go | 434 ++++++ vendor/github.com/robfig/cron/v3/spec.go | 188 +++ vendor/golang.org/x/sync/AUTHORS | 3 + vendor/golang.org/x/sync/CONTRIBUTORS | 3 + vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + .../golang.org/x/sync/semaphore/semaphore.go | 136 ++ .../x/sync/singleflight/singleflight.go | 212 +++ vendor/modules.txt | 10 + 34 files changed, 4257 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/go-co-op/gocron/.gitignore create mode 100644 vendor/github.com/go-co-op/gocron/.golangci.yaml create mode 100644 vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-co-op/gocron/CONTRIBUTING.md create mode 100644 vendor/github.com/go-co-op/gocron/LICENSE create mode 100644 vendor/github.com/go-co-op/gocron/Makefile create mode 100644 vendor/github.com/go-co-op/gocron/README.md create mode 100644 vendor/github.com/go-co-op/gocron/SECURITY.md create mode 100644 vendor/github.com/go-co-op/gocron/executor.go create mode 100644 vendor/github.com/go-co-op/gocron/gocron.go create mode 100644 vendor/github.com/go-co-op/gocron/job.go create mode 100644 vendor/github.com/go-co-op/gocron/scheduler.go create mode 100644 vendor/github.com/go-co-op/gocron/timeHelper.go create mode 100644 vendor/github.com/robfig/cron/v3/.gitignore create mode 100644 vendor/github.com/robfig/cron/v3/.travis.yml create mode 100644 vendor/github.com/robfig/cron/v3/LICENSE create mode 100644 vendor/github.com/robfig/cron/v3/README.md create mode 100644 vendor/github.com/robfig/cron/v3/chain.go create mode 100644 vendor/github.com/robfig/cron/v3/constantdelay.go create mode 100644 vendor/github.com/robfig/cron/v3/cron.go create mode 100644 vendor/github.com/robfig/cron/v3/doc.go create mode 100644 vendor/github.com/robfig/cron/v3/logger.go create mode 100644 vendor/github.com/robfig/cron/v3/option.go create mode 100644 vendor/github.com/robfig/cron/v3/parser.go create mode 100644 vendor/github.com/robfig/cron/v3/spec.go create mode 100644 vendor/golang.org/x/sync/AUTHORS create mode 100644 
vendor/golang.org/x/sync/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go diff --git a/go.mod b/go.mod index 20b89ee4e..b3460d078 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,9 @@ require ( github.com/containernetworking/cni v0.8.1 github.com/containernetworking/plugins v0.8.2 github.com/coreos/etcd v3.3.25+incompatible + github.com/go-co-op/gocron v1.13.0 github.com/imdario/mergo v0.3.12 + github.com/json-iterator/go v1.1.12 // indirect github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.1.1-0.20210510153419-66a699ae3b05 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.17.0 @@ -21,7 +23,11 @@ require ( sigs.k8s.io/controller-runtime v0.11.2 ) -require sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect +require ( + github.com/robfig/cron/v3 v3.0.1 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect +) require ( github.com/coreos/go-semver v0.3.0 // indirect @@ -44,7 +50,6 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/go.sum b/go.sum index 8b498d3e7..f4e537a6e 100644 --- a/go.sum +++ b/go.sum @@ -171,6 +171,8 @@ github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSy github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-co-op/gocron v1.13.0 h1:BjkuNImPy5NuIPEifhWItFG7pYyr27cyjS6BN9w/D4c= +github.com/go-co-op/gocron v1.13.0/go.mod h1:GD5EIEly1YNW+LovFVx5dzbYVcIc8544K99D8UVRpGo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -489,6 +491,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -739,6 +743,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ 
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/go-co-op/gocron/.gitignore b/vendor/github.com/go-co-op/gocron/.gitignore new file mode 100644 index 000000000..f6409f90d --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/.gitignore @@ -0,0 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test +local_testing + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# IDE project files +.idea diff --git a/vendor/github.com/go-co-op/gocron/.golangci.yaml b/vendor/github.com/go-co-op/gocron/.golangci.yaml new file mode 100644 index 000000000..611fb3659 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/.golangci.yaml @@ -0,0 +1,49 @@ +run: + timeout: 2m + issues-exit-code: 1 + tests: true + +issues: + max-same-issues: 100 + exclude-rules: + - path: _test\.go + linters: + - bodyclose + - errcheck + - gosec + +linters: + enable: + - bodyclose + - deadcode + - errcheck + - gofmt + - revive + - gosec + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" + format: colored-line-number + # print lines of code with issue, default is true + print-issued-lines: true + # print linter name in the end of issue text, default is true + print-linter-name: true + # make issues output unique by line, default is true + uniq-by-line: true + # add a prefix to the output file references; default is no prefix + path-prefix: "" + # sorts results by: filepath, line and column + sort-results: true + +linters-settings: + golint: + min-confidence: 0.8 diff --git a/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md b/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..7d913b55b --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/CODE_OF_CONDUCT.md @@ -0,0 +1,73 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone. And we mean everyone! 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and kind language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team initially on Slack to coordinate private communication. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md b/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md new file mode 100644 index 000000000..b2d3be83f --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing to gocron + +Thank you for coming to contribute to gocron! We welcome new ideas, PRs and general feedback. 
+ +## Reporting Bugs + +If you find a bug then please let the project know by opening an issue after doing the following: + +- Do a quick search of the existing issues to make sure the bug isn't already reported +- Try and make a minimal list of steps that can reliably reproduce the bug you are experiencing +- Collect as much information as you can to help identify what the issue is (project version, configuration files, etc) + +## Suggesting Enhancements + +If you have a use case that you don't see a way to support yet, we would welcome the feedback in an issue. Before opening the issue, please consider: + +- Is this a common use case? +- Is it simple to understand? + +You can help us out by doing the following before raising a new issue: + +- Check that the feature hasn't been requested already by searching existing issues +- Try and reduce your enhancement into a single, concise and deliverable request, rather than a general idea +- Explain your own use cases as the basis of the request + +## Adding Features + +Pull requests are always welcome. However, before going through the trouble of implementing a change it's worth creating a bug or feature request issue. +This allows us to discuss the changes and make sure they are a good fit for the project. + +Please always make sure a pull request has been: + +- Unit tested with `make test` +- Linted with `make lint` +- Vetted with `make vet` +- Formatted with `make fmt` or validated with `make check-fmt` + +## Writing Tests + +Tests should follow the [table driven test pattern](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go). See other tests in the code base for additional examples. diff --git a/vendor/github.com/go-co-op/gocron/LICENSE b/vendor/github.com/go-co-op/gocron/LICENSE new file mode 100644 index 000000000..3357d57d7 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014, 辣椒面 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-co-op/gocron/Makefile b/vendor/github.com/go-co-op/gocron/Makefile new file mode 100644 index 000000000..08cbf82b9 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/Makefile @@ -0,0 +1,33 @@ +.PHONY: fmt check-fmt lint vet test + +GO_PKGS := $(shell go list -f {{.Dir}} ./...) + +fmt: + @go list -f {{.Dir}} ./... | xargs -I{} gofmt -w -s {} + +check-fmt: + @echo "Checking formatting..." 
+ @FMT="0"; \ + for pkg in $(GO_PKGS); do \ + OUTPUT=`gofmt -l $$pkg/*.go`; \ + if [ -n "$$OUTPUT" ]; then \ + echo "$$OUTPUT"; \ + FMT="1"; \ + fi; \ + done ; \ + if [ "$$FMT" -eq "1" ]; then \ + echo "Problem with formatting in files above."; \ + exit 1; \ + else \ + echo "Success - way to run gofmt!"; \ + fi + +lint: +# Add -set_exit_status=true when/if we want to enforce the linter rules + @golint -min_confidence 0.8 -set_exit_status $(GO_PKGS) + +vet: + @go vet $(GO_FLAGS) $(GO_PKGS) + +test: + @go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) diff --git a/vendor/github.com/go-co-op/gocron/README.md b/vendor/github.com/go-co-op/gocron/README.md new file mode 100644 index 000000000..7805fc0a2 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/README.md @@ -0,0 +1,132 @@ +# gocron: A Golang Job Scheduling Package. + +[![CI State](https://github.com/go-co-op/gocron/workflows/Go%20Test/badge.svg)](https://github.com/go-co-op/gocron/actions?query=workflow%3A"lint") ![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron) + +gocron is a job scheduling package which lets you run Go functions at pre-determined intervals using a simple, human-friendly syntax. + +gocron is a Golang scheduler implementation similar to the Ruby module [clockwork](https://github.com/tomykaira/clockwork) and the Python job scheduling package [schedule](https://github.com/dbader/schedule). + +See also these two great articles that were used for design input: + +- [Rethinking Cron](http://adam.herokuapp.com/past/2010/4/13/rethinking_cron/) +- [Replace Cron with Clockwork](http://adam.herokuapp.com/past/2010/6/30/replace_cron_with_clockwork/) + +If you want to chat, you can find us at Slack! [](https://gophers.slack.com/archives/CQ7T0T1FW) + +## Concepts + +- **Scheduler**: The scheduler tracks all the jobs assigned to it and makes sure they are passed to the executor when ready to be run. The scheduler is able to manage overall aspects of job behavior like limiting how many jobs are running at one time. +- **Job**: The job is simply aware of the task (go function) it's provided and is therefore only able to perform actions related to that task like preventing itself from overruning a previous task that is taking a long time. +- **Executor**: The executor, as it's name suggests, is simply responsible for calling the task (go function) that the job hands to it when sent by the scheduler. + +## Examples + +```golang +s := gocron.NewScheduler(time.UTC) + +s.Every(5).Seconds().Do(func(){ ... }) + +// strings parse to duration +s.Every("5m").Do(func(){ ... }) + +s.Every(5).Days().Do(func(){ ... }) + +s.Every(1).Month(1, 2, 3).Do(func(){ ... }) + +// set time +s.Every(1).Day().At("10:30").Do(func(){ ... }) + +// set multiple times +s.Every(1).Day().At("10:30;08:00").Do(func(){ ... }) + +s.Every(1).Day().At("10:30").At("08:00").Do(func(){ ... }) + +// Schedule each last day of the month +s.Every(1).MonthLastDay().Do(func(){ ... }) + +// Or each last day of every other month +s.Every(2).MonthLastDay().Do(func(){ ... 
}) + +// cron expressions supported +s.Cron("*/1 * * * *").Do(task) // every minute + +// you can start running the scheduler in two different ways: +// starts the scheduler asynchronously +s.StartAsync() +// starts the scheduler and blocks current execution path +s.StartBlocking() +``` + +For more examples, take a look in our [go docs](https://pkg.go.dev/github.com/go-co-op/gocron#pkg-examples) + +## Options + +| Interval | Supported schedule options | +| ------------ | ------------------------------------------------------------------- | +| sub-second | `StartAt()` | +| milliseconds | `StartAt()` | +| seconds | `StartAt()` | +| minutes | `StartAt()` | +| hours | `StartAt()` | +| days | `StartAt()`, `At()` | +| weeks | `StartAt()`, `At()`, `Weekday()` (and all week day named functions) | +| months | `StartAt()`, `At()` | + +There are several options available to restrict how jobs run: + +| Mode | Function | Behavior | +| --------------- | ------------------------ | ------------------------------------------------------------------------------- | +| Default | | jobs are rescheduled at every interval | +| Job singleton | `SingletonMode()` | a long running job will not be rescheduled until the current run is completed | +| Scheduler limit | `SetMaxConcurrentJobs()` | set a collective maximum number of concurrent jobs running across the scheduler | + +## Tags + +Jobs may have arbitrary tags added which can be useful when tracking many jobs. +The scheduler supports both enforcing tags to be unique and when not unique, +running all jobs with a given tag. + +```golang +s := gocron.NewScheduler(time.UTC) +s.TagsUnique() + +_, _ = s.Every(1).Week().Tag("foo").Do(task) +_, err := s.Every(1).Week().Tag("foo").Do(task) +// error!!! + +s := gocron.NewScheduler(time.UTC) + +s.Every(2).Day().Tag("tag").At("10:00").Do(task) +s.Every(1).Minute().Tag("tag").Do(task) +s.RunByTag("tag") +// both jobs will run +``` + +## FAQ + +- Q: I'm running multiple pods on a distributed environment. How can I make a job not run once per pod causing duplication? + - A: We recommend using your own lock solution within the jobs themselves (you could use [Redis](https://redis.io/topics/distlock), for example) + +- Q: I've removed my job from the scheduler, but how can I stop a long-running job that has already been triggered? + - A: We recommend using a means of canceling your job, e.g. a `context.WithCancel()`. + +--- + +Looking to contribute? Try to follow these guidelines: + +- Use issues for everything +- For a small change, just send a PR! +- For bigger changes, please open an issue for discussion before sending a PR. +- PRs should have: tests, documentation and examples (if it makes sense) +- You can also contribute by: + - Reporting issues + - Suggesting new features or enhancements + - Improving/fixing documentation + +--- + +## Design + +![design-diagram](https://user-images.githubusercontent.com/19351306/110375142-2ba88680-8017-11eb-80c3-554cc746b165.png) + +[Jetbrains](https://www.jetbrains.com/?from=gocron) supports this project with GoLand licenses. We appreciate their support for free and open source software! diff --git a/vendor/github.com/go-co-op/gocron/SECURITY.md b/vendor/github.com/go-co-op/gocron/SECURITY.md new file mode 100644 index 000000000..6943a66f5 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/SECURITY.md @@ -0,0 +1,15 @@ +# Security Policy + +## Supported Versions + +The current plan is to maintain version 1 as long as possible incorporating any necessary security patches. 
+ +| Version | Supported | +| ------- | ------------------ | +| 1.x.x | :white_check_mark: | + +## Reporting a Vulnerability + +Vulnerabilities can be reported by [opening an issue](https://github.com/go-co-op/gocron/issues/new/choose) or reaching out on Slack: [](https://gophers.slack.com/archives/CQ7T0T1FW) + +We will do our best to addrerss any vulnerabilites in an expeditious manner. diff --git a/vendor/github.com/go-co-op/gocron/executor.go b/vendor/github.com/go-co-op/gocron/executor.go new file mode 100644 index 000000000..43853fb21 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/executor.go @@ -0,0 +1,111 @@ +package gocron + +import ( + "context" + "sync" + + "golang.org/x/sync/semaphore" +) + +const ( + // RescheduleMode - the default is that if a limit on maximum + // concurrent jobs is set and the limit is reached, a job will + // skip it's run and try again on the next occurrence in the schedule + RescheduleMode limitMode = iota + + // WaitMode - if a limit on maximum concurrent jobs is set + // and the limit is reached, a job will wait to try and run + // until a spot in the limit is freed up. + // + // Note: this mode can produce unpredictable results as + // job execution order isn't guaranteed. For example, a job that + // executes frequently may pile up in the wait queue and be executed + // many times back to back when the queue opens. + WaitMode +) + +type executor struct { + jobFunctions chan jobFunction + stopCh chan struct{} + limitMode limitMode + maxRunningJobs *semaphore.Weighted +} + +func newExecutor() executor { + return executor{ + jobFunctions: make(chan jobFunction, 1), + stopCh: make(chan struct{}, 1), + } +} + +func (e *executor) start() { + stopCtx, cancel := context.WithCancel(context.Background()) + runningJobsWg := sync.WaitGroup{} + + for { + select { + case f := <-e.jobFunctions: + runningJobsWg.Add(1) + go func() { + defer runningJobsWg.Done() + + if e.maxRunningJobs != nil { + if !e.maxRunningJobs.TryAcquire(1) { + + switch e.limitMode { + case RescheduleMode: + return + case WaitMode: + for { + select { + case <-stopCtx.Done(): + return + case <-f.ctx.Done(): + return + default: + } + + if e.maxRunningJobs.TryAcquire(1) { + break + } + } + } + } + + defer e.maxRunningJobs.Release(1) + } + + switch f.runConfig.mode { + case defaultMode: + f.incrementRunState() + callJobFuncWithParams(f.function, f.parameters) + f.decrementRunState() + case singletonMode: + _, _, _ = f.limiter.Do("main", func() (interface{}, error) { + select { + case <-stopCtx.Done(): + return nil, nil + case <-f.ctx.Done(): + return nil, nil + default: + } + f.incrementRunState() + callJobFuncWithParams(f.function, f.parameters) + f.decrementRunState() + return nil, nil + }) + } + }() + case <-e.stopCh: + cancel() + runningJobsWg.Wait() + e.stopCh <- struct{}{} + return + } + } +} + +func (e *executor) stop() { + e.stopCh <- struct{}{} + <-e.stopCh +} diff --git a/vendor/github.com/go-co-op/gocron/gocron.go b/vendor/github.com/go-co-op/gocron/gocron.go new file mode 100644 index 000000000..cb840fe42 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/gocron.go @@ -0,0 +1,101 @@ +// Package gocron : A Golang Job Scheduling Package. +// +// An in-process scheduler for periodic jobs that uses the builder pattern +// for configuration. gocron lets you run Golang functions periodically +// at pre-determined intervals using a simple, human-friendly syntax. 
+// +package gocron + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "time" +) + +// Error declarations for gocron related errors +var ( + ErrNotAFunction = errors.New("only functions can be scheduled into the job queue") + ErrNotScheduledWeekday = errors.New("job not scheduled weekly on a weekday") + ErrJobNotFoundWithTag = errors.New("no jobs found with given tag") + ErrUnsupportedTimeFormat = errors.New("the given time format is not supported") + ErrInvalidInterval = errors.New(".Every() interval must be greater than 0") + ErrInvalidIntervalType = errors.New(".Every() interval must be int, time.Duration, or string") + ErrInvalidIntervalUnitsSelection = errors.New(".Every(time.Duration) and .Cron() cannot be used with units (e.g. .Seconds())") + + ErrAtTimeNotSupported = errors.New("the At() method is not supported for this time unit") + ErrWeekdayNotSupported = errors.New("weekday is not supported for time unit") + ErrInvalidDayOfMonthEntry = errors.New("only days 1 through 28 are allowed for monthly schedules") + ErrTagsUnique = func(tag string) error { return fmt.Errorf("a non-unique tag was set on the job: %s", tag) } + ErrWrongParams = errors.New("wrong list of params") + ErrUpdateCalledWithoutJob = errors.New("a call to Scheduler.Update() requires a call to Scheduler.Job() first") + ErrCronParseFailure = errors.New("cron expression failed to be parsed") + ErrInvalidDaysOfMonthDuplicateValue = errors.New("duplicate days of month is not allowed in Month() and Months() methods") +) + +func wrapOrError(toWrap error, err error) error { + var returnErr error + if toWrap != nil && !errors.Is(err, toWrap) { + returnErr = fmt.Errorf("%s: %w", err, toWrap) + } else { + returnErr = err + } + return returnErr +} + +// regex patterns for supported time formats +var ( + timeWithSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d:\d\d$`) + timeWithoutSeconds = regexp.MustCompile(`(?m)^\d{1,2}:\d\d$`) +) + +type schedulingUnit int + +const ( + // default unit is seconds + milliseconds schedulingUnit = iota + seconds + minutes + hours + days + weeks + months + duration + crontab +) + +func callJobFuncWithParams(jobFunc interface{}, params []interface{}) { + f := reflect.ValueOf(jobFunc) + if len(params) != f.Type().NumIn() { + return + } + in := make([]reflect.Value, len(params)) + for k, param := range params { + in[k] = reflect.ValueOf(param) + } + f.Call(in) +} + +func getFunctionName(fn interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +func parseTime(t string) (hour, min, sec int, err error) { + var timeLayout string + switch { + case timeWithSeconds.Match([]byte(t)): + timeLayout = "15:04:05" + case timeWithoutSeconds.Match([]byte(t)): + timeLayout = "15:04" + default: + return 0, 0, 0, ErrUnsupportedTimeFormat + } + + parsedTime, err := time.Parse(timeLayout, t) + if err != nil { + return 0, 0, 0, ErrUnsupportedTimeFormat + } + return parsedTime.Hour(), parsedTime.Minute(), parsedTime.Second(), nil +} diff --git a/vendor/github.com/go-co-op/gocron/job.go b/vendor/github.com/go-co-op/gocron/job.go new file mode 100644 index 000000000..e52a0e10e --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/job.go @@ -0,0 +1,381 @@ +package gocron + +import ( + "context" + "fmt" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/robfig/cron/v3" + "golang.org/x/sync/singleflight" +) + +// Job struct stores the information necessary to run a Job +type Job struct { + mu sync.RWMutex + jobFunction + interval int // pause interval * unit 
between runs + duration time.Duration // time duration between runs + unit schedulingUnit // time units, e.g. 'minutes', 'hours'... + startsImmediately bool // if the Job should run upon scheduler start + atTimes []time.Duration // optional time(s) at which this Job runs when interval is day + startAtTime time.Time // optional time at which the Job starts + error error // error related to Job + lastRun time.Time // datetime of last run + nextRun time.Time // datetime of next run + scheduledWeekdays []time.Weekday // Specific days of the week to start on + daysOfTheMonth []int // Specific days of the month to run the job + tags []string // allow the user to tag Jobs with certain labels + runCount int // number of times the job ran + timer *time.Timer // handles running tasks at specific time + cronSchedule cron.Schedule // stores the schedule when a task uses cron +} + +type jobFunction struct { + function interface{} // task's function + parameters []interface{} // task's function parameters + name string //nolint the function name to run + runConfig runConfig // configuration for how many times to run the job + limiter *singleflight.Group // limits inflight runs of job to one + ctx context.Context // for cancellation + cancel context.CancelFunc // for cancellation + runState *int64 // will be non-zero when jobs are running +} + +func (jf *jobFunction) incrementRunState() { + if jf.runState != nil { + atomic.AddInt64(jf.runState, 1) + } +} + +func (jf *jobFunction) decrementRunState() { + if jf.runState != nil { + atomic.AddInt64(jf.runState, -1) + } +} + +type runConfig struct { + finiteRuns bool + maxRuns int + mode mode +} + +// mode is the Job's running mode +type mode int8 + +const ( + // defaultMode disable any mode + defaultMode mode = iota + + // singletonMode switch to single job mode + singletonMode +) + +// newJob creates a new Job with the provided interval +func newJob(interval int, startImmediately bool, singletonMode bool) *Job { + ctx, cancel := context.WithCancel(context.Background()) + var zero int64 + job := &Job{ + interval: interval, + unit: seconds, + lastRun: time.Time{}, + nextRun: time.Time{}, + jobFunction: jobFunction{ + ctx: ctx, + cancel: cancel, + runState: &zero, + }, + tags: []string{}, + startsImmediately: startImmediately, + } + if singletonMode { + job.SingletonMode() + } + return job +} + +func (j *Job) neverRan() bool { + return j.lastRun.IsZero() +} + +func (j *Job) getStartsImmediately() bool { + return j.startsImmediately +} + +func (j *Job) setStartsImmediately(b bool) { + j.startsImmediately = b +} + +func (j *Job) setTimer(t *time.Timer) { + j.mu.Lock() + defer j.mu.Unlock() + j.timer = t +} + +func (j *Job) getFirstAtTime() time.Duration { + var t time.Duration + if len(j.atTimes) > 0 { + t = j.atTimes[0] + } + + return t +} + +func (j *Job) getAtTime(lastRun time.Time) time.Duration { + var r time.Duration + if len(j.atTimes) == 0 { + return r + } + + if len(j.atTimes) == 1 { + return j.atTimes[0] + } + + if lastRun.IsZero() { + r = j.atTimes[0] + } else { + for _, d := range j.atTimes { + nt := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, lastRun.Location()).Add(d) + if nt.After(lastRun) { + r = d + break + } + } + } + + return r +} + +func (j *Job) addAtTime(t time.Duration) { + if len(j.atTimes) == 0 { + j.atTimes = append(j.atTimes, t) + return + } + exist := false + index := sort.Search(len(j.atTimes), func(i int) bool { + atTime := j.atTimes[i] + b := atTime >= t + if b { + exist = atTime == t + } + return b + }) + 
+ // ignore if present + if exist { + return + } + + j.atTimes = append(j.atTimes, time.Duration(0)) + copy(j.atTimes[index+1:], j.atTimes[index:]) + j.atTimes[index] = t +} + +func (j *Job) getStartAtTime() time.Time { + return j.startAtTime +} + +func (j *Job) setStartAtTime(t time.Time) { + j.startAtTime = t +} + +func (j *Job) getUnit() schedulingUnit { + j.mu.RLock() + defer j.mu.RUnlock() + return j.unit +} + +func (j *Job) setUnit(t schedulingUnit) { + j.mu.Lock() + defer j.mu.Unlock() + j.unit = t +} + +func (j *Job) getDuration() time.Duration { + j.mu.RLock() + defer j.mu.RUnlock() + return j.duration +} + +func (j *Job) setDuration(t time.Duration) { + j.mu.Lock() + defer j.mu.Unlock() + j.duration = t +} + +// hasTags returns true if all tags are matched on this Job +func (j *Job) hasTags(tags ...string) bool { + // Build map of all Job tags for easy comparison + jobTags := map[string]int{} + for _, tag := range j.tags { + jobTags[tag] = 0 + } + + // Loop through required tags and if one doesn't exist, return false + for _, tag := range tags { + _, ok := jobTags[tag] + if !ok { + return false + } + } + return true +} + +// Error returns an error if one occurred while creating the Job. +// If multiple errors occurred, they will be wrapped and can be +// checked using the standard unwrap options. +func (j *Job) Error() error { + return j.error +} + +// Tag allows you to add arbitrary labels to a Job that do not +// impact the functionality of the Job +func (j *Job) Tag(tags ...string) { + j.tags = append(j.tags, tags...) +} + +// Untag removes a tag from a Job +func (j *Job) Untag(t string) { + var newTags []string + for _, tag := range j.tags { + if t != tag { + newTags = append(newTags, tag) + } + } + + j.tags = newTags +} + +// Tags returns the tags attached to the Job +func (j *Job) Tags() []string { + return j.tags +} + +// ScheduledTime returns the time of the Job's next scheduled run +func (j *Job) ScheduledTime() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.nextRun +} + +// ScheduledAtTime returns the specific time of day the Job will run at. +// If multiple times are set, the earliest time will be returned. +func (j *Job) ScheduledAtTime() string { + if len(j.atTimes) == 0 { + return "0:0" + } + + return fmt.Sprintf("%d:%d", j.getFirstAtTime()/time.Hour, (j.getFirstAtTime()%time.Hour)/time.Minute) +} + +// ScheduledAtTimes returns the specific times of day the Job will run at +func (j *Job) ScheduledAtTimes() []string { + r := make([]string, len(j.atTimes)) + for i, t := range j.atTimes { + r[i] = fmt.Sprintf("%d:%d", t/time.Hour, (t%time.Hour)/time.Minute) + } + + return r +} + +// Weekday returns which day of the week the Job will run on and +// will return an error if the Job is not scheduled weekly +func (j *Job) Weekday() (time.Weekday, error) { + if len(j.scheduledWeekdays) == 0 { + return time.Sunday, ErrNotScheduledWeekday + } + return j.scheduledWeekdays[0], nil +} + +// Weekdays returns a slice of time.Weekday that the Job will run in a week and +// will return an error if the Job is not scheduled weekly +func (j *Job) Weekdays() []time.Weekday { + // appending on j.scheduledWeekdays may cause a side effect + if len(j.scheduledWeekdays) == 0 { + return []time.Weekday{time.Sunday} + } + + return j.scheduledWeekdays +} + +// LimitRunsTo limits the number of executions of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. 
+// +// Note: If a job is added to a running scheduler and this method is then used +// you may see the job run more than the set limit as job is scheduled immediately +// by default upon being added to the scheduler. It is recommended to use the +// LimitRunsTo() func on the scheduler chain when scheduling the job. +// For example: scheduler.LimitRunsTo(1).Do() +func (j *Job) LimitRunsTo(n int) { + j.mu.Lock() + defer j.mu.Unlock() + j.runConfig.finiteRuns = true + j.runConfig.maxRuns = n +} + +// SingletonMode prevents a new job from starting if the prior job has not yet +// completed it's run +// Note: If a job is added to a running scheduler and this method is then used +// you may see the job run overrun itself as job is scheduled immediately +// by default upon being added to the scheduler. It is recommended to use the +// SingletonMode() func on the scheduler chain when scheduling the job. +func (j *Job) SingletonMode() { + j.mu.Lock() + defer j.mu.Unlock() + j.runConfig.mode = singletonMode + j.jobFunction.limiter = &singleflight.Group{} + +} + +// shouldRun evaluates if this job should run again +// based on the runConfig +func (j *Job) shouldRun() bool { + j.mu.RLock() + defer j.mu.RUnlock() + return !j.runConfig.finiteRuns || j.runCount < j.runConfig.maxRuns +} + +// LastRun returns the time the job was run last +func (j *Job) LastRun() time.Time { + return j.lastRun +} + +func (j *Job) setLastRun(t time.Time) { + j.lastRun = t +} + +// NextRun returns the time the job will run next +func (j *Job) NextRun() time.Time { + j.mu.RLock() + defer j.mu.RUnlock() + return j.nextRun +} + +func (j *Job) setNextRun(t time.Time) { + j.mu.Lock() + defer j.mu.Unlock() + j.nextRun = t +} + +// RunCount returns the number of time the job ran so far +func (j *Job) RunCount() int { + return j.runCount +} + +func (j *Job) stop() { + j.mu.Lock() + defer j.mu.Unlock() + if j.timer != nil { + j.timer.Stop() + } + if j.cancel != nil { + j.cancel() + } +} + +// IsRunning reports whether any instances of the job function are currently running +func (j *Job) IsRunning() bool { + return atomic.LoadInt64(j.runState) != 0 +} diff --git a/vendor/github.com/go-co-op/gocron/scheduler.go b/vendor/github.com/go-co-op/gocron/scheduler.go new file mode 100644 index 000000000..b92da946a --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/scheduler.go @@ -0,0 +1,1205 @@ +package gocron + +import ( + "context" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/robfig/cron/v3" + "golang.org/x/sync/semaphore" +) + +type limitMode int8 + +// Scheduler struct stores a list of Jobs and the location of time used by the Scheduler, +// and implements the sort.Interface{} for sorting Jobs, by the time of nextRun +type Scheduler struct { + jobsMutex sync.RWMutex + jobs []*Job + + locationMutex sync.RWMutex + location *time.Location + runningMutex sync.RWMutex + running bool // represents if the scheduler is running at the moment or not + + time timeWrapper // wrapper around time.Time + executor *executor // executes jobs passed via chan + + tags sync.Map // for storing tags when unique tags is set + + tagsUnique bool // defines whether tags should be unique + updateJob bool // so the scheduler knows to create a new job or update the current + waitForInterval bool // defaults jobs to waiting for first interval to start + singletonMode bool // defaults all jobs to use SingletonMode() + jobCreated bool // so the scheduler knows a job was created prior to calling Every or Cron +} + +// days in a week 
+const allWeekDays = 7 + +// NewScheduler creates a new Scheduler +func NewScheduler(loc *time.Location) *Scheduler { + executor := newExecutor() + + return &Scheduler{ + jobs: make([]*Job, 0), + location: loc, + running: false, + time: &trueTime{}, + executor: &executor, + tagsUnique: false, + } +} + +// SetMaxConcurrentJobs limits how many jobs can be running at the same time. +// This is useful when running resource intensive jobs and a precise start time is not critical. +func (s *Scheduler) SetMaxConcurrentJobs(n int, mode limitMode) { + s.executor.maxRunningJobs = semaphore.NewWeighted(int64(n)) + s.executor.limitMode = mode +} + +// StartBlocking starts all jobs and blocks the current thread +func (s *Scheduler) StartBlocking() { + s.StartAsync() + <-make(chan bool) +} + +// StartAsync starts all jobs without blocking the current thread +func (s *Scheduler) StartAsync() { + if !s.IsRunning() { + s.start() + } +} + +//start starts the scheduler, scheduling and running jobs +func (s *Scheduler) start() { + go s.executor.start() + s.setRunning(true) + s.runJobs(s.Jobs()) +} + +func (s *Scheduler) runJobs(jobs []*Job) { + for _, job := range jobs { + s.scheduleNextRun(job) + } +} + +func (s *Scheduler) setRunning(b bool) { + s.runningMutex.Lock() + defer s.runningMutex.Unlock() + s.running = b +} + +// IsRunning returns true if the scheduler is running +func (s *Scheduler) IsRunning() bool { + s.runningMutex.RLock() + defer s.runningMutex.RUnlock() + return s.running +} + +// Jobs returns the list of Jobs from the Scheduler +func (s *Scheduler) Jobs() []*Job { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + return s.jobs +} + +func (s *Scheduler) setJobs(jobs []*Job) { + s.jobsMutex.Lock() + defer s.jobsMutex.Unlock() + s.jobs = jobs +} + +// Len returns the number of Jobs in the Scheduler - implemented for sort +func (s *Scheduler) Len() int { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + return len(s.jobs) +} + +// Swap places each job into the other job's position given +// the provided job indexes. +func (s *Scheduler) Swap(i, j int) { + s.jobsMutex.Lock() + defer s.jobsMutex.Unlock() + s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i] +} + +// Less compares the next run of jobs based on their index. +// Returns true if the second job is after the first. 
+func (s *Scheduler) Less(first, second int) bool { + return s.Jobs()[second].NextRun().Unix() >= s.Jobs()[first].NextRun().Unix() +} + +// ChangeLocation changes the default time location +func (s *Scheduler) ChangeLocation(newLocation *time.Location) { + s.locationMutex.Lock() + defer s.locationMutex.Unlock() + s.location = newLocation +} + +// Location provides the current location set on the scheduler +func (s *Scheduler) Location() *time.Location { + s.locationMutex.RLock() + defer s.locationMutex.RUnlock() + return s.location +} + +type nextRun struct { + duration time.Duration + dateTime time.Time +} + +// scheduleNextRun Compute the instant when this Job should run next +func (s *Scheduler) scheduleNextRun(job *Job) { + now := s.now() + lastRun := job.LastRun() + if !s.jobPresent(job) { + return + } + + if job.getStartsImmediately() { + s.run(job) + lastRun = now + job.setStartsImmediately(false) + } + + if job.neverRan() { + // Increment startAtTime to the future + if !job.startAtTime.IsZero() && job.startAtTime.Before(now) { + duration := s.durationToNextRun(job.startAtTime, job).duration + job.startAtTime = job.startAtTime.Add(duration) + if job.startAtTime.Before(now) { + diff := now.Sub(job.startAtTime) + duration := s.durationToNextRun(job.startAtTime, job).duration + count := diff / duration + if diff%duration != 0 { + count++ + } + job.startAtTime = job.startAtTime.Add(duration * count) + } + } + lastRun = now + } + + if !job.shouldRun() { + s.RemoveByReference(job) + return + } + + next := s.durationToNextRun(lastRun, job) + + if next.dateTime.IsZero() { + job.setNextRun(lastRun.Add(next.duration)) + } else { + job.setNextRun(next.dateTime) + } + job.setTimer(time.AfterFunc(next.duration, func() { + if !next.dateTime.IsZero() { + for { + if time.Now().Unix() >= next.dateTime.Unix() { + break + } + } + } + s.run(job) + s.scheduleNextRun(job) + })) +} + +// durationToNextRun calculate how much time to the next run, depending on unit +func (s *Scheduler) durationToNextRun(lastRun time.Time, job *Job) nextRun { + // job can be scheduled with .StartAt() + if job.getStartAtTime().After(lastRun) { + return nextRun{duration: job.getStartAtTime().Sub(s.now()), dateTime: job.getStartAtTime()} + } + + var next nextRun + switch job.getUnit() { + case milliseconds, seconds, minutes, hours: + next.duration = s.calculateDuration(job) + case days: + next = s.calculateDays(job, lastRun) + case weeks: + if len(job.scheduledWeekdays) != 0 { // weekday selected, Every().Monday(), for example + next = s.calculateWeekday(job, lastRun) + } else { + next = s.calculateWeeks(job, lastRun) + } + case months: + next = s.calculateMonths(job, lastRun) + case duration: + next.duration = job.getDuration() + case crontab: + next.dateTime = job.cronSchedule.Next(lastRun) + next.duration = next.dateTime.Sub(lastRun) + } + return next +} + +func (s *Scheduler) calculateMonths(job *Job, lastRun time.Time) nextRun { + lastRunRoundedMidnight := s.roundToMidnight(lastRun) + + // Special case: the last day of the month + if len(job.daysOfTheMonth) == 1 && job.daysOfTheMonth[0] == -1 { + return calculateNextRunForLastDayOfMonth(s, job, lastRun) + } + + if len(job.daysOfTheMonth) != 0 { // calculate days to job.daysOfTheMonth + + nextRunDateMap := make(map[int]nextRun) + for _, day := range job.daysOfTheMonth { + nextRunDateMap[day] = calculateNextRunForMonth(s, job, lastRun, day) + } + + nextRunResult := nextRun{} + for _, val := range nextRunDateMap { + if nextRunResult.dateTime.IsZero() { + nextRunResult = val + 
} else if nextRunResult.dateTime.Sub(val.dateTime).Milliseconds() > 0 { + nextRunResult = val + } + } + + return nextRunResult + } + next := lastRunRoundedMidnight.Add(job.getFirstAtTime()).AddDate(0, job.interval, 0) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func calculateNextRunForLastDayOfMonth(s *Scheduler, job *Job, lastRun time.Time) nextRun { + // Calculate the last day of the next month, by adding job.interval+1 months (i.e. the + // first day of the month after the next month), and subtracting one day, unless the + // last run occurred before the end of the month. + addMonth := job.interval + atTime := job.getAtTime(lastRun) + if testDate := lastRun.AddDate(0, 0, 1); testDate.Month() != lastRun.Month() && + !s.roundToMidnight(lastRun).Add(atTime).After(lastRun) { + // Our last run was on the last day of this month. + addMonth++ + atTime = job.getFirstAtTime() + } + + next := time.Date(lastRun.Year(), lastRun.Month(), 1, 0, 0, 0, 0, s.Location()). + Add(atTime). + AddDate(0, addMonth, 0). + AddDate(0, 0, -1) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func calculateNextRunForMonth(s *Scheduler, job *Job, lastRun time.Time, dayOfMonth int) nextRun { + atTime := job.getAtTime(lastRun) + natTime := atTime + jobDay := time.Date(lastRun.Year(), lastRun.Month(), dayOfMonth, 0, 0, 0, 0, s.Location()).Add(atTime) + difference := absDuration(lastRun.Sub(jobDay)) + next := lastRun + if jobDay.Before(lastRun) { // shouldn't run this month; schedule for next interval minus day difference + next = next.AddDate(0, job.interval, -0) + next = next.Add(-difference) + natTime = job.getFirstAtTime() + } else { + if job.interval == 1 && !jobDay.Equal(lastRun) { // every month counts current month + next = next.AddDate(0, job.interval-1, 0) + } else { // should run next month interval + next = next.AddDate(0, job.interval, 0) + natTime = job.getFirstAtTime() + } + next = next.Add(difference) + } + if atTime != natTime { + next = next.Add(-atTime).Add(natTime) + } + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateWeekday(job *Job, lastRun time.Time) nextRun { + daysToWeekday := s.remainingDaysToWeekday(lastRun, job) + totalDaysDifference := s.calculateTotalDaysDifference(lastRun, daysToWeekday, job) + acTime := job.getAtTime(lastRun) + if totalDaysDifference > 0 { + acTime = job.getFirstAtTime() + } + next := s.roundToMidnight(lastRun).Add(acTime).AddDate(0, 0, totalDaysDifference) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateWeeks(job *Job, lastRun time.Time) nextRun { + totalDaysDifference := int(job.interval) * 7 + next := s.roundToMidnight(lastRun).Add(job.getFirstAtTime()).AddDate(0, 0, totalDaysDifference) + return nextRun{duration: until(lastRun, next), dateTime: next} +} + +func (s *Scheduler) calculateTotalDaysDifference(lastRun time.Time, daysToWeekday int, job *Job) int { + if job.interval > 1 && job.RunCount() < len(job.Weekdays()) { // just count weeks after the first jobs were done + return daysToWeekday + } + if job.interval > 1 && job.RunCount() >= len(job.Weekdays()) { + if daysToWeekday > 0 { + return int(job.interval)*7 - (allWeekDays - daysToWeekday) + } + + return int(job.interval) * 7 + } + + if daysToWeekday == 0 { // today, at future time or already passed + lastRunAtTime := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), 0, 0, 0, 0, s.Location()).Add(job.getAtTime(lastRun)) + if lastRun.Before(lastRunAtTime) { 
+ return 0 + } + return 7 + } + return daysToWeekday +} + +func (s *Scheduler) calculateDays(job *Job, lastRun time.Time) nextRun { + + if job.interval == 1 { + lastRunDayPlusJobAtTime := s.roundToMidnight(lastRun).Add(job.getAtTime(lastRun)) + + // handle occasional occurrence of job running to quickly / too early such that last run was within a second of now + lastRunUnix, nowUnix := job.LastRun().Unix(), s.now().Unix() + if lastRunUnix == nowUnix || lastRunUnix == nowUnix-1 || lastRunUnix == nowUnix+1 { + lastRun = lastRunDayPlusJobAtTime + } + + if shouldRunToday(lastRun, lastRunDayPlusJobAtTime) { + return nextRun{duration: until(lastRun, lastRunDayPlusJobAtTime), dateTime: lastRunDayPlusJobAtTime} + } + } + + nextRunAtTime := s.roundToMidnight(lastRun).Add(job.getFirstAtTime()).AddDate(0, 0, job.interval).In(s.Location()) + return nextRun{duration: until(lastRun, nextRunAtTime), dateTime: nextRunAtTime} +} + +func until(from time.Time, until time.Time) time.Duration { + return until.Sub(from) +} + +func shouldRunToday(lastRun time.Time, atTime time.Time) bool { + return lastRun.Before(atTime) +} + +func in(scheduleWeekdays []time.Weekday, weekday time.Weekday) bool { + in := false + + for _, weekdayInSchedule := range scheduleWeekdays { + if int(weekdayInSchedule) == int(weekday) { + in = true + break + } + } + return in +} + +func (s *Scheduler) calculateDuration(job *Job) time.Duration { + if job.neverRan() && shouldRunAtSpecificTime(job) { // ugly. in order to avoid this we could prohibit setting .At() and allowing only .StartAt() when dealing with Duration types + now := s.time.Now(s.location) + next := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, s.Location()).Add(job.getFirstAtTime()) + if now.Before(next) || now.Equal(next) { + return next.Sub(now) + } + } + + interval := job.interval + switch job.getUnit() { + case milliseconds: + return time.Duration(interval) * time.Millisecond + case seconds: + return time.Duration(interval) * time.Second + case minutes: + return time.Duration(interval) * time.Minute + default: + return time.Duration(interval) * time.Hour + } +} + +func shouldRunAtSpecificTime(job *Job) bool { + return job.getAtTime(job.lastRun) != 0 +} + +func (s *Scheduler) remainingDaysToWeekday(lastRun time.Time, job *Job) int { + weekDays := job.Weekdays() + sort.Slice(weekDays, func(i, j int) bool { + return weekDays[i] < weekDays[j] + }) + + equals := false + lastRunWeekday := lastRun.Weekday() + index := sort.Search(len(weekDays), func(i int) bool { + b := weekDays[i] >= lastRunWeekday + if b { + equals = weekDays[i] == lastRunWeekday + } + return b + }) + // check atTime + if equals { + if s.roundToMidnight(lastRun).Add(job.getAtTime(lastRun)).After(lastRun) { + return 0 + } + index++ + } + + if index < len(weekDays) { + return int(weekDays[index] - lastRunWeekday) + } + + return int(weekDays[0]) + allWeekDays - int(lastRunWeekday) +} + +// absDuration returns the abs time difference +func absDuration(a time.Duration) time.Duration { + if a >= 0 { + return a + } + return -a +} + +// roundToMidnight truncates time to midnight +func (s *Scheduler) roundToMidnight(t time.Time) time.Time { + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, s.Location()) +} + +// NextRun datetime when the next Job should run. +func (s *Scheduler) NextRun() (*Job, time.Time) { + if len(s.Jobs()) <= 0 { + return nil, s.now() + } + + sort.Sort(s) + + return s.Jobs()[0], s.Jobs()[0].NextRun() +} + +// Every schedules a new periodic Job with an interval. 
+// Interval can be an int, time.Duration or a string that +// parses with time.ParseDuration(). +// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +func (s *Scheduler) Every(interval interface{}) *Scheduler { + job := &Job{} + if s.updateJob || s.jobCreated { + job = s.getCurrentJob() + } + + switch interval := interval.(type) { + case int: + if !(s.updateJob || s.jobCreated) { + job = s.newJob(interval) + } else { + job = s.newJob(interval) + } + if interval <= 0 { + job.error = wrapOrError(job.error, ErrInvalidInterval) + } + case time.Duration: + if !(s.updateJob || s.jobCreated) { + job = s.newJob(0) + } else { + job.interval = 0 + } + job.setDuration(interval) + job.setUnit(duration) + case string: + if !(s.updateJob || s.jobCreated) { + job = s.newJob(0) + } else { + job.interval = 0 + } + d, err := time.ParseDuration(interval) + if err != nil { + job.error = wrapOrError(job.error, err) + } + job.setDuration(d) + job.setUnit(duration) + default: + if !(s.updateJob || s.jobCreated) { + job = s.newJob(0) + } else { + job.interval = 0 + } + job.error = wrapOrError(job.error, ErrInvalidIntervalType) + } + + if s.updateJob || s.jobCreated { + s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job)) + if s.jobCreated { + s.jobCreated = false + } + } else { + s.setJobs(append(s.Jobs(), job)) + } + + return s +} + +func (s *Scheduler) run(job *Job) { + if !s.IsRunning() { + return + } + + job.mu.Lock() + defer job.mu.Unlock() + job.setLastRun(s.now()) + job.runCount++ + s.executor.jobFunctions <- job.jobFunction +} + +// RunAll run all Jobs regardless if they are scheduled to run or not +func (s *Scheduler) RunAll() { + s.RunAllWithDelay(0) +} + +// RunAllWithDelay runs all jobs with the provided delay in between each job +func (s *Scheduler) RunAllWithDelay(d time.Duration) { + for _, job := range s.Jobs() { + s.run(job) + s.time.Sleep(d) + } +} + +// RunByTag runs all the jobs containing a specific tag +// regardless of whether they are scheduled to run or not +func (s *Scheduler) RunByTag(tag string) error { + return s.RunByTagWithDelay(tag, 0) +} + +// RunByTagWithDelay is same as RunByTag but introduces a delay between +// each job execution +func (s *Scheduler) RunByTagWithDelay(tag string, d time.Duration) error { + jobs, err := s.FindJobsByTag(tag) + if err != nil { + return err + } + for _, job := range jobs { + s.run(job) + s.time.Sleep(d) + } + return nil +} + +// Remove specific Job by function +// +// Removing a job stops that job's timer. However, if a job has already +// been started by by the job's timer before being removed, there is no way to stop +// it through gocron as https://pkg.go.dev/time#Timer.Stop explains. +// The job function would need to have implemented a means of +// stopping, e.g. using a context.WithCancel(). 
+func (s *Scheduler) Remove(job interface{}) { + fName := getFunctionName(job) + j := s.findJobByTaskName(fName) + s.removeJobsUniqueTags(j) + s.removeByCondition(func(someJob *Job) bool { + return someJob.name == fName + }) +} + +// RemoveByReference removes specific Job by reference +func (s *Scheduler) RemoveByReference(job *Job) { + s.removeJobsUniqueTags(job) + s.removeByCondition(func(someJob *Job) bool { + job.mu.RLock() + defer job.mu.RUnlock() + return someJob == job + }) +} + +func (s *Scheduler) findJobByTaskName(name string) *Job { + for _, job := range s.Jobs() { + if job.name == name { + return job + } + } + return nil +} + +func (s *Scheduler) removeJobsUniqueTags(job *Job) { + if job == nil { + return + } + if s.tagsUnique && len(job.tags) > 0 { + for _, tag := range job.tags { + s.tags.Delete(tag) + } + } +} + +func (s *Scheduler) removeByCondition(shouldRemove func(*Job) bool) { + retainedJobs := make([]*Job, 0) + for _, job := range s.Jobs() { + if !shouldRemove(job) { + retainedJobs = append(retainedJobs, job) + } else { + job.stop() + } + } + s.setJobs(retainedJobs) +} + +// RemoveByTag will remove Jobs that match the given tag. +func (s *Scheduler) RemoveByTag(tag string) error { + return s.RemoveByTags(tag) +} + +// RemoveByTags will remove Jobs that match all given tags. +func (s *Scheduler) RemoveByTags(tags ...string) error { + jobs, err := s.FindJobsByTag(tags...) + if err != nil { + return err + } + + for _, job := range jobs { + s.RemoveByReference(job) + } + return nil +} + +// RemoveByTagsAny will remove Jobs that match any one of the given tags. +func (s *Scheduler) RemoveByTagsAny(tags ...string) error { + var errs error + mJob := make(map[*Job]struct{}) + for _, tag := range tags { + jobs, err := s.FindJobsByTag(tag) + if err != nil { + errs = wrapOrError(errs, fmt.Errorf("%s: %s", err.Error(), tag)) + } + for _, job := range jobs { + mJob[job] = struct{}{} + } + } + + for job := range mJob { + s.RemoveByReference(job) + } + + return errs +} + +// FindJobsByTag will return a slice of Jobs that match all given tags +func (s *Scheduler) FindJobsByTag(tags ...string) ([]*Job, error) { + var jobs []*Job + +Jobs: + for _, job := range s.Jobs() { + if job.hasTags(tags...) { + jobs = append(jobs, job) + continue Jobs + } + } + + if len(jobs) > 0 { + return jobs, nil + } + return nil, ErrJobNotFoundWithTag +} + +// LimitRunsTo limits the number of executions of this job to n. +// Upon reaching the limit, the job is removed from the scheduler. +func (s *Scheduler) LimitRunsTo(i int) *Scheduler { + job := s.getCurrentJob() + job.LimitRunsTo(i) + return s +} + +// SingletonMode prevents a new job from starting if the prior job has not yet +// completed its run +func (s *Scheduler) SingletonMode() *Scheduler { + job := s.getCurrentJob() + job.SingletonMode() + return s +} + +// SingletonModeAll prevents new jobs from starting if the prior instance of the +// particular job has not yet completed its run +func (s *Scheduler) SingletonModeAll() { + s.singletonMode = true +} + +// TaskPresent checks if specific job's function was added to the scheduler. 
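As a hedged sketch of the tag and limit helpers defined in this file (Tag, LimitRunsTo, SingletonMode, FindJobsByTag, RemoveByTag); s and task carry over from the earlier sketch and are illustrative only.

    _, _ = s.Every(1).Minute().Tag("reports").LimitRunsTo(10).Do(task) // removed after 10 runs
    _, _ = s.Every(30).Seconds().SingletonMode().Do(task)              // overlapping runs are skipped
    if jobs, err := s.FindJobsByTag("reports"); err == nil {
        fmt.Println(len(jobs), "jobs tagged \"reports\"")
    }
    _ = s.RemoveByTag("reports")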
+func (s *Scheduler) TaskPresent(j interface{}) bool { + for _, job := range s.Jobs() { + if job.name == getFunctionName(j) { + return true + } + } + return false +} + +// To avoid the recursive read lock on s.Jobs() and this function, +// creating this new function and distributing the lock between jobPresent, _jobPresent +func (s *Scheduler) _jobPresent(j *Job, jobs []*Job) bool { + s.jobsMutex.RLock() + defer s.jobsMutex.RUnlock() + for _, job := range jobs { + if job == j { + return true + } + } + return false +} + +func (s *Scheduler) jobPresent(j *Job) bool { + return s._jobPresent(j, s.Jobs()) +} + +// Clear clears all Jobs from this scheduler +func (s *Scheduler) Clear() { + for _, job := range s.Jobs() { + job.stop() + } + s.setJobs(make([]*Job, 0)) + // If unique tags was enabled, delete all the tags loaded in the tags sync.Map + if s.tagsUnique { + s.tags.Range(func(key interface{}, value interface{}) bool { + s.tags.Delete(key) + return true + }) + } +} + +// Stop stops the scheduler. This is a no-op if the scheduler is already stopped. +// It waits for all running jobs to finish before returning, so it is safe to assume that running jobs will finish when calling this. +func (s *Scheduler) Stop() { + if s.IsRunning() { + s.stop() + } +} + +func (s *Scheduler) stop() { + s.setRunning(false) + s.executor.stop() +} + +// Do specifies the jobFunc that should be called every time the Job runs +func (s *Scheduler) Do(jobFun interface{}, params ...interface{}) (*Job, error) { + job := s.getCurrentJob() + + jobUnit := job.getUnit() + if job.getAtTime(job.lastRun) != 0 && (jobUnit <= hours || jobUnit >= duration) { + job.error = wrapOrError(job.error, ErrAtTimeNotSupported) + } + + if len(job.scheduledWeekdays) != 0 && jobUnit != weeks { + job.error = wrapOrError(job.error, ErrWeekdayNotSupported) + } + + if job.unit != crontab && job.interval == 0 { + if job.unit != duration { + job.error = wrapOrError(job.error, ErrInvalidInterval) + } + } + + if job.error != nil { + // delete the job from the scheduler as this job + // cannot be executed + s.RemoveByReference(job) + return nil, job.error + } + + typ := reflect.TypeOf(jobFun) + if typ.Kind() != reflect.Func { + // delete the job for the same reason as above + s.RemoveByReference(job) + return nil, ErrNotAFunction + } + + f := reflect.ValueOf(jobFun) + if len(params) != f.Type().NumIn() { + s.RemoveByReference(job) + job.error = wrapOrError(job.error, ErrWrongParams) + return nil, job.error + } + + fname := getFunctionName(jobFun) + if job.name != fname { + job.function = jobFun + job.parameters = params + job.name = fname + } + + // we should not schedule if not running since we can't foresee how long it will take for the scheduler to start + if s.IsRunning() { + s.scheduleNextRun(job) + } + + return job, nil +} + +// At schedules the Job at a specific time of day in the form "HH:MM:SS" or "HH:MM" +// or time.Time (note that only the hours, minutes, seconds and nanos are used). 
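The accepted At forms described above look roughly like this in use (s and task as in the earlier sketches; Day, Week and Monday are defined further down in this file, and the ';' separator is handled by the At implementation below):

    _, _ = s.Every(1).Day().At("10:30").Do(task)           // "HH:MM"
    _, _ = s.Every(1).Day().At("10:30:20").Do(task)        // "HH:MM:SS"
    _, _ = s.Every(1).Day().At("08:00;18:00").Do(task)     // several at-times in one string, ';'-separated
    _, _ = s.Every(1).Week().Monday().At("09:00").Do(task) // weekly on Mondays
    _, _ = s.Every(1).Day().At(time.Date(0, 1, 1, 7, 30, 0, 0, time.UTC)).Do(task) // only the clock part is used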
+func (s *Scheduler) At(i interface{}) *Scheduler { + job := s.getCurrentJob() + + switch t := i.(type) { + case string: + for _, tt := range strings.Split(t, ";") { + hour, min, sec, err := parseTime(tt) + if err != nil { + job.error = wrapOrError(job.error, err) + return s + } + // save atTime start as duration from midnight + job.addAtTime(time.Duration(hour)*time.Hour + time.Duration(min)*time.Minute + time.Duration(sec)*time.Second) + } + case time.Time: + job.addAtTime(time.Duration(t.Hour())*time.Hour + time.Duration(t.Minute())*time.Minute + time.Duration(t.Second())*time.Second + time.Duration(t.Nanosecond())*time.Nanosecond) + default: + job.error = wrapOrError(job.error, ErrUnsupportedTimeFormat) + } + job.startsImmediately = false + return s +} + +// Tag will add a tag when creating a job. +func (s *Scheduler) Tag(t ...string) *Scheduler { + job := s.getCurrentJob() + + if s.tagsUnique { + for _, tag := range t { + if _, ok := s.tags.Load(tag); ok { + job.error = wrapOrError(job.error, ErrTagsUnique(tag)) + return s + } + s.tags.Store(tag, struct{}{}) + } + } + + job.tags = append(job.tags, t...) + return s +} + +// StartAt schedules the next run of the Job. If this time is in the past, the configured interval will be used +// to calculate the next future time +func (s *Scheduler) StartAt(t time.Time) *Scheduler { + job := s.getCurrentJob() + job.setStartAtTime(t) + job.startsImmediately = false + return s +} + +// setUnit sets the unit type +func (s *Scheduler) setUnit(unit schedulingUnit) { + job := s.getCurrentJob() + currentUnit := job.getUnit() + if currentUnit == duration || currentUnit == crontab { + job.error = wrapOrError(job.error, ErrInvalidIntervalUnitsSelection) + return + } + job.setUnit(unit) +} + +// Millisecond sets the unit with seconds +func (s *Scheduler) Millisecond() *Scheduler { + return s.Milliseconds() +} + +// Milliseconds sets the unit with seconds +func (s *Scheduler) Milliseconds() *Scheduler { + s.setUnit(milliseconds) + return s +} + +// Second sets the unit with seconds +func (s *Scheduler) Second() *Scheduler { + return s.Seconds() +} + +// Seconds sets the unit with seconds +func (s *Scheduler) Seconds() *Scheduler { + s.setUnit(seconds) + return s +} + +// Minute sets the unit with minutes +func (s *Scheduler) Minute() *Scheduler { + return s.Minutes() +} + +// Minutes sets the unit with minutes +func (s *Scheduler) Minutes() *Scheduler { + s.setUnit(minutes) + return s +} + +// Hour sets the unit with hours +func (s *Scheduler) Hour() *Scheduler { + return s.Hours() +} + +// Hours sets the unit with hours +func (s *Scheduler) Hours() *Scheduler { + s.setUnit(hours) + return s +} + +// Day sets the unit with days +func (s *Scheduler) Day() *Scheduler { + s.setUnit(days) + return s +} + +// Days set the unit with days +func (s *Scheduler) Days() *Scheduler { + s.setUnit(days) + return s +} + +// Week sets the unit with weeks +func (s *Scheduler) Week() *Scheduler { + s.setUnit(weeks) + return s +} + +// Weeks sets the unit with weeks +func (s *Scheduler) Weeks() *Scheduler { + s.setUnit(weeks) + return s +} + +// Month sets the unit with months +func (s *Scheduler) Month(daysOfMonth ...int) *Scheduler { + return s.Months(daysOfMonth...) 
+} + +// MonthLastDay sets the unit with months at every last day of the month +func (s *Scheduler) MonthLastDay() *Scheduler { + return s.Months(-1) +} + +// Months sets the unit with months +// Note: Only days 1 through 28 are allowed for monthly schedules +// Note: Multiple add same days of month cannot be allowed +// Note: -1 is a special value and can only occur as single argument +func (s *Scheduler) Months(daysOfTheMonth ...int) *Scheduler { + job := s.getCurrentJob() + + if len(daysOfTheMonth) == 0 { + job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry) + } else if len(daysOfTheMonth) == 1 { + dayOfMonth := daysOfTheMonth[0] + if dayOfMonth != -1 && (dayOfMonth < 1 || dayOfMonth > 28) { + job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry) + } + } else { + + repeatMap := make(map[int]int) + for _, dayOfMonth := range daysOfTheMonth { + + if dayOfMonth < 1 || dayOfMonth > 28 { + job.error = wrapOrError(job.error, ErrInvalidDayOfMonthEntry) + break + } + + for _, dayOfMonthInJob := range job.daysOfTheMonth { + if dayOfMonthInJob == dayOfMonth { + job.error = wrapOrError(job.error, ErrInvalidDaysOfMonthDuplicateValue) + break + } + } + + if _, ok := repeatMap[dayOfMonth]; ok { + job.error = wrapOrError(job.error, ErrInvalidDaysOfMonthDuplicateValue) + break + } else { + repeatMap[dayOfMonth]++ + } + } + } + if job.daysOfTheMonth == nil { + job.daysOfTheMonth = make([]int, 0) + } + job.daysOfTheMonth = append(job.daysOfTheMonth, daysOfTheMonth...) + job.startsImmediately = false + s.setUnit(months) + return s +} + +// NOTE: If the dayOfTheMonth for the above two functions is +// more than the number of days in that month, the extra day(s) +// spill over to the next month. Similarly, if it's less than 0, +// it will go back to the month before + +// Weekday sets the scheduledWeekdays with a specifics weekdays +func (s *Scheduler) Weekday(weekDay time.Weekday) *Scheduler { + job := s.getCurrentJob() + + if in := in(job.scheduledWeekdays, weekDay); !in { + job.scheduledWeekdays = append(job.scheduledWeekdays, weekDay) + } + + job.startsImmediately = false + s.setUnit(weeks) + return s +} + +func (s *Scheduler) Midday() *Scheduler { + return s.At("12:00") +} + +// Monday sets the start day as Monday +func (s *Scheduler) Monday() *Scheduler { + return s.Weekday(time.Monday) +} + +// Tuesday sets the start day as Tuesday +func (s *Scheduler) Tuesday() *Scheduler { + return s.Weekday(time.Tuesday) +} + +// Wednesday sets the start day as Wednesday +func (s *Scheduler) Wednesday() *Scheduler { + return s.Weekday(time.Wednesday) +} + +// Thursday sets the start day as Thursday +func (s *Scheduler) Thursday() *Scheduler { + return s.Weekday(time.Thursday) +} + +// Friday sets the start day as Friday +func (s *Scheduler) Friday() *Scheduler { + return s.Weekday(time.Friday) +} + +// Saturday sets the start day as Saturday +func (s *Scheduler) Saturday() *Scheduler { + return s.Weekday(time.Saturday) +} + +// Sunday sets the start day as Sunday +func (s *Scheduler) Sunday() *Scheduler { + return s.Weekday(time.Sunday) +} + +func (s *Scheduler) getCurrentJob() *Job { + if len(s.Jobs()) == 0 { + s.setJobs([]*Job{{}}) + s.jobCreated = true + } + return s.Jobs()[len(s.Jobs())-1] +} + +func (s *Scheduler) now() time.Time { + return s.time.Now(s.Location()) +} + +// TagsUnique forces job tags to be unique across the scheduler +// when adding tags with (s *Scheduler) Tag(). 
+// This does not enforce uniqueness on tags added via +// (j *Job) Tag() +func (s *Scheduler) TagsUnique() { + s.tagsUnique = true +} + +// Job puts the provided job in focus for the purpose +// of making changes to the job with the scheduler chain +// and finalized by calling Update() +func (s *Scheduler) Job(j *Job) *Scheduler { + jobs := s.Jobs() + for index, job := range jobs { + if job == j { + // the current job is always last, so put this job there + s.Swap(len(jobs)-1, index) + } + } + s.updateJob = true + return s +} + +// Update stops the job (if running) and starts it with any updates +// that were made to the job in the scheduler chain. Job() must be +// called first to put the given job in focus. +func (s *Scheduler) Update() (*Job, error) { + job := s.getCurrentJob() + + if !s.updateJob { + return job, wrapOrError(job.error, ErrUpdateCalledWithoutJob) + } + s.updateJob = false + job.stop() + job.ctx, job.cancel = context.WithCancel(context.Background()) + return s.Do(job.function, job.parameters...) +} + +func (s *Scheduler) Cron(cronExpression string) *Scheduler { + return s.cron(cronExpression, false) +} + +func (s *Scheduler) CronWithSeconds(cronExpression string) *Scheduler { + return s.cron(cronExpression, true) +} + +func (s *Scheduler) cron(cronExpression string, withSeconds bool) *Scheduler { + job := s.newJob(0) + if s.updateJob || s.jobCreated { + job = s.getCurrentJob() + } + + withLocation := fmt.Sprintf("CRON_TZ=%s %s", s.location.String(), cronExpression) + + var ( + cronSchedule cron.Schedule + err error + ) + + if withSeconds { + p := cron.NewParser(cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + cronSchedule, err = p.Parse(withLocation) + } else { + cronSchedule, err = cron.ParseStandard(withLocation) + } + + if err != nil { + job.error = wrapOrError(err, ErrCronParseFailure) + } + + job.cronSchedule = cronSchedule + job.setUnit(crontab) + job.startsImmediately = false + + if s.updateJob || s.jobCreated { + s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job)) + s.jobCreated = false + } else { + s.setJobs(append(s.Jobs(), job)) + } + return s +} + +func (s *Scheduler) newJob(interval int) *Job { + return newJob(interval, !s.waitForInterval, s.singletonMode) +} + +// WaitForScheduleAll defaults the scheduler to create all +// new jobs with the WaitForSchedule option as true. +// The jobs will not start immediately but rather will +// wait until their first scheduled interval. +func (s *Scheduler) WaitForScheduleAll() { + s.waitForInterval = true +} + +// WaitForSchedule sets the job to not start immediately +// but rather wait until the first scheduled interval. +func (s *Scheduler) WaitForSchedule() *Scheduler { + job := s.getCurrentJob() + job.startsImmediately = false + return s +} + +// StartImmediately sets the job to run immediately upon +// starting the scheduler or adding the job to a running +// scheduler. This overrides the jobs start status of any +// previously called methods in the chain. +// +// Note: This is the default behavior of the scheduler +// for most jobs, but is useful for overriding the default +// behavior of Cron scheduled jobs which default to +// WaitForSchedule. 
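The cron-style entry points above (Cron, CronWithSeconds) and the start-behavior toggles (WaitForSchedule, StartImmediately) combine roughly as in this sketch; s and task remain illustrative placeholders.

    _, _ = s.Cron("*/5 * * * *").Do(task)                      // standard 5-field spec; waits for the first match
    _, _ = s.CronWithSeconds("*/10 * * * * *").Do(task)        // 6-field spec with a leading seconds field
    _, _ = s.Cron("30 8 * * 1-5").StartImmediately().Do(task)  // additionally run once right away
    _, _ = s.Every(1).Hour().WaitForSchedule().Do(task)        // the opposite: wait out the first interval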
+func (s *Scheduler) StartImmediately() *Scheduler { + job := s.getCurrentJob() + job.startsImmediately = true + return s +} diff --git a/vendor/github.com/go-co-op/gocron/timeHelper.go b/vendor/github.com/go-co-op/gocron/timeHelper.go new file mode 100644 index 000000000..b5baeb573 --- /dev/null +++ b/vendor/github.com/go-co-op/gocron/timeHelper.go @@ -0,0 +1,25 @@ +package gocron + +import "time" + +var _ timeWrapper = (*trueTime)(nil) + +type timeWrapper interface { + Now(*time.Location) time.Time + Unix(int64, int64) time.Time + Sleep(time.Duration) +} + +type trueTime struct{} + +func (t *trueTime) Now(location *time.Location) time.Time { + return time.Now().In(location) +} + +func (t *trueTime) Unix(sec int64, nsec int64) time.Time { + return time.Unix(sec, nsec) +} + +func (t *trueTime) Sleep(d time.Duration) { + time.Sleep(d) +} diff --git a/vendor/github.com/robfig/cron/v3/.gitignore b/vendor/github.com/robfig/cron/v3/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE new file mode 100644 index 000000000..3a0f627ff --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md new file mode 100644 index 000000000..984c537c0 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/README.md @@ -0,0 +1,125 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Cron V3 has been released! 
+ +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Refer to the documentation here: +http://godoc.org/github.com/robfig/cron + +The rest of this document describes the the advances in v3 and a list of +breaking changes for users that wish to upgrade from an earlier version. + +## Upgrading to v3 (June 2019) + +cron v3 is a major upgrade to the library that addresses all outstanding bugs, +feature requests, and rough edges. It is based on a merge of master which +contains various fixes to issues found over the years and the v2 branch which +contains some backwards-incompatible features like the ability to remove cron +jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like +the timezone support, and fixes a number of bugs. + +New features: + +- Support for Go modules. Callers must now import this library as + `github.com/robfig/cron/v3`, instead of `gopkg.in/...` + +- Fixed bugs: + - 0f01e6b parser: fix combining of Dow and Dom (#70) + - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) + - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) + - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) + - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) + +- Standard cron spec parsing by default (first field is "minute"), with an easy + way to opt into the seconds field (quartz-compatible). Although, note that the + year field (optional in Quartz) is not supported. + +- Extensible, key/value logging via an interface that complies with + the https://github.com/go-logr/logr project. + +- The new Chain & JobWrapper types allow you to install "interceptors" to add + cross-cutting behavior like the following: + - Recover any panics from jobs + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + - Notification when jobs are completed + +It is backwards incompatible with both v1 and v2. These updates are required: + +- The v1 branch accepted an optional seconds field at the beginning of the cron + spec. This is non-standard and has led to a lot of confusion. The new default + parser conforms to the standard as described by [the Cron wikipedia page]. + + UPDATING: To retain the old behavior, construct your Cron with a custom + parser: + + // Seconds field, required + cron.New(cron.WithSeconds()) + + // Seconds field, optional + cron.New( + cron.WithParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) + +- The Cron type now accepts functional options on construction rather than the + previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). + + UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be + updated to provide those values on construction. + +- CRON_TZ is now the recommended way to specify the timezone of a single + schedule, which is sanctioned by the specification. The legacy "TZ=" prefix + will continue to be supported since it is unambiguous and easy to do so. + + UPDATING: No update is required. + +- By default, cron will no longer recover panics in jobs that it runs. 
+ Recovering can be surprising (see issue #192) and seems to be at odds with + typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option + has been removed to accommodate the more general JobWrapper type. + + UPDATING: To opt into panic recovery and configure the panic logger: + + cron.New(cron.WithChain( + cron.Recover(logger), // or use cron.DefaultLogger + )) + +- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was + removed, since it is duplicative with the leveled logging. + + UPDATING: Callers should use `WithLogger` and specify a logger that does not + discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`: + + cron.New( + cron.WithLogger(cron.VerbosePrintfLogger(logger))) + + +### Background - Cron spec format + +There are two cron spec formats in common usage: + +- The "standard" cron format, described on [the Cron wikipedia page] and used by + the cron Linux system utility. + +- The cron format used by [the Quartz Scheduler], commonly used for scheduled + jobs in Java software + +[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron +[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html + +The original version of this package included an optional "seconds" field, which +made it incompatible with both of these formats. Now, the "standard" format is +the default format accepted, and the Quartz format is opt-in. diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go new file mode 100644 index 000000000..9565b418e --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/chain.go @@ -0,0 +1,92 @@ +package cron + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +// JobWrapper decorates the given Job with some behavior. +type JobWrapper func(Job) Job + +// Chain is a sequence of JobWrappers that decorates submitted jobs with +// cross-cutting behaviors like logging or synchronization. +type Chain struct { + wrappers []JobWrapper +} + +// NewChain returns a Chain consisting of the given JobWrappers. +func NewChain(c ...JobWrapper) Chain { + return Chain{c} +} + +// Then decorates the given job with all JobWrappers in the chain. +// +// This: +// NewChain(m1, m2, m3).Then(job) +// is equivalent to: +// m1(m2(m3(job))) +func (c Chain) Then(j Job) Job { + for i := range c.wrappers { + j = c.wrappers[len(c.wrappers)-i-1](j) + } + return j +} + +// Recover panics in wrapped jobs and log them with the provided logger. +func Recover(logger Logger) JobWrapper { + return func(j Job) Job { + return FuncJob(func() { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err, ok := r.(error) + if !ok { + err = fmt.Errorf("%v", r) + } + logger.Error(err, "panic", "stack", "...\n"+string(buf)) + } + }() + j.Run() + }) + } +} + +// DelayIfStillRunning serializes jobs, delaying subsequent runs until the +// previous one is complete. Jobs running after a delay of more than a minute +// have the delay logged at Info. +func DelayIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var mu sync.Mutex + return FuncJob(func() { + start := time.Now() + mu.Lock() + defer mu.Unlock() + if dur := time.Since(start); dur > time.Minute { + logger.Info("delay", "duration", dur) + } + j.Run() + }) + } +} + +// SkipIfStillRunning skips an invocation of the Job if a previous invocation is +// still running. 
It logs skips to the given logger at Info level. +func SkipIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var ch = make(chan struct{}, 1) + ch <- struct{}{} + return FuncJob(func() { + select { + case v := <-ch: + j.Run() + ch <- v + default: + logger.Info("skip") + } + }) + } +} diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go new file mode 100644 index 000000000..cd6e7b1be --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go new file mode 100644 index 000000000..c7e917665 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/cron.go @@ -0,0 +1,355 @@ +package cron + +import ( + "context" + "sort" + "sync" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + chain Chain + stop chan struct{} + add chan *Entry + remove chan EntryID + snapshot chan chan []Entry + running bool + logger Logger + runningMu sync.Mutex + location *time.Location + parser ScheduleParser + nextID EntryID + jobWaiter sync.WaitGroup +} + +// ScheduleParser is an interface for schedule spec parsers that return a Schedule +type ScheduleParser interface { + Parse(spec string) (Schedule, error) +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// Schedule describes a job's duty cycle. +type Schedule interface { + // Next returns the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// EntryID identifies an entry within a Cron instance +type EntryID int + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // ID is the cron-assigned ID of this entry, which may be used to look up a + // snapshot or remove it. + ID EntryID + + // Schedule on which this job should be run. + Schedule Schedule + + // Next time the job will run, or the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // Prev is the last time this job was run, or the zero time if never. + Prev time.Time + + // WrappedJob is the thing to run when the Schedule is activated. + WrappedJob Job + + // Job is the thing that was submitted to cron. 
+ // It is kept around so that user code that needs to get at the job later, + // e.g. via Entries() can do so. + Job Job +} + +// Valid returns true if this is not the zero entry. +func (e Entry) Valid() bool { return e.ID != 0 } + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, modified by the given options. +// +// Available Settings +// +// Time Zone +// Description: The time zone in which schedules are interpreted +// Default: time.Local +// +// Parser +// Description: Parser converts cron spec strings into cron.Schedules. +// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron +// +// Chain +// Description: Wrap submitted jobs to customize behavior. +// Default: A chain that recovers panics and logs them to stderr. +// +// See "cron.With*" to modify the default behavior. +func New(opts ...Option) *Cron { + c := &Cron{ + entries: nil, + chain: NewChain(), + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan chan []Entry), + remove: make(chan EntryID), + running: false, + runningMu: sync.Mutex{}, + logger: DefaultLogger, + location: time.Local, + parser: standardParser, + } + for _, opt := range opts { + opt(c) + } + return c +} + +// FuncJob is a wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { + schedule, err := c.parser.Parse(spec) + if err != nil { + return 0, err + } + return c.Schedule(schedule, cmd), nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +// The job is wrapped with the configured Chain. +func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { + c.runningMu.Lock() + defer c.runningMu.Unlock() + c.nextID++ + entry := &Entry{ + ID: c.nextID, + Schedule: schedule, + WrappedJob: c.chain.Then(cmd), + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + } else { + c.add <- entry + } + return entry.ID +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []Entry { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + replyChan := make(chan []Entry, 1) + c.snapshot <- replyChan + return <-replyChan + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Entry returns a snapshot of the given entry, or nil if it couldn't be found. 
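For the embedded robfig/cron package itself, the exported surface shown in this file is typically exercised like the following sketch (fmt and log are assumed imports of the calling program):

    c := cron.New()
    id, err := c.AddFunc("30 * * * *", func() { fmt.Println("half past every hour") })
    if err != nil {
        log.Fatal(err)
    }
    c.Start()
    fmt.Println(c.Entry(id).Next) // next activation time for that entry
    c.Remove(id)                  // no further runs will be scheduled
    <-c.Stop().Done()             // Stop returns a context that completes once running jobs finish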
+func (c *Cron) Entry(id EntryID) Entry { + for _, entry := range c.Entries() { + if id == entry.ID { + return entry + } + } + return Entry{} +} + +// Remove an entry from being run in the future. +func (c *Cron) Remove(id EntryID) { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.remove <- id + } else { + c.removeEntry(id) + } +} + +// Start the cron scheduler in its own goroutine, or no-op if already started. +func (c *Cron) Start() { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + c.runningMu.Lock() + if c.running { + c.runningMu.Unlock() + return + } + c.running = true + c.runningMu.Unlock() + c.run() +} + +// run the scheduler.. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + c.logger.Info("start") + + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + c.logger.Info("wake", "now", now) + + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + c.startJob(e.WrappedJob) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) + + case replyChan := <-c.snapshot: + replyChan <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + c.logger.Info("stop") + return + + case id := <-c.remove: + timer.Stop() + now = c.now() + c.removeEntry(id) + c.logger.Info("removed", "entry", id) + } + + break + } + } +} + +// startJob runs the given job in a new goroutine. +func (c *Cron) startJob(j Job) { + c.jobWaiter.Add(1) + go func() { + defer c.jobWaiter.Done() + j.Run() + }() +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +// A context is returned so the caller can wait for running jobs to complete. +func (c *Cron) Stop() context.Context { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.stop <- struct{}{} + c.running = false + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + c.jobWaiter.Wait() + cancel() + }() + return ctx +} + +// entrySnapshot returns a copy of the current cron entry list. 
+func (c *Cron) entrySnapshot() []Entry { + var entries = make([]Entry, len(c.entries)) + for i, e := range c.entries { + entries[i] = *e + } + return entries +} + +func (c *Cron) removeEntry(id EntryID) { + var entries []*Entry + for _, e := range c.entries { + if e.ID != id { + entries = append(entries, e) + } + } + c.entries = entries +} diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go new file mode 100644 index 000000000..fa5d08b4d --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/doc.go @@ -0,0 +1,231 @@ +/* +Package cron implements a cron spec parser and job runner. + +Installation + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) + c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 5 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Month and Day-of-week field values are case insensitive. "SUN", "Sun", and +"sun" are equally accepted. + +The specific interpretation of the format is based on the Cron Wikipedia page: +https://en.wikipedia.org/wiki/Cron + +Alternative Formats + +Alternative Cron expression formats support other fields like seconds. You can +implement that by creating a custom Parser as follows. + + cron.New( + cron.WithParser( + cron.NewParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) + +Since adding Seconds is the most common modification to the standard cron spec, +cron provides a builtin function to do that, which is equivalent to the custom +parser you saw earlier, except that its seconds field is REQUIRED: + + cron.New(cron.WithSeconds()) + +That emulates Quartz, the most popular alternative Cron schedule format: +http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges. 
For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. + +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * + @monthly | Run once a month, midnight, first of month | 0 0 1 * * + @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 + @daily (or @midnight) | Run once a day, midnight | 0 0 * * * + @hourly | Run once an hour, beginning of hour | 0 * * * * + +Intervals + +You may also schedule a job to execute at fixed intervals, starting at the time it's added +or cron is run. This is supported by formatting the cron spec like this: + + @every + +where "duration" is a string accepted by time.ParseDuration +(http://golang.org/pkg/time/#ParseDuration). + +For example, "@every 1h30m10s" would indicate a schedule that activates after +1 hour, 30 minutes, 10 seconds, and then every interval after that. + +Note: The interval does not take the job runtime into account. For example, +if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, +it will have only 2 minutes of idle time between each run. + +Time zones + +By default, all interpretation and scheduling is done in the machine's local +time zone (time.Local). You can specify a different time zone on construction: + + cron.New( + cron.WithLocation(time.UTC)) + +Individual cron schedules may also override the time zone they are to be +interpreted in by providing an additional space-separated field at the beginning +of the cron spec, of the form "CRON_TZ=Asia/Tokyo". + +For example: + + # Runs at 6am in time.Local + cron.New().AddFunc("0 6 * * ?", ...) + + # Runs at 6am in America/New_York + nyc, _ := time.LoadLocation("America/New_York") + c := cron.New(cron.WithLocation(nyc)) + c.AddFunc("0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + c := cron.New(cron.WithLocation(nyc)) + c.SetLocation("America/New_York") + c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + +The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. + +Be aware that jobs scheduled during daylight-savings leap-ahead transitions will +not be run! + +Job Wrappers + +A Cron runner may be configured with a chain of job wrappers to add +cross-cutting functionality to all submitted jobs. 
For example, they may be used +to achieve the following effects: + + - Recover any panics from jobs (activated by default) + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + +Install wrappers for all jobs added to a cron using the `cron.WithChain` option: + + cron.New(cron.WithChain( + cron.SkipIfStillRunning(logger), + )) + +Install wrappers for individual jobs by explicitly wrapping them: + + job = cron.NewChain( + cron.SkipIfStillRunning(logger), + ).Then(job) + +Thread safety + +Since the Cron service runs concurrently with the calling code, some amount of +care must be taken to ensure proper synchronization. + +All cron methods are designed to be correctly synchronized as long as the caller +ensures that invocations have a clear happens-before ordering between them. + +Logging + +Cron defines a Logger interface that is a subset of the one defined in +github.com/go-logr/logr. It has two logging levels (Info and Error), and +parameters are key/value pairs. This makes it possible for cron logging to plug +into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided +to wrap the standard library *log.Logger. + +For additional insight into Cron operations, verbose logging may be activated +which will record job runs, scheduling decisions, and added or removed jobs. +Activate it with a one-off logger as follows: + + cron.New( + cron.WithLogger( + cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) + + +Implementation + +Cron entries are stored in an array, sorted by their next activation time. Cron +sleeps until the next job is due to be run. + +Upon waking: + - it runs each entry that is active on that second + - it calculates the next run times for the jobs that were run + - it re-sorts the array of entries by next activation time. + - it goes to sleep until the soonest job. +*/ +package cron diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go new file mode 100644 index 000000000..b4efcc053 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/logger.go @@ -0,0 +1,86 @@ +package cron + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +// DefaultLogger is used by Cron if none is specified. +var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) + +// DiscardLogger can be used by callers to discard all log messages. +var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) + +// Logger is the interface used in this package for logging, so that any backend +// can be plugged in. It is a subset of the github.com/go-logr/logr interface. +type Logger interface { + // Info logs routine messages about cron's operation. + Info(msg string, keysAndValues ...interface{}) + // Error logs an error condition. + Error(err error, msg string, keysAndValues ...interface{}) +} + +// PrintfLogger wraps a Printf-based logger (such as the standard library "log") +// into an implementation of the Logger interface which logs errors only. +func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, false} +} + +// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library +// "log") into an implementation of the Logger interface which logs everything. 
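A hedged sketch combining the logger with the job wrappers documented above (Recover and SkipIfStillRunning come from chain.go; WithLocation, WithLogger and WithChain from option.go; the job body is a placeholder):

    logger := cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
    c := cron.New(
        cron.WithLocation(time.UTC),
        cron.WithLogger(logger),
        cron.WithChain(
            cron.Recover(logger),            // opt back into panic recovery
            cron.SkipIfStillRunning(logger), // drop overlapping runs instead of queueing them
        ),
    )
    _, _ = c.AddFunc("@every 1h30m", func() { /* work */ })
    c.Start()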
+func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, true} +} + +type printfLogger struct { + logger interface{ Printf(string, ...interface{}) } + logInfo bool +} + +func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { + if pl.logInfo { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)), + append([]interface{}{msg}, keysAndValues...)...) + } +} + +func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)+2), + append([]interface{}{msg, "error", err}, keysAndValues...)...) +} + +// formatString returns a logfmt-like format string for the number of +// key/values. +func formatString(numKeysAndValues int) string { + var sb strings.Builder + sb.WriteString("%s") + if numKeysAndValues > 0 { + sb.WriteString(", ") + } + for i := 0; i < numKeysAndValues/2; i++ { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString("%v=%v") + } + return sb.String() +} + +// formatTimes formats any time.Time values as RFC3339. +func formatTimes(keysAndValues []interface{}) []interface{} { + var formattedArgs []interface{} + for _, arg := range keysAndValues { + if t, ok := arg.(time.Time); ok { + arg = t.Format(time.RFC3339) + } + formattedArgs = append(formattedArgs, arg) + } + return formattedArgs +} diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go new file mode 100644 index 000000000..09e4278e7 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/option.go @@ -0,0 +1,45 @@ +package cron + +import ( + "time" +) + +// Option represents a modification to the default behavior of a Cron. +type Option func(*Cron) + +// WithLocation overrides the timezone of the cron instance. +func WithLocation(loc *time.Location) Option { + return func(c *Cron) { + c.location = loc + } +} + +// WithSeconds overrides the parser used for interpreting job schedules to +// include a seconds field as the first one. +func WithSeconds() Option { + return WithParser(NewParser( + Second | Minute | Hour | Dom | Month | Dow | Descriptor, + )) +} + +// WithParser overrides the parser used for interpreting job schedules. +func WithParser(p ScheduleParser) Option { + return func(c *Cron) { + c.parser = p + } +} + +// WithChain specifies Job wrappers to apply to all jobs added to this cron. +// Refer to the Chain* functions in this package for provided wrappers. +func WithChain(wrappers ...JobWrapper) Option { + return func(c *Cron) { + c.chain = NewChain(wrappers...) + } +} + +// WithLogger uses the provided logger. +func WithLogger(logger Logger) Option { + return func(c *Cron) { + c.logger = logger + } +} diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go new file mode 100644 index 000000000..3cf8879f7 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/parser.go @@ -0,0 +1,434 @@ +package cron + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Configuration options for creating a parser. Most options specify which +// fields should be included, while others enable features. If a field is not +// included the parser will assume a default value. These options do not change +// the order fields are parse in. 
+type ParseOption int + +const ( + Second ParseOption = 1 << iota // Seconds field, default 0 + SecondOptional // Optional seconds field, default 0 + Minute // Minutes field, default 0 + Hour // Hours field, default 0 + Dom // Day of month field, default * + Month // Month field, default * + Dow // Day of week field, default * + DowOptional // Optional day of week field, default * + Descriptor // Allow descriptors such as @monthly, @weekly, etc. +) + +var places = []ParseOption{ + Second, + Minute, + Hour, + Dom, + Month, + Dow, +} + +var defaults = []string{ + "0", + "0", + "0", + "*", + "*", + "*", +} + +// A custom Parser that can be configured. +type Parser struct { + options ParseOption +} + +// NewParser creates a Parser with custom options. +// +// It panics if more than one Optional is given, since it would be impossible to +// correctly infer which optional is provided or missing in general. +// +// Examples +// +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") +// +// // Same as above, just excludes time fields +// subsParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") +// +// // Same as above, just makes Dow optional +// subsParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") +// +func NewParser(options ParseOption) Parser { + optionals := 0 + if options&DowOptional > 0 { + optionals++ + } + if options&SecondOptional > 0 { + optionals++ + } + if optionals > 1 { + panic("multiple optionals may not be configured") + } + return Parser{options} +} + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// It accepts crontab specs and features configured by NewParser. +func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("empty spec string") + } + + // Extract timezone if present + var loc = time.Local + if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { + var err error + i := strings.Index(spec, " ") + eq := strings.Index(spec, "=") + if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { + return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) + } + spec = strings.TrimSpace(spec[i:]) + } + + // Handle named schedules (descriptors), if configured + if strings.HasPrefix(spec, "@") { + if p.options&Descriptor == 0 { + return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) + } + return parseDescriptor(spec, loc) + } + + // Split on whitespace. 
+ fields := strings.Fields(spec) + + // Validate & fill in any omitted or optional fields + var err error + fields, err = normalizeFields(fields, p.options) + if err != nil { + return nil, err + } + + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + Location: loc, + }, nil +} + +// normalizeFields takes a subset set of the time fields and returns the full set +// with defaults (zeroes) populated for unset fields. +// +// As part of performing this function, it also validates that the provided +// fields are compatible with the configured options. +func normalizeFields(fields []string, options ParseOption) ([]string, error) { + // Validate optionals & add their field to options + optionals := 0 + if options&SecondOptional > 0 { + options |= Second + optionals++ + } + if options&DowOptional > 0 { + options |= Dow + optionals++ + } + if optionals > 1 { + return nil, fmt.Errorf("multiple optionals may not be configured") + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if options&place > 0 { + max++ + } + } + min := max - optionals + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) + } + return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) + } + + // Populate the optional field if not provided + if min < max && len(fields) == min { + switch { + case options&DowOptional > 0: + fields = append(fields, defaults[5]) // TODO: improve access to default + case options&SecondOptional > 0: + fields = append([]string{defaults[0]}, fields...) + default: + return nil, fmt.Errorf("unknown optional field") + } + } + + // Populate all fields not part of options with their defaults + n := 0 + expandedFields := make([]string, len(places)) + copy(expandedFields, defaults) + for i, place := range places { + if options&place > 0 { + expandedFields[i] = fields[n] + n++ + } + } + return expandedFields, nil +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given +// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries +// representing: minute, hour, day of month, month and day of week, in that +// order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". 
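The parser API shown above can also be used directly; a minimal sketch, assuming fmt, log and time are imported by the caller:

    p := cron.NewParser(cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)
    sched, err := p.Parse("*/10 8-17 * * MON-FRI") // five fields given, so the optional seconds field defaults to 0
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(sched.Next(time.Now()))

    // the package-level helper uses the fixed 5-field standard parser (plus descriptors)
    if sched, err = cron.ParseStandard("CRON_TZ=Asia/Tokyo 30 4 * * *"); err == nil {
        fmt.Println(sched.Next(time.Now()))
    }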
+func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + if step > 1 { + extra = 0 + } + default: + return 0, fmt.Errorf("too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
+func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + Location: loc, + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + Location: loc, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go new file mode 100644 index 000000000..fa1e241e5 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/spec.go @@ -0,0 +1,188 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 + + // Override location for this schedule. + Location *time.Location +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach + // + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Convert the given time into the schedule's timezone, if one is specified. 
+	// Save the original timezone so we can convert back after we find a time.
+	// Note that schedules without a time zone specified (time.Local) are treated
+	// as local to the time provided.
+	origLocation := t.Location()
+	loc := s.Location
+	if loc == time.Local {
+		loc = t.Location()
+	}
+	if s.Location != time.Local {
+		t = t.In(s.Location)
+	}
+
+	// Start at the earliest possible time (the upcoming second).
+	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+	// This flag indicates whether a field has been incremented.
+	added := false
+
+	// If no time is found within five years, return zero.
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	//
+	// NOTE: This causes issues for daylight savings regimes where midnight does
+	// not exist.  For example: Sao Paulo has DST that transforms midnight on
+	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 0, 1)
+		// Notice if the hour is no longer midnight due to DST.
+		// Add an hour if it's 23, subtract an hour if it's 1.
+		if t.Hour() != 0 {
+			if t.Hour() > 12 {
+				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
+			} else {
+				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
+			}
+		}
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t.In(origLocation)
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS
new file mode 100644
index 000000000..15167cd74
--- /dev/null
+++ b/vendor/golang.org/x/sync/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS
new file mode 100644
index 000000000..1c4577e96
--- /dev/null
+++ b/vendor/golang.org/x/sync/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..30f632c57 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. 
+func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 000000000..690eb8501 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,212 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. 
+func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ef9df0742..79bb9af6a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -57,6 +57,9 @@ github.com/evanphx/json-patch # github.com/fsnotify/fsnotify v1.5.4 ## explicit; go 1.16 github.com/fsnotify/fsnotify +# github.com/go-co-op/gocron v1.13.0 +## explicit; go 1.17 +github.com/go-co-op/gocron # github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr @@ -195,6 +198,9 @@ github.com/pkg/errors ## explicit; go 1.13 # github.com/prometheus/common v0.34.0 ## explicit; go 1.15 +# github.com/robfig/cron/v3 v3.0.1 +## explicit; go 1.12 +github.com/robfig/cron/v3 # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag @@ -234,6 +240,10 @@ golang.org/x/net/trace ## explicit; go 1.11 golang.org/x/oauth2 golang.org/x/oauth2/internal +# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +## explicit +golang.org/x/sync/semaphore +golang.org/x/sync/singleflight # golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a ## explicit; go 1.17 golang.org/x/sys/execabs From 545f05525c710a1f7f5b34c47ee6396d223581bc Mon Sep 17 00:00:00 2001 From: nicklesimba Date: Wed, 15 Jun 2022 19:01:26 -0500 Subject: [PATCH 2/2] Moved IP Reconciler code into IP Control Loop Signed-off-by: nicklesimba --- .github/CODEOWNERS | 3 +- Dockerfile | 1 - Dockerfile.arm64 | 1 - Dockerfile.openshift | 4 +- README.md | 3 +- cmd/controlloop/{main.go => controlloop.go} | 53 +++++- cmd/reconciler/errors.go | 8 - cmd/reconciler/ip.go | 48 ----- doc/crds/ip-reconciler-job.yaml | 41 ---- doc/extended-configuration.md | 6 + hack/build-go.sh | 1 - 
hack/e2e-setup-kind-cluster.sh | 2 +- pkg/config/config.go | 81 ++++---- pkg/config/config_test.go | 28 +++ .../controlloop/dummy_controller.go | 0 .../controlloop/entity_generators.go | 0 pkg/{reconciler => }/controlloop/pod.go | 0 .../controlloop/pod_controller_test.go | 0 pkg/reconciler/ip.go | 45 +++++ {cmd => pkg}/reconciler/ip_test.go | 158 +++++++++++++++- pkg/reconciler/iploop_test.go | 153 --------------- {cmd => pkg}/reconciler/suite_test.go | 14 +- pkg/types/types.go | 177 +++++++++--------- script/install-cni.sh | 3 +- 24 files changed, 420 insertions(+), 410 deletions(-) rename cmd/controlloop/{main.go => controlloop.go} (74%) delete mode 100644 cmd/reconciler/errors.go delete mode 100644 cmd/reconciler/ip.go delete mode 100644 doc/crds/ip-reconciler-job.yaml rename pkg/{reconciler => }/controlloop/dummy_controller.go (100%) rename pkg/{reconciler => }/controlloop/entity_generators.go (100%) rename pkg/{reconciler => }/controlloop/pod.go (100%) rename pkg/{reconciler => }/controlloop/pod_controller_test.go (100%) create mode 100644 pkg/reconciler/ip.go rename {cmd => pkg}/reconciler/ip_test.go (71%) delete mode 100644 pkg/reconciler/iploop_test.go rename {cmd => pkg}/reconciler/suite_test.go (94%) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3e6a020f6..037108fce 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,6 @@ # https://help.github.com/en/articles/about-code-owners * @dougbtv cmd/controlloop @maiqueb -cmd/reconciler @maiqueb e2e/ @maiqueb pkg/reconciler/ @maiqueb - +pkg/controlloop @maiqueb diff --git a/Dockerfile b/Dockerfile index 460d918be..e5660020a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,5 @@ FROM alpine:latest LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/whereabouts COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/whereabouts . COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-control-loop . -COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-reconciler . COPY script/install-cni.sh . CMD ["/install-cni.sh"] diff --git a/Dockerfile.arm64 b/Dockerfile.arm64 index 7030a6284..e1713143a 100644 --- a/Dockerfile.arm64 +++ b/Dockerfile.arm64 @@ -13,6 +13,5 @@ FROM arm64v8/alpine:latest LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/whereabouts COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/whereabouts . COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-control-loop . -COPY --from=0 /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-reconciler . COPY script/install-cni.sh . 
CMD ["/install-cni.sh"] diff --git a/Dockerfile.openshift b/Dockerfile.openshift index bb08d83d5..16e5efe4e 100644 --- a/Dockerfile.openshift +++ b/Dockerfile.openshift @@ -5,15 +5,13 @@ WORKDIR /go/src/github.com/k8snetworkplumbingwg/whereabouts ENV CGO_ENABLED=1 ENV GO111MODULE=on RUN go build -mod vendor -o bin/whereabouts cmd/whereabouts.go -RUN go build -mod vendor -o bin/ip-reconciler cmd/reconciler/ip.go cmd/reconciler/errors.go -RUN go build -mod vendor -o bin/ip-control-loop cmd/controlloop/main.go +RUN go build -mod vendor -o bin/ip-control-loop cmd/controlloop/controlloop.go WORKDIR / FROM openshift/origin-base RUN mkdir -p /usr/src/whereabouts/images && \ mkdir -p /usr/src/whereabouts/bin COPY --from=builder /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/whereabouts /usr/src/whereabouts/bin -COPY --from=builder /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-reconciler /usr/src/whereabouts/bin COPY --from=builder /go/src/github.com/k8snetworkplumbingwg/whereabouts/bin/ip-control-loop /usr/src/whereabouts/bin LABEL org.opencontainers.image.source https://github.com/k8snetworkplumbingwg/whereabouts diff --git a/README.md b/README.md index 144fc68e5..28fbed65b 100644 --- a/README.md +++ b/README.md @@ -45,8 +45,7 @@ git clone https://github.com/k8snetworkplumbingwg/whereabouts && cd whereabouts kubectl apply \ -f doc/crds/daemonset-install.yaml \ -f doc/crds/whereabouts.cni.cncf.io_ippools.yaml \ - -f doc/crds/whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml \ - -f doc/crds/ip-reconciler-job.yaml + -f doc/crds/whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml ``` The daemonset installation requires Kubernetes Version 1.16 or later. diff --git a/cmd/controlloop/main.go b/cmd/controlloop/controlloop.go similarity index 74% rename from cmd/controlloop/main.go rename to cmd/controlloop/controlloop.go index 30482f43b..257cb69b6 100644 --- a/cmd/controlloop/main.go +++ b/cmd/controlloop/controlloop.go @@ -5,7 +5,9 @@ import ( "fmt" "os" "os/signal" + "time" + gocron "github.com/go-co-op/gocron" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -21,17 +23,23 @@ import ( wbclient "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/clientset/versioned" wbinformers "github.com/k8snetworkplumbingwg/whereabouts/pkg/client/informers/externalversions" + "github.com/k8snetworkplumbingwg/whereabouts/pkg/config" + "github.com/k8snetworkplumbingwg/whereabouts/pkg/controlloop" "github.com/k8snetworkplumbingwg/whereabouts/pkg/logging" - "github.com/k8snetworkplumbingwg/whereabouts/pkg/reconciler/controlloop" + "github.com/k8snetworkplumbingwg/whereabouts/pkg/reconciler" + "github.com/k8snetworkplumbingwg/whereabouts/pkg/types" ) const ( allNamespaces = "" - controllerName = "pod-ip-reconciler" + controllerName = "pod-ip-controlloop" ) const ( couldNotCreateController = 1 + couldNotReadFlatfile = 1 + couldNotGetFlatIPAM = 1 + cronExpressionError = 1 ) const ( @@ -46,7 +54,9 @@ func main() { logging.SetLogStderr(true) stopChan := make(chan struct{}) + errorChan := make(chan error) defer close(stopChan) + defer close(errorChan) handleSignals(stopChan, os.Interrupt) networkController, err := newPodController(stopChan) @@ -57,8 +67,34 @@ func main() { networkController.Start(stopChan) defer networkController.Shutdown() - <-stopChan - logging.Verbosef("shutting down network controller") + + s := gocron.NewScheduler(time.UTC) + schedule := cronExpressionFromFlatFile() + + _, err = s.Cron(schedule).Do(func() 
{ // user configurable cron expression in install-cni.sh + reconciler.ReconcileIPs(errorChan) + }) + if err != nil { + _ = logging.Errorf("error with cron expression schedule: %v", err) + os.Exit(cronExpressionError) + } + + s.StartAsync() + + for { + select { + case <-stopChan: + logging.Verbosef("shutting down network controller") + s.Stop() + return + case err := <-errorChan: + if err == nil { + logging.Verbosef("reconciler success") + } else { + logging.Verbosef("reconciler failure: %s", err) + } + } + } } func handleSignals(stopChannel chan struct{}, signals ...os.Signal) { @@ -133,3 +169,12 @@ func newEventBroadcaster(k8sClientset kubernetes.Interface) record.EventBroadcas func newEventRecorder(broadcaster record.EventBroadcaster) record.EventRecorder { return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerName}) } + +func cronExpressionFromFlatFile() string { + flatipam, _, err := config.GetFlatIPAM(true, &types.IPAMConfig{}, "") + if err != nil { + _ = logging.Errorf("could not get flatipam: %v", err) + os.Exit(couldNotGetFlatIPAM) + } + return flatipam.IPAM.ReconcilerCronExpression +} diff --git a/cmd/reconciler/errors.go b/cmd/reconciler/errors.go deleted file mode 100644 index 1046e6519..000000000 --- a/cmd/reconciler/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package main - -const ( - kubeconfigNotFound = iota + 1 - couldNotStartOrphanedIPMonitor - failedToReconcileIPPools - failedToReconcileClusterWideIPs -) diff --git a/cmd/reconciler/ip.go b/cmd/reconciler/ip.go deleted file mode 100644 index 7407034bd..000000000 --- a/cmd/reconciler/ip.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "context" - "flag" - "os" - - "github.com/k8snetworkplumbingwg/whereabouts/pkg/logging" - "github.com/k8snetworkplumbingwg/whereabouts/pkg/reconciler" -) - -const defaultReconcilerTimeout = 30 - -func main() { - kubeConfigFile := flag.String("kubeconfig", "", "the path to the Kubernetes configuration file") - logLevel := flag.String("log-level", "error", "the logging level for the `ip-reconciler` app. 
Valid values are: \"debug\", \"verbose\", \"error\", and \"panic\".") - reconcilerTimeout := flag.Int("timeout", defaultReconcilerTimeout, "the value for a request timeout in seconds.") - flag.Parse() - - logging.SetLogLevel(*logLevel) - - var err error - var ipReconcileLoop *reconciler.ReconcileLooper - if kubeConfigFile == nil { - ipReconcileLoop, err = reconciler.NewReconcileLooper(context.Background(), *reconcilerTimeout) - } else { - ipReconcileLoop, err = reconciler.NewReconcileLooperWithKubeconfig(context.Background(), *kubeConfigFile, *reconcilerTimeout) - } - if err != nil { - _ = logging.Errorf("failed to create the reconcile looper: %v", err) - os.Exit(couldNotStartOrphanedIPMonitor) - } - - cleanedUpIps, err := ipReconcileLoop.ReconcileIPPools(context.Background()) - if err != nil { - _ = logging.Errorf("failed to clean up IP for allocations: %v", err) - os.Exit(failedToReconcileIPPools) - } - if len(cleanedUpIps) > 0 { - logging.Debugf("successfully cleanup IPs: %+v", cleanedUpIps) - } else { - logging.Debugf("no IP addresses to cleanup") - } - - if err := ipReconcileLoop.ReconcileOverlappingIPAddresses(context.Background()); err != nil { - os.Exit(failedToReconcileClusterWideIPs) - } -} diff --git a/doc/crds/ip-reconciler-job.yaml b/doc/crds/ip-reconciler-job.yaml deleted file mode 100644 index 247a6498d..000000000 --- a/doc/crds/ip-reconciler-job.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: ip-reconciler - namespace: kube-system - labels: - tier: node - app: whereabouts -spec: - concurrencyPolicy: Forbid - successfulJobsHistoryLimit: 0 - schedule: "*/5 * * * *" - jobTemplate: - spec: - backoffLimit: 0 - ttlSecondsAfterFinished: 300 - template: - metadata: - labels: - app: whereabouts - spec: - priorityClassName: "system-node-critical" - serviceAccountName: whereabouts - containers: - - name: whereabouts - image: ghcr.io/k8snetworkplumbingwg/whereabouts:latest-amd64 - resources: - requests: - cpu: "100m" - memory: "50Mi" - command: - - /ip-reconciler - - -log-level=verbose - volumeMounts: - - name: cni-net-dir - mountPath: /host/etc/cni/net.d - volumes: - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - restartPolicy: OnFailure diff --git a/doc/extended-configuration.md b/doc/extended-configuration.md index 7f20b631a..06d1d28e5 100644 --- a/doc/extended-configuration.md +++ b/doc/extended-configuration.md @@ -134,6 +134,12 @@ spec: You'll note that in the `ipam` section there's a lot less parameters than are used in the previous examples. +### Reconciler Cron Expression Configuration (optional) + +You may want to provide a cron expression to configure how frequently the ip-reconciler runs. This is done via the flatfile. + +Look for the following parameter `"reconciler_cron_expression"` located in `script/install-cni.sh` and change to your desired schedule. + ## Installing etcd. (optional) etcd installation is optional. By default, we recommend the custom resource backend (given in the first example configuration). 
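For illustration (a sketch, not part of the diff above): the `reconciler_cron_expression` option documented in the hunk above is read from the whereabouts flatfile that `install-cni.sh` generates. A minimal flatfile setting the schedule might look like the following; the kubeconfig and log paths are placeholders, and the schedule reuses the five-field expression the removed `ip-reconciler` CronJob shipped with:

```json
{
  "datastore": "kubernetes",
  "kubernetes": {
    "kubeconfig": "/etc/cni/net.d/whereabouts.d/whereabouts.kubeconfig"
  },
  "log_file": "/tmp/whereabouts.log",
  "log_level": "debug",
  "reconciler_cron_expression": "*/5 * * * *"
}
```

The expression is handed to gocron's `Scheduler.Cron()`, which parses it with the vendored robfig/cron standard parser, so it takes the usual five fields (minute, hour, day of month, month, day of week); the `"30 4 * * *"` value used in the new config test would run the reconciler daily at 04:30.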
diff --git a/hack/build-go.sh b/hack/build-go.sh index 1acc9734f..0d615169e 100755 --- a/hack/build-go.sh +++ b/hack/build-go.sh @@ -45,5 +45,4 @@ VERSION_LDFLAGS="-X github.com/k8snetworkplumbingwg/whereabouts/pkg/version.Vers GLDFLAGS="${GLDFLAGS} ${VERSION_LDFLAGS}" CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/${cmd} cmd/${cmd}.go -CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/ip-reconciler cmd/reconciler/*.go CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} ${GO} build ${GOFLAGS} -ldflags "${GLDFLAGS}" -o bin/ip-control-loop cmd/controlloop/*.go diff --git a/hack/e2e-setup-kind-cluster.sh b/hack/e2e-setup-kind-cluster.sh index 2a5f96b31..ac4b66252 100755 --- a/hack/e2e-setup-kind-cluster.sh +++ b/hack/e2e-setup-kind-cluster.sh @@ -98,7 +98,7 @@ trap "rm /tmp/whereabouts-img.tar || true" EXIT kind load image-archive --name "$KIND_CLUSTER_NAME" /tmp/whereabouts-img.tar echo "## install whereabouts" -for file in "daemonset-install.yaml" "ip-reconciler-job.yaml" "whereabouts.cni.cncf.io_ippools.yaml" "whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml"; do +for file in "daemonset-install.yaml" "whereabouts.cni.cncf.io_ippools.yaml" "whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml"; do retry kubectl apply -f "$ROOT/doc/crds/$file" done retry kubectl wait -n kube-system --for=condition=ready -l app=whereabouts pod --timeout=$TIMEOUT_K8 diff --git a/pkg/config/config.go b/pkg/config/config.go index 33e53f673..677d5eeff 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -53,41 +53,9 @@ func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (* n.IPAM.PodName = string(args.K8S_POD_NAME) n.IPAM.PodNamespace = string(args.K8S_POD_NAMESPACE) - // Once we have our basics, let's look for our (optional) configuration file - confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf"} - confdirs = append(confdirs, extraConfigPaths...) - // We prefix the optional configuration path (so we look there first) - if n.IPAM.ConfigurationPath != "" { - confdirs = append([]string{n.IPAM.ConfigurationPath}, confdirs...) - } - - // Cycle through the path and parse the JSON config - flatipam := types.Net{} - foundflatfile := "" - for _, confpath := range confdirs { - if pathExists(confpath) { - - jsonFile, err := os.Open(confpath) - - if err != nil { - return nil, "", fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err) - } - - defer jsonFile.Close() - - jsonBytes, err := ioutil.ReadAll(jsonFile) - if err != nil { - return nil, "", fmt.Errorf("LoadIPAMConfig Flatfile (%s) - ioutil.ReadAll error: %s", confpath, err) - } - - if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil { - return nil, "", fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes) - } - - foundflatfile = confpath - - break - } + flatipam, foundflatfile, err := GetFlatIPAM(false, n.IPAM, extraConfigPaths...) + if err != nil { + return nil, "", err } // Now let's try to merge the configurations... 
@@ -139,7 +107,6 @@ func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (* n.IPAM.Datastore = types.DatastoreETCD } - var err error storageError := "You have not configured the storage engine (looks like you're using an invalid `%s` parameter in your config)" switch n.IPAM.Datastore { case types.DatastoreKubernetes: @@ -164,7 +131,6 @@ func LoadIPAMConfig(bytes []byte, envArgs string, extraConfigPaths ...string) (* } n.IPAM.Gateway = gwip } - for i := range n.IPAM.OmitRanges { _, _, err := netutils.ParseCIDRSloppy(n.IPAM.OmitRanges[i]) if err != nil { @@ -252,6 +218,47 @@ func configureStatic(n *types.Net, args types.IPAMEnvArgs) error { } +func GetFlatIPAM(isControlLoop bool, IPAM *types.IPAMConfig, extraConfigPaths ...string) (types.Net, string, error) { + // Once we have our basics, let's look for our (optional) configuration file + confdirs := []string{"/etc/kubernetes/cni/net.d/whereabouts.d/whereabouts.conf", "/etc/cni/net.d/whereabouts.d/whereabouts.conf", "/host/etc/cni/net.d/whereabouts.d/whereabouts.conf"} + confdirs = append(confdirs, extraConfigPaths...) + // We prefix the optional configuration path (so we look there first) + + if !isControlLoop && IPAM != nil { + if IPAM.ConfigurationPath != "" { + confdirs = append([]string{IPAM.ConfigurationPath}, confdirs...) + } + } + + // Cycle through the path and parse the JSON config + flatipam := types.Net{} + foundflatfile := "" + for _, confpath := range confdirs { + if pathExists(confpath) { + jsonFile, err := os.Open(confpath) + if err != nil { + return flatipam, foundflatfile, fmt.Errorf("error opening flat configuration file @ %s with: %s", confpath, err) + } + + defer jsonFile.Close() + + jsonBytes, err := ioutil.ReadAll(jsonFile) + if err != nil { + return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - ioutil.ReadAll error: %s", confpath, err) + } + + if err := json.Unmarshal(jsonBytes, &flatipam.IPAM); err != nil { + return flatipam, foundflatfile, fmt.Errorf("LoadIPAMConfig Flatfile (%s) - JSON Parsing Error: %s / bytes: %s", confpath, err, jsonBytes) + } + + foundflatfile = confpath + return flatipam, foundflatfile, err + } + } + var err error + return flatipam, foundflatfile, err +} + func handleEnvArgs(n *types.Net, numV6 int, numV4 int, args types.IPAMEnvArgs) (int, int, error) { if args.IP != "" { diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 799e31b58..f4c838b15 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -243,4 +243,32 @@ var _ = Describe("Allocation operations", func() { Expect(ipamConfig.RangeStart).To(Equal(net.ParseIP("192.168.1.44"))) Expect(ipamConfig.RangeEnd).To(Equal(net.ParseIP("192.168.1.209"))) }) + + It("can unmarshall the cronjob expression", func() { + conf := `{ + "cniVersion": "0.3.1", + "name": "mynet", + "type": "ipvlan", + "master": "foo0", + "ipam": { + "type": "whereabouts", + "log_file" : "/tmp/whereabouts.log", + "log_level" : "debug", + "etcd_host": "foo", + "range": "00192.00168.1.0/24", + "range_start": "00192.00168.1.44", + "range_end": "00192.00168.01.209", + "gateway": "192.168.10.1", + "reconciler_cron_expression": "30 4 * * *" + } + }` + + ipamConfig, _, err := LoadIPAMConfig([]byte(conf), "") + Expect(err).NotTo(HaveOccurred()) + Expect(ipamConfig.Range).To(Equal("192.168.1.0/24")) + Expect(ipamConfig.RangeStart).To(Equal(net.ParseIP("192.168.1.44"))) + Expect(ipamConfig.RangeEnd).To(Equal(net.ParseIP("192.168.1.209"))) + 
Expect(ipamConfig.RangeEnd).To(Equal(net.ParseIP("192.168.1.209"))) + Expect(ipamConfig.ReconcilerCronExpression).To(Equal("30 4 * * *")) + }) }) diff --git a/pkg/reconciler/controlloop/dummy_controller.go b/pkg/controlloop/dummy_controller.go similarity index 100% rename from pkg/reconciler/controlloop/dummy_controller.go rename to pkg/controlloop/dummy_controller.go diff --git a/pkg/reconciler/controlloop/entity_generators.go b/pkg/controlloop/entity_generators.go similarity index 100% rename from pkg/reconciler/controlloop/entity_generators.go rename to pkg/controlloop/entity_generators.go diff --git a/pkg/reconciler/controlloop/pod.go b/pkg/controlloop/pod.go similarity index 100% rename from pkg/reconciler/controlloop/pod.go rename to pkg/controlloop/pod.go diff --git a/pkg/reconciler/controlloop/pod_controller_test.go b/pkg/controlloop/pod_controller_test.go similarity index 100% rename from pkg/reconciler/controlloop/pod_controller_test.go rename to pkg/controlloop/pod_controller_test.go diff --git a/pkg/reconciler/ip.go b/pkg/reconciler/ip.go new file mode 100644 index 000000000..012bc0420 --- /dev/null +++ b/pkg/reconciler/ip.go @@ -0,0 +1,45 @@ +package reconciler + +import ( + "context" + "time" + + "github.com/k8snetworkplumbingwg/whereabouts/pkg/logging" +) + +const ( + defaultReconcilerTimeout = 30 +) + +func ReconcileIPs(errorChan chan error) { + logging.Verbosef("starting reconciler run") + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(defaultReconcilerTimeout*time.Second)) + defer cancel() + + ipReconcileLoop, err := NewReconcileLooper(ctx, defaultReconcilerTimeout) + if err != nil { + _ = logging.Errorf("failed to create the reconcile looper: %v", err) + errorChan <- err + return + } + + cleanedUpIps, err := ipReconcileLoop.ReconcileIPPools(ctx) + if err != nil { + _ = logging.Errorf("failed to clean up IP for allocations: %v", err) + errorChan <- err + return + } + + if len(cleanedUpIps) > 0 { + logging.Debugf("successfully cleanup IPs: %+v", cleanedUpIps) + } else { + logging.Debugf("no IP addresses to cleanup") + } + + if err := ipReconcileLoop.ReconcileOverlappingIPAddresses(ctx); err != nil { + errorChan <- err + return + } + + errorChan <- nil +} diff --git a/cmd/reconciler/ip_test.go b/pkg/reconciler/ip_test.go similarity index 71% rename from cmd/reconciler/ip_test.go rename to pkg/reconciler/ip_test.go index 83eaac84a..b43140697 100644 --- a/cmd/reconciler/ip_test.go +++ b/pkg/reconciler/ip_test.go @@ -1,4 +1,4 @@ -package main +package reconciler import ( "context" @@ -6,19 +6,25 @@ import ( "fmt" "net" "strings" + "testing" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" multusv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1" - "github.com/k8snetworkplumbingwg/whereabouts/pkg/reconciler" + "github.com/k8snetworkplumbingwg/whereabouts/pkg/types" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8stypes "k8s.io/apimachinery/pkg/types" ) +func TestIPReconciler(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Reconcile IP address allocation in the system") +} + var _ = Describe("Whereabouts IP reconciler", func() { const ( firstIPInRange = "10.10.10.1" @@ -30,7 +36,7 @@ var _ = Describe("Whereabouts IP reconciler", func() { ) var ( - reconcileLooper *reconciler.ReconcileLooper + reconcileLooper *ReconcileLooper ) Context("reconciling IP pools with a single running pod", func() { @@ -67,7 +73,7 @@ var _ = Describe("Whereabouts IP reconciler", func() { Context("reconciling the IPPool", func() { BeforeEach(func() { var err error - reconcileLooper, err = reconciler.NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) + reconcileLooper, err = NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) Expect(err).NotTo(HaveOccurred()) }) @@ -138,7 +144,7 @@ var _ = Describe("Whereabouts IP reconciler", func() { Context("reconciling the IPPool", func() { BeforeEach(func() { var err error - reconcileLooper, err = reconciler.NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) + reconcileLooper, err = NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) Expect(err).NotTo(HaveOccurred()) }) @@ -243,7 +249,7 @@ var _ = Describe("Whereabouts IP reconciler", func() { It("will delete an orphaned IP address", func() { Expect(k8sClientSet.CoreV1().Pods(namespace).Delete(context.TODO(), pods[podIndexToRemove].Name, metav1.DeleteOptions{})).NotTo(HaveOccurred()) - newReconciler, err := reconciler.NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) + newReconciler, err := NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) Expect(err).NotTo(HaveOccurred()) Expect(newReconciler.ReconcileOverlappingIPAddresses(context.TODO())).To(Succeed()) @@ -271,7 +277,7 @@ var _ = Describe("Whereabouts IP reconciler", func() { pool = generateIPPoolSpec(ipRange, namespace, poolName, pod.Name) Expect(k8sClient.Create(context.Background(), pool)).NotTo(HaveOccurred()) - reconcileLooper, err = reconciler.NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) + reconcileLooper, err = NewReconcileLooperWithKubeconfig(context.TODO(), kubeConfigPath, timeout) Expect(err).NotTo(HaveOccurred()) }) @@ -286,6 +292,117 @@ var _ = Describe("Whereabouts IP reconciler", func() { }) }) +// mock the pool +type dummyPool struct { + orphans []types.IPReservation + pool v1alpha1.IPPool +} + +func (dp dummyPool) Allocations() []types.IPReservation { + return dp.orphans +} + +func (dp dummyPool) Update(context.Context, []types.IPReservation) error { + return nil +} + +var _ = Describe("IPReconciler", func() { + var ipReconciler *ReconcileLooper + + newIPReconciler := func(orphanedIPs ...OrphanedIPReservations) *ReconcileLooper { + reconciler := &ReconcileLooper{ + orphanedIPs: orphanedIPs, + } + + return reconciler + } + + When("there are no IP addresses to reconcile", func() { + BeforeEach(func() { + ipReconciler = newIPReconciler() + }) + + It("does not delete anything", func() { + 
reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) + Expect(err).NotTo(HaveOccurred()) + Expect(reconciledIPs).To(BeEmpty()) + }) + }) + + When("there are IP addresses to reconcile", func() { + const ( + firstIPInRange = "192.168.14.1" + ipCIDR = "192.168.14.0/24" + namespace = "default" + podName = "pod1" + ) + + BeforeEach(func() { + podRef := "default/pod1" + reservations := generateIPReservation(firstIPInRange, podRef) + + pool := generateIPPool(ipCIDR, podRef) + orphanedIPAddr := OrphanedIPReservations{ + Pool: dummyPool{orphans: reservations, pool: pool}, + Allocations: reservations, + } + + ipReconciler = newIPReconciler(orphanedIPAddr) + }) + + It("does delete the orphaned IP address", func() { + reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) + Expect(err).NotTo(HaveOccurred()) + Expect(reconciledIPs).To(Equal([]net.IP{net.ParseIP(firstIPInRange)})) + }) + + Context("and they are actually multiple IPs", func() { + BeforeEach(func() { + podRef := "default/pod2" + reservations := generateIPReservation("192.168.14.2", podRef) + + pool := generateIPPool(ipCIDR, podRef, "default/pod2", "default/pod3") + orphanedIPAddr := OrphanedIPReservations{ + Pool: dummyPool{orphans: reservations, pool: pool}, + Allocations: reservations, + } + + ipReconciler = newIPReconciler(orphanedIPAddr) + }) + + It("does delete *only the orphaned* the IP address", func() { + reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) + Expect(err).NotTo(HaveOccurred()) + Expect(reconciledIPs).To(ConsistOf([]net.IP{net.ParseIP("192.168.14.2")})) + }) + }) + + Context("but the IP reservation owner does not match", func() { + var reservationPodRef string + BeforeEach(func() { + reservationPodRef = "default/pod2" + podRef := "default/pod1" + reservations := generateIPReservation(firstIPInRange, podRef) + erroredReservations := generateIPReservation(firstIPInRange, reservationPodRef) + + pool := generateIPPool(ipCIDR, podRef) + orphanedIPAddr := OrphanedIPReservations{ + Pool: dummyPool{orphans: reservations, pool: pool}, + Allocations: erroredReservations, + } + + ipReconciler = newIPReconciler(orphanedIPAddr) + }) + + It("errors when attempting to clean up the IP address", func() { + reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) + Expect(err).To(MatchError(fmt.Sprintf("did not find reserved IP for container %s", reservationPodRef))) + Expect(reconciledIPs).To(BeEmpty()) + }) + }) + }) +}) + func generateIPPoolSpec(ipRange string, namespace string, poolName string, podNames ...string) *v1alpha1.IPPool { allocations := map[string]v1alpha1.IPAllocation{} for i, podName := range podNames { @@ -348,8 +465,8 @@ func generatePodAnnotations(ipNetworks ...ipInNetwork) map[string]string { networks = append(networks, ipNetworkInfo.networkName) } networkAnnotations := map[string]string{ - reconciler.MultusNetworkAnnotation: strings.Join(networks, ","), - reconciler.MultusNetworkStatusAnnotation: generatePodNetworkStatusAnnotation(ipNetworks...), + MultusNetworkAnnotation: strings.Join(networks, ","), + MultusNetworkStatusAnnotation: generatePodNetworkStatusAnnotation(ipNetworks...), } return networkAnnotations } @@ -370,3 +487,26 @@ func generatePodNetworkStatusAnnotation(ipNetworks ...ipInNetwork) string { return string(networkStatusStr) } + +func generateIPPool(cidr string, podRefs ...string) v1alpha1.IPPool { + allocations := map[string]v1alpha1.IPAllocation{} + for i, podRef := range podRefs { + allocations[fmt.Sprintf("%d", i)] = 
v1alpha1.IPAllocation{PodRef: podRef} + } + + return v1alpha1.IPPool{ + Spec: v1alpha1.IPPoolSpec{ + Range: cidr, + Allocations: allocations, + }, + } +} + +func generateIPReservation(ip string, podRef string) []types.IPReservation { + return []types.IPReservation{ + { + IP: net.ParseIP(ip), + PodRef: podRef, + }, + } +} diff --git a/pkg/reconciler/iploop_test.go b/pkg/reconciler/iploop_test.go deleted file mode 100644 index 1d8a5b1c2..000000000 --- a/pkg/reconciler/iploop_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package reconciler - -import ( - "context" - "fmt" - "net" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - whereaboutsv1alpha1 "github.com/k8snetworkplumbingwg/whereabouts/pkg/api/whereabouts.cni.cncf.io/v1alpha1" - "github.com/k8snetworkplumbingwg/whereabouts/pkg/types" -) - -func TestIPReconciler(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Reconcile IP address allocation in the system") -} - -// mock the pool -type dummyPool struct { - orphans []types.IPReservation - pool whereaboutsv1alpha1.IPPool -} - -func (dp dummyPool) Allocations() []types.IPReservation { - return dp.orphans -} - -func (dp dummyPool) Update(context.Context, []types.IPReservation) error { - return nil -} - -var _ = Describe("IPReconciler", func() { - var ipReconciler *ReconcileLooper - - newIPReconciler := func(orphanedIPs ...OrphanedIPReservations) *ReconcileLooper { - reconciler := &ReconcileLooper{ - orphanedIPs: orphanedIPs, - } - - return reconciler - } - - When("there are no IP addresses to reconcile", func() { - BeforeEach(func() { - ipReconciler = newIPReconciler() - }) - - It("does not delete anything", func() { - reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) - Expect(err).NotTo(HaveOccurred()) - Expect(reconciledIPs).To(BeEmpty()) - }) - }) - - When("there are IP addresses to reconcile", func() { - const ( - firstIPInRange = "192.168.14.1" - ipCIDR = "192.168.14.0/24" - namespace = "default" - podName = "pod1" - ) - - BeforeEach(func() { - podRef := "default/pod1" - reservations := generateIPReservation(firstIPInRange, podRef) - - pool := generateIPPool(ipCIDR, podRef) - orphanedIPAddr := OrphanedIPReservations{ - Pool: dummyPool{orphans: reservations, pool: pool}, - Allocations: reservations, - } - - ipReconciler = newIPReconciler(orphanedIPAddr) - }) - - It("does delete the orphaned IP address", func() { - reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) - Expect(err).NotTo(HaveOccurred()) - Expect(reconciledIPs).To(Equal([]net.IP{net.ParseIP(firstIPInRange)})) - }) - - Context("and they are actually multiple IPs", func() { - BeforeEach(func() { - podRef := "default/pod2" - reservations := generateIPReservation("192.168.14.2", podRef) - - pool := generateIPPool(ipCIDR, podRef, "default/pod2", "default/pod3") - orphanedIPAddr := OrphanedIPReservations{ - Pool: dummyPool{orphans: reservations, pool: pool}, - Allocations: reservations, - } - - ipReconciler = newIPReconciler(orphanedIPAddr) - }) - - It("does delete *only the orphaned* the IP address", func() { - reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO()) - Expect(err).NotTo(HaveOccurred()) - Expect(reconciledIPs).To(ConsistOf([]net.IP{net.ParseIP("192.168.14.2")})) - }) - }) - - Context("but the IP reservation owner does not match", func() { - var reservationPodRef string - BeforeEach(func() { - reservationPodRef = "default/pod2" - podRef := "default/pod1" - reservations := generateIPReservation(firstIPInRange, podRef) - erroredReservations 
:= generateIPReservation(firstIPInRange, reservationPodRef)
-
-				pool := generateIPPool(ipCIDR, podRef)
-				orphanedIPAddr := OrphanedIPReservations{
-					Pool:        dummyPool{orphans: reservations, pool: pool},
-					Allocations: erroredReservations,
-				}
-
-				ipReconciler = newIPReconciler(orphanedIPAddr)
-			})
-
-			It("errors when attempting to clean up the IP address", func() {
-				reconciledIPs, err := ipReconciler.ReconcileIPPools(context.TODO())
-				Expect(err).To(MatchError(fmt.Sprintf("did not find reserved IP for container %s", reservationPodRef)))
-				Expect(reconciledIPs).To(BeEmpty())
-			})
-		})
-	})
-})
-
-func generateIPPool(cidr string, podRefs ...string) whereaboutsv1alpha1.IPPool {
-	allocations := map[string]whereaboutsv1alpha1.IPAllocation{}
-	for i, podRef := range podRefs {
-		allocations[fmt.Sprintf("%d", i)] = whereaboutsv1alpha1.IPAllocation{PodRef: podRef}
-	}
-
-	return whereaboutsv1alpha1.IPPool{
-		Spec: whereaboutsv1alpha1.IPPoolSpec{
-			Range:       cidr,
-			Allocations: allocations,
-		},
-	}
-}
-
-func generateIPReservation(ip string, podRef string) []types.IPReservation {
-	return []types.IPReservation{
-		{
-			IP:     net.ParseIP(ip),
-			PodRef: podRef,
-		},
-	}
-}
diff --git a/cmd/reconciler/suite_test.go b/pkg/reconciler/suite_test.go
similarity index 94%
rename from cmd/reconciler/suite_test.go
rename to pkg/reconciler/suite_test.go
index 3af7d5331..202eac197 100644
--- a/cmd/reconciler/suite_test.go
+++ b/pkg/reconciler/suite_test.go
@@ -1,15 +1,15 @@
-package main
+package reconciler
 
 import (
 	"fmt"
 	"io/fs"
 	"io/ioutil"
-	"k8s.io/client-go/kubernetes"
 	"os"
 	"path"
 	"path/filepath"
 	"strings"
-	"testing"
+
+	"k8s.io/client-go/kubernetes"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -32,14 +32,6 @@ var (
 	tmpdir string
 )
 
-func TestAPIs(t *testing.T) {
-	RegisterFailHandler(Fail)
-
-	RunSpecsWithDefaultAndCustomReporters(t,
-		"Whereabouts IP reconciler Suite",
-		[]Reporter{})
-}
-
 var _ = BeforeSuite(func(done Done) {
 	zap.WriteTo(GinkgoWriter)
 	logf.SetLogger(zap.New())
diff --git a/pkg/types/types.go b/pkg/types/types.go
index 9ecc44f49..5d6608130 100644
--- a/pkg/types/types.go
+++ b/pkg/types/types.go
@@ -41,68 +41,70 @@ type NetConfList struct {
 
 // IPAMConfig describes the expected json configuration for this plugin
 type IPAMConfig struct {
-	Name string
-	Type string `json:"type"`
-	Routes []*cnitypes.Route `json:"routes"`
-	Datastore string `json:"datastore"`
-	Addresses []Address `json:"addresses,omitempty"`
-	OmitRanges []string `json:"exclude,omitempty"`
-	DNS cnitypes.DNS `json:"dns"`
-	Range string `json:"range"`
-	RangeStart net.IP `json:"range_start,omitempty"`
-	RangeEnd net.IP `json:"range_end,omitempty"`
-	GatewayStr string `json:"gateway"`
-	EtcdHost string `json:"etcd_host,omitempty"`
-	EtcdUsername string `json:"etcd_username,omitempty"`
-	EtcdPassword string `json:"etcd_password,omitempty"`
-	EtcdKeyFile string `json:"etcd_key_file,omitempty"`
-	EtcdCertFile string `json:"etcd_cert_file,omitempty"`
-	EtcdCACertFile string `json:"etcd_ca_cert_file,omitempty"`
-	LeaderLeaseDuration int `json:"leader_lease_duration,omitempty"`
-	LeaderRenewDeadline int `json:"leader_renew_deadline,omitempty"`
-	LeaderRetryPeriod int `json:"leader_retry_period,omitempty"`
-	LogFile string `json:"log_file"`
-	LogLevel string `json:"log_level"`
-	OverlappingRanges bool `json:"enable_overlapping_ranges,omitempty"`
-	SleepForRace int `json:"sleep_for_race,omitempty"`
-	Gateway net.IP
-	Kubernetes KubernetesConfig `json:"kubernetes,omitempty"`
-	ConfigurationPath string `json:"configuration_path"`
-	PodName string
-	PodNamespace string
+	Name string
+	Type string `json:"type"`
+	Routes []*cnitypes.Route `json:"routes"`
+	Datastore string `json:"datastore"`
+	Addresses []Address `json:"addresses,omitempty"`
+	OmitRanges []string `json:"exclude,omitempty"`
+	DNS cnitypes.DNS `json:"dns"`
+	Range string `json:"range"`
+	RangeStart net.IP `json:"range_start,omitempty"`
+	RangeEnd net.IP `json:"range_end,omitempty"`
+	GatewayStr string `json:"gateway"`
+	EtcdHost string `json:"etcd_host,omitempty"`
+	EtcdUsername string `json:"etcd_username,omitempty"`
+	EtcdPassword string `json:"etcd_password,omitempty"`
+	EtcdKeyFile string `json:"etcd_key_file,omitempty"`
+	EtcdCertFile string `json:"etcd_cert_file,omitempty"`
+	EtcdCACertFile string `json:"etcd_ca_cert_file,omitempty"`
+	LeaderLeaseDuration int `json:"leader_lease_duration,omitempty"`
+	LeaderRenewDeadline int `json:"leader_renew_deadline,omitempty"`
+	LeaderRetryPeriod int `json:"leader_retry_period,omitempty"`
+	LogFile string `json:"log_file"`
+	LogLevel string `json:"log_level"`
+	ReconcilerCronExpression string `json:"reconciler_cron_expression,omitempty"`
+	OverlappingRanges bool `json:"enable_overlapping_ranges,omitempty"`
+	SleepForRace int `json:"sleep_for_race,omitempty"`
+	Gateway net.IP
+	Kubernetes KubernetesConfig `json:"kubernetes,omitempty"`
+	ConfigurationPath string `json:"configuration_path"`
+	PodName string
+	PodNamespace string
 }
 
 func (ic *IPAMConfig) UnmarshalJSON(data []byte) error {
 	type IPAMConfigAlias struct {
-		Name string
-		Type string `json:"type"`
-		Routes []*cnitypes.Route `json:"routes"`
-		Datastore string `json:"datastore"`
-		Addresses []Address `json:"addresses,omitempty"`
-		OmitRanges []string `json:"exclude,omitempty"`
-		DNS cnitypes.DNS `json:"dns"`
-		Range string `json:"range"`
-		RangeStart string `json:"range_start,omitempty"`
-		RangeEnd string `json:"range_end,omitempty"`
-		GatewayStr string `json:"gateway"`
-		EtcdHost string `json:"etcd_host,omitempty"`
-		EtcdUsername string `json:"etcd_username,omitempty"`
-		EtcdPassword string `json:"etcd_password,omitempty"`
-		EtcdKeyFile string `json:"etcd_key_file,omitempty"`
-		EtcdCertFile string `json:"etcd_cert_file,omitempty"`
-		EtcdCACertFile string `json:"etcd_ca_cert_file,omitempty"`
-		LeaderLeaseDuration int `json:"leader_lease_duration,omitempty"`
-		LeaderRenewDeadline int `json:"leader_renew_deadline,omitempty"`
-		LeaderRetryPeriod int `json:"leader_retry_period,omitempty"`
-		LogFile string `json:"log_file"`
-		LogLevel string `json:"log_level"`
-		OverlappingRanges bool `json:"enable_overlapping_ranges,omitempty"`
-		SleepForRace int `json:"sleep_for_race,omitempty"`
-		Gateway string
-		Kubernetes KubernetesConfig `json:"kubernetes,omitempty"`
-		ConfigurationPath string `json:"configuration_path"`
-		PodName string
-		PodNamespace string
+		Name string
+		Type string `json:"type"`
+		Routes []*cnitypes.Route `json:"routes"`
+		Datastore string `json:"datastore"`
+		Addresses []Address `json:"addresses,omitempty"`
+		OmitRanges []string `json:"exclude,omitempty"`
+		DNS cnitypes.DNS `json:"dns"`
+		Range string `json:"range"`
+		RangeStart string `json:"range_start,omitempty"`
+		RangeEnd string `json:"range_end,omitempty"`
+		GatewayStr string `json:"gateway"`
+		EtcdHost string `json:"etcd_host,omitempty"`
+		EtcdUsername string `json:"etcd_username,omitempty"`
+		EtcdPassword string `json:"etcd_password,omitempty"`
+		EtcdKeyFile string `json:"etcd_key_file,omitempty"`
+		EtcdCertFile string `json:"etcd_cert_file,omitempty"`
+		EtcdCACertFile string `json:"etcd_ca_cert_file,omitempty"`
+		LeaderLeaseDuration int `json:"leader_lease_duration,omitempty"`
+		LeaderRenewDeadline int `json:"leader_renew_deadline,omitempty"`
+		LeaderRetryPeriod int `json:"leader_retry_period,omitempty"`
+		LogFile string `json:"log_file"`
+		LogLevel string `json:"log_level"`
+		ReconcilerCronExpression string `json:"reconciler_cron_expression,omitempty"`
+		OverlappingRanges bool `json:"enable_overlapping_ranges,omitempty"`
+		SleepForRace int `json:"sleep_for_race,omitempty"`
+		Gateway string
+		Kubernetes KubernetesConfig `json:"kubernetes,omitempty"`
+		ConfigurationPath string `json:"configuration_path"`
+		PodName string
+		PodNamespace string
 	}
 
 	ipamConfigAlias := IPAMConfigAlias{
@@ -114,35 +116,36 @@ func (ic *IPAMConfig) UnmarshalJSON(data []byte) error {
 	}
 
 	*ic = IPAMConfig{
-		Name: ipamConfigAlias.Name,
-		Type: ipamConfigAlias.Type,
-		Routes: ipamConfigAlias.Routes,
-		Datastore: ipamConfigAlias.Datastore,
-		Addresses: ipamConfigAlias.Addresses,
-		OmitRanges: ipamConfigAlias.OmitRanges,
-		DNS: ipamConfigAlias.DNS,
-		Range: ipamConfigAlias.Range,
-		RangeStart: backwardsCompatibleIPAddress(ipamConfigAlias.RangeStart),
-		RangeEnd: backwardsCompatibleIPAddress(ipamConfigAlias.RangeEnd),
-		GatewayStr: ipamConfigAlias.GatewayStr,
-		EtcdHost: ipamConfigAlias.EtcdHost,
-		EtcdUsername: ipamConfigAlias.EtcdUsername,
-		EtcdPassword: ipamConfigAlias.EtcdPassword,
-		EtcdKeyFile: ipamConfigAlias.EtcdKeyFile,
-		EtcdCertFile: ipamConfigAlias.EtcdCertFile,
-		EtcdCACertFile: ipamConfigAlias.EtcdCACertFile,
-		LeaderLeaseDuration: ipamConfigAlias.LeaderLeaseDuration,
-		LeaderRenewDeadline: ipamConfigAlias.LeaderRenewDeadline,
-		LeaderRetryPeriod: ipamConfigAlias.LeaderRetryPeriod,
-		LogFile: ipamConfigAlias.LogFile,
-		LogLevel: ipamConfigAlias.LogLevel,
-		OverlappingRanges: ipamConfigAlias.OverlappingRanges,
-		SleepForRace: ipamConfigAlias.SleepForRace,
-		Gateway: backwardsCompatibleIPAddress(ipamConfigAlias.Gateway),
-		Kubernetes: ipamConfigAlias.Kubernetes,
-		ConfigurationPath: ipamConfigAlias.ConfigurationPath,
-		PodName: ipamConfigAlias.PodName,
-		PodNamespace: ipamConfigAlias.PodNamespace,
+		Name: ipamConfigAlias.Name,
+		Type: ipamConfigAlias.Type,
+		Routes: ipamConfigAlias.Routes,
+		Datastore: ipamConfigAlias.Datastore,
+		Addresses: ipamConfigAlias.Addresses,
+		OmitRanges: ipamConfigAlias.OmitRanges,
+		DNS: ipamConfigAlias.DNS,
+		Range: ipamConfigAlias.Range,
+		RangeStart: backwardsCompatibleIPAddress(ipamConfigAlias.RangeStart),
+		RangeEnd: backwardsCompatibleIPAddress(ipamConfigAlias.RangeEnd),
+		GatewayStr: ipamConfigAlias.GatewayStr,
+		EtcdHost: ipamConfigAlias.EtcdHost,
+		EtcdUsername: ipamConfigAlias.EtcdUsername,
+		EtcdPassword: ipamConfigAlias.EtcdPassword,
+		EtcdKeyFile: ipamConfigAlias.EtcdKeyFile,
+		EtcdCertFile: ipamConfigAlias.EtcdCertFile,
+		EtcdCACertFile: ipamConfigAlias.EtcdCACertFile,
+		LeaderLeaseDuration: ipamConfigAlias.LeaderLeaseDuration,
+		LeaderRenewDeadline: ipamConfigAlias.LeaderRenewDeadline,
+		LeaderRetryPeriod: ipamConfigAlias.LeaderRetryPeriod,
+		LogFile: ipamConfigAlias.LogFile,
+		LogLevel: ipamConfigAlias.LogLevel,
+		OverlappingRanges: ipamConfigAlias.OverlappingRanges,
+		ReconcilerCronExpression: ipamConfigAlias.ReconcilerCronExpression,
+		SleepForRace: ipamConfigAlias.SleepForRace,
+		Gateway: backwardsCompatibleIPAddress(ipamConfigAlias.Gateway),
+		Kubernetes: ipamConfigAlias.Kubernetes,
+		ConfigurationPath: ipamConfigAlias.ConfigurationPath,
+		PodName: ipamConfigAlias.PodName,
+		PodNamespace: ipamConfigAlias.PodNamespace,
 	}
 
 	return nil
 }
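The only functional change in pkg/types above is the new ReconcilerCronExpression field, surfaced in the IPAM config JSON as "reconciler_cron_expression"; the remaining +/- churn in the struct and in UnmarshalJSON is gofmt realignment around the longer field name. As a minimal, self-contained sketch of how the new key is expected to deserialize (the struct below is a trimmed, hypothetical stand-in for illustration only, not the real IPAMConfig):

package main

import (
	"encoding/json"
	"fmt"
)

// trimmedIPAMConfig is a hypothetical stand-in for the full IPAMConfig,
// carrying only the fields needed to show the new knob.
type trimmedIPAMConfig struct {
	Type                     string `json:"type"`
	Range                    string `json:"range"`
	ReconcilerCronExpression string `json:"reconciler_cron_expression,omitempty"`
}

func main() {
	raw := []byte(`{
		"type": "whereabouts",
		"range": "192.168.2.0/24",
		"reconciler_cron_expression": "30 4 * * *"
	}`)

	var cfg trimmedIPAMConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Prints: 30 4 * * *
	fmt.Println(cfg.ReconcilerCronExpression)
}

Because the field is tagged omitempty, existing configs without the key keep working and simply leave the expression empty.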
diff --git a/script/install-cni.sh b/script/install-cni.sh
index 5e79957b5..6eab0ff56 100755
--- a/script/install-cni.sh
+++ b/script/install-cni.sh
@@ -102,7 +102,8 @@ EOF
       "datastore": "kubernetes",
       "kubernetes": {
         "kubeconfig": "${WHEREABOUTS_KUBECONFIG_LITERAL}"
-      }
+      },
+      "reconciler_cron_expression": "30 4 * * *"
     }
EOF
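install-cni.sh now writes a default schedule of "30 4 * * *" into the generated whereabouts config: a standard five-field cron expression (minute, hour, day-of-month, month, day-of-week), i.e. once a day at 04:30. The vendored github.com/go-co-op/gocron library consumes exactly this kind of expression; a minimal sketch of how such a value could drive a periodic job, assuming a hypothetical reconcile() hook rather than the actual wiring added elsewhere in this series:

package main

import (
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	// Hypothetical reconcile hook; stands in for the real IP reconciler entry point.
	reconcile := func() {
		// clean up orphaned IP allocations here
	}

	s := gocron.NewScheduler(time.UTC)
	// "30 4 * * *": minute=30, hour=4, every day -> run once a day at 04:30 UTC.
	if _, err := s.Cron("30 4 * * *").Do(reconcile); err != nil {
		panic(err)
	}
	s.StartBlocking()
}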