From 4a613a6e4a525eb50670baa844c0fa8018c78b1e Mon Sep 17 00:00:00 2001 From: Randy Coburn Date: Tue, 10 Sep 2019 16:30:24 +0200 Subject: [PATCH] Initial commit. Works. --- .gitignore | 8 + Dockerfile.debug | 8 + Dockerfile.test | 10 + README.md | 101 +++++ build_and_test.sh | 37 ++ configfile/configfile.go | 120 ++++++ configfile/configfile_test.go | 261 +++++++++++++ configfile/configfileexample.go | 70 ++++ configfile/defaultConfig.go | 13 + configfile/logging.go | 34 ++ configfile/processes.go | 19 + configfile/processmanager.go | 13 + configfile/templating/templating.go | 99 +++++ configfile/templating/templating_test.go | 50 +++ go.mod | 14 + go.sum | 22 ++ internallogger/internallogger.go | 118 ++++++ launch.yaml | 26 ++ main.go | 152 ++++++++ passwd | 1 + processlogger/allloggers/all.go | 17 + processlogger/console/console.go | 79 ++++ processlogger/devnull/devnull.go | 47 +++ processlogger/filelogger/filelogger.go | 86 +++++ processlogger/filelogger/filelogger_test.go | 74 ++++ processlogger/filelogger/rotationwriter.go | 149 ++++++++ processlogger/interfaces.go | 19 + processlogger/logManager.go | 257 +++++++++++++ processlogger/logManager_test.go | 148 ++++++++ processlogger/loggerRegistration.go | 17 + processlogger/syslog/levelextraction.go | 53 +++ processlogger/syslog/levelextraction_test.go | 120 ++++++ processlogger/syslog/syslog.go | 246 ++++++++++++ processlogger/syslog/syslog_test.go | 25 ++ processmanager/process.go | 32 ++ processmanager/processmanager.go | 380 +++++++++++++++++++ testbin/go.mod | 5 + testbin/go.sum | 2 + testbin/main.go | 142 +++++++ testbin/spammer.go | 54 +++ 40 files changed, 3128 insertions(+) create mode 100644 .gitignore create mode 100644 Dockerfile.debug create mode 100644 Dockerfile.test create mode 100644 README.md create mode 100755 build_and_test.sh create mode 100644 configfile/configfile.go create mode 100644 configfile/configfile_test.go create mode 100644 configfile/configfileexample.go create mode 100644 configfile/defaultConfig.go create mode 100644 configfile/logging.go create mode 100644 configfile/processes.go create mode 100644 configfile/processmanager.go create mode 100644 configfile/templating/templating.go create mode 100644 configfile/templating/templating_test.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internallogger/internallogger.go create mode 100644 launch.yaml create mode 100644 main.go create mode 100644 passwd create mode 100644 processlogger/allloggers/all.go create mode 100644 processlogger/console/console.go create mode 100644 processlogger/devnull/devnull.go create mode 100644 processlogger/filelogger/filelogger.go create mode 100644 processlogger/filelogger/filelogger_test.go create mode 100644 processlogger/filelogger/rotationwriter.go create mode 100644 processlogger/interfaces.go create mode 100644 processlogger/logManager.go create mode 100644 processlogger/logManager_test.go create mode 100644 processlogger/loggerRegistration.go create mode 100644 processlogger/syslog/levelextraction.go create mode 100644 processlogger/syslog/levelextraction_test.go create mode 100644 processlogger/syslog/syslog.go create mode 100644 processlogger/syslog/syslog_test.go create mode 100644 processmanager/process.go create mode 100644 processmanager/processmanager.go create mode 100644 testbin/go.mod create mode 100644 testbin/go.sum create mode 100644 testbin/main.go create mode 100644 testbin/spammer.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..63f3b70 --- 
/dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+/launch
+/testbin/testbin
+# Available publicly and likely to fall out of date.
+# https://help.papertrailapp.com/kb/configuration/encrypting-remote-syslog-with-tls-ssl/
+papertrail_cert.crt
+
+# while I sort these out
+READMEs/
\ No newline at end of file
diff --git a/Dockerfile.debug b/Dockerfile.debug
new file mode 100644
index 0000000..d8e344f
--- /dev/null
+++ b/Dockerfile.debug
@@ -0,0 +1,8 @@
+FROM ubuntu:latest
+
+USER nobody
+
+ADD ./launch* /
+ADD ./testbin/testbin /testbin
+
+ENTRYPOINT [ "/launch" ]
\ No newline at end of file
diff --git a/Dockerfile.test b/Dockerfile.test
new file mode 100644
index 0000000..a75b3f0
--- /dev/null
+++ b/Dockerfile.test
@@ -0,0 +1,10 @@
+FROM gcr.io/distroless/base
+
+ADD ./passwd /etc/passwd
+
+USER nobody
+
+ADD ./launch* /
+ADD ./testbin/testbin /testbin
+
+ENTRYPOINT [ "/launch" ]
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..cbbdd4e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,101 @@
+# Launch
+
+A simple runtime-agnostic process manager that eases Docker service startup and logging.
+Consider it a halfway house between Kubernetes and Docker.
+
+Launch is expected to be process 1 in a container. It watches the other processes in your container and, once they have all finished, exits so that the container can stop correctly.
+
+## What can Launch help with
+
+Launch is designed to be a process manager with simple but powerful logging. It borrows some ideas from Kubernetes without requiring you to deploy a Kubernetes stack.
+
+* You can run multiple processes in a single container.
+* You can ship logs from processes to different logging engines.
+* You can run init processes before your main processes. This allows you to collect artifacts or secrets, or simply set up an environment.
+* A single main process dying will bring down the container, gracefully shutting down the other applications.
+
+## TODO
+
+Features still on the back burner:
+
+* [ ] Delayed start on processes.
+* [ ] Health check processes - not sure how to do this yet.
+* [ ] Restart failed main processes if configured.
+* [ ] Re-render the configuration file after the init processes have run, so that collected secrets or configuration can be used.
+
+## Logging
+
+Launch has the following statements ingrained into its design, which guide its approach to logging:
+
+1. Development teams should not have to worry about where logs are going while they are building their projects.
+1. A change to where logs end up should not force projects to refactor source code.
+1. It is the responsibility of the developer to push all logging out to STDOUT and STDERR. Launch will collect the logs and forward them to the logging engine defined by the project's configuration.
+
+Launch does, however, give the developer the choice of where the logs end up. This does not contradict points 1 and 2 because the choice is made at run time rather than at development time.
+
+Logging is important to all applications. However, that importance does not trump keeping the service running. Therefore all logging plugins send logs in `Best Effort` mode.
+
+Logging engines available:
+
+1. Console
+1. DevNull
+1. File with rotation
+1. Syslog
+
+See the
+[Logging Documentation](./READMEs/Logging.MD)
+for details on each logging engine.
+
+The use of Launch's logging engines is optional. You could, if you wanted to, set up Filebeat in the container and have it read from files.
+Use the console logger for any stray logs.
+Another option is to use the file logger to feed the files that Filebeat watches.
+
+## Processes
+
+Launch has two process types when running:
+
+* Init processes
+* Main processes
+
+### Init Processes
+
+Initialization processes are used to get the environment ready for the main processes to run.
+Init processes run sequentially, in the order that they are defined in the configuration file.
+Each process **MUST** finish successfully (exit code 0) before the next process starts. Only once all init processes are complete will the main processes start. If a single init process fails, Launch will stop further processes from starting and terminate.
+
+### Main Processes
+
+Main processes are processes that need to run continually. They dictate the lifespan of the container. Multiple main processes can be run; however, currently all of them **MUST** be running for the container to be considered healthy.
+
+If a main process terminates for any reason, Launch will send termination signals to all remaining main processes. It gives them a grace period to terminate, after which it forcefully terminates them. The grace period is configuration driven, so you can give each process the time it needs to wrap up its tasks. The default grace period is 30 seconds.
+
+## Run time
+
+Launch effectively acts as an init system. It does this to ease administration in container clusters by wrapping up everything an application needs to run, including any sidecar services for logging, metrics and data harvesting.
+
+Launch is aware that it does not run in isolation and can be terminated by a controller of some kind. To this effect, Launch will forward selected signals that it receives to the underlying processes.
+
+Signals currently supported for forwarding:
+
+* SIGTERM
+* SIGINT
+
+Launch is designed to work in a container, so no other signalling is expected.
+
+## Configuration
+
+The configuration YAML file is the driving force behind Launch. It tells Launch which processes to run and with what arguments, where to send logs and which tags to put on them.
+
+The configuration file has a templating feature that allows you to make the configuration dynamic.
+
+The configuration file has many sections, which are documented in the
+[README dedicated to configuration](./READMEs/ConfigurationFile.MD).
+
+## Contributing
+
+This project is still very much in active development. Expect changes and improvements.
+
+There is a simple build script that performs the current integration testing.
+
+Use `./build_and_test.sh` to run only the tests.
+Use `./build_and_test.sh gobuild` to also rebuild the Go project.
diff --git a/build_and_test.sh b/build_and_test.sh
new file mode 100755
index 0000000..a60f34f
--- /dev/null
+++ b/build_and_test.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Use "./build_and_test.sh" to run only the tests
+# Use "./build_and_test.sh gobuild" to also rebuild the go project
+
+if [[ $1 = "gobuild" ]]; then
+    echo "Building the go project!"
+    curdir=$(pwd)
+    echo "Building launch"
+    CGO_ENABLED=0 go build -a -installsuffix cgo -o launch .
+    cd testbin
+    echo "Building testbin"
+    CGO_ENABLED=0 go build -a -installsuffix cgo -o testbin .
+    cd "$curdir"
+    echo "Finished building"
+else
+    echo "Skipping go build"
+fi
+
+docker build -t morfien101/launch-test:latest -f Dockerfile.test .
+docker build -t morfien101/launch-test:debug -f Dockerfile.debug .
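+
+# Optional sanity check (a sketch, left commented out): the launch binary can
+# print an example configuration with its -example-config flag, which is a
+# quick way to eyeball the supported settings before editing launch.yaml.
+# docker run --rm morfien101/launch-test:latest -example-config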
+ +echo "#########################" +echo "## Running full config ##" +echo "#########################" +docker run \ + -it \ + -v $(pwd)/launch.yaml:/launch.yaml \ + morfien101/launch-test:latest + +#echo "############################" +#echo "## Running minimal config ##" +#echo "############################" +# +#docker run \ +# -it \ +# -v $(pwd)/launch_minimal.yaml:/launch.yaml \ +# morfien101/launch-test:latest \ No newline at end of file diff --git a/configfile/configfile.go b/configfile/configfile.go new file mode 100644 index 0000000..969c1fd --- /dev/null +++ b/configfile/configfile.go @@ -0,0 +1,120 @@ +package configfile + +import ( + "fmt" + "io/ioutil" + + "github.com/morfien101/launch/configfile/templating" + "gopkg.in/yaml.v2" +) + +// Config is a struct that represents the YAML file that we want to pass in. +type Config struct { + ProcessManager ProcessManager `yaml:"process_manager"` + Processes Processes `yaml:"processes"` + DefaultLoggerConfig DefaultLoggerDetails `yaml:"default_logger_config"` +} + +// New will return a new config file if one can be read from the location +// specified. An error is also returned if something goes wrong. +func New(filePath string) (*Config, error) { + // Digest the config file + fileBytes, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("Could not read config file. Error: %s", err) + } + + decodedYaml, err := templating.GenerateTemplate(fileBytes) + if err != nil { + return nil, fmt.Errorf("Failed to decode template. Error: %s", err) + } + newConfig := &Config{} + if err := yaml.Unmarshal(decodedYaml, newConfig); err != nil { + return nil, fmt.Errorf("Failed to unmarshal yaml. Error: %s", err) + } + + newConfig.setDefaultLoggerConfig() + newConfig.setDefaultProcessLogger() + newConfig.setDefaultProcessManager() + newConfig.setDefaultProcessTimeout() + + return newConfig, nil +} + +// setDefaultLoggerConfig will setup a default logging config if one doesn't already exist. +func (cf *Config) setDefaultLoggerConfig() { + if &cf.DefaultLoggerConfig == nil { + cf.DefaultLoggerConfig = DefaultLoggerDetails{ + Config: defaultLoggingEngine, + } + } + if cf.DefaultLoggerConfig.Config.Engine == "" { + cf.DefaultLoggerConfig.Config.Engine = defaultLoggingEngine.Engine + } +} + +// setDefaultProcessLogger will go through the processes and set the default logging if there is +// nothing set. 
The following rules will apply +// The logging engine will be the default engine +// The process logging name should the be name given to the process +// +// NOTE: setDefaultLoggerConfig should be called first +// +func (cf *Config) setDefaultProcessLogger() { + createLoggingConfig := func(proc *Process) { + proc.LoggerConfig = LoggingConfig{} + } + setName := func(proc *Process) { + proc.LoggerConfig.ProcessName = proc.Name + } + setEngine := func(proc *Process) { + proc.LoggerConfig.Engine = cf.DefaultLoggerConfig.Config.Engine + } + f := func(procList []*Process) { + for _, proc := range procList { + if &proc.LoggerConfig == nil { + // Create a logging config + createLoggingConfig(proc) + } + if proc.LoggerConfig.ProcessName == "" { + setName(proc) + } + if proc.LoggerConfig.Engine == "" { + setEngine(proc) + } + } + } + + f(cf.Processes.InitProcesses) + f(cf.Processes.MainProcesses) +} + +func (cf *Config) setDefaultProcessManager() { + if &cf.ProcessManager == nil { + cf.ProcessManager = defaultProcessManager + } + if &cf.ProcessManager.LoggerConfig == nil { + cf.ProcessManager.LoggerConfig = defaultProcessManager.LoggerConfig + } + if cf.ProcessManager.LoggerConfig.Engine == "" { + cf.ProcessManager.LoggerConfig.Engine = defaultProcessManager.LoggerConfig.Engine + } +} + +func (cf *Config) setDefaultProcessTimeout() { + f := func(procs []*Process) { + for _, proc := range procs { + if proc.TermTimeout <= 0 { + proc.TermTimeout = defaultProcTimeout + } + } + } + + f(cf.Processes.InitProcesses) + f(cf.Processes.MainProcesses) +} + +func (cf Config) String() string { + output, _ := yaml.Marshal(cf) + return string(output) +} diff --git a/configfile/configfile_test.go b/configfile/configfile_test.go new file mode 100644 index 0000000..7277798 --- /dev/null +++ b/configfile/configfile_test.go @@ -0,0 +1,261 @@ +package configfile + +import ( + "fmt" + "os" + "testing" + + "gopkg.in/yaml.v2" + + "github.com/Flaque/filet" +) + +func TestExampleConfig(t *testing.T) { + out, err := ExampleConfigFile() + if err != nil { + t.Log(err) + t.Fail() + } + t.Log(out) +} + +func TestNewConfig(t *testing.T) { + out, err := ExampleConfigFile() + if err != nil { + t.Fatal(err) + } + + testingConfigFile := filet.TmpFile(t, "", out) + _, err = New(testingConfigFile.Name()) + if err != nil { + t.Fatalf("Failed to create a config struct. Error: %s", err) + } +} + +func TestTemplating(t *testing.T) { + os.Setenv("SYSLOG_SERVER", "syslog.test.local") + testYaml := `default_logger_config: + logging_config: + engine: syslog + syslog: + program_name: example_service + address: {{ env "SYSLOG_SERVER" }} + protocol: {{ default ( env "SYSLOG_PROTOCOL" ) "udp" }}` + + testingfile := filet.TmpFile(t, "", testYaml) + conf, err := New(testingfile.Name()) + if err != nil { + t.Fatalf("Failed to generate templated configuration. Got Error: %s", err) + } + + tests := []struct { + want string + got string + function string + }{ + { + want: "syslog.test.local", + got: conf.DefaultLoggerConfig.Config.Syslog.Address, + function: `{{ env "SYSLOG_SERVER" }}`, + }, + { + want: "udp", + got: conf.DefaultLoggerConfig.Config.Syslog.ConnectionType, + function: `{{ default ( env "SYSLOG_PROTOCOL" ) "udp" }}`, + }, + } + + for _, test := range tests { + if test.want != test.got { + t.Logf("%s failed. 
Got %s, Want: %s", + test.function, + test.got, + test.want, + ) + t.Fail() + } + } +} + +func TestDefaultConfig(t *testing.T) { + cf := Config{ + Processes: Processes{ + InitProcesses: []*Process{ + &Process{ + Name: "TestInit_1", + CMD: "/bin/false", + Args: []string{"arg1", "arg2"}, + }, + }, + MainProcesses: []*Process{ + &Process{ + Name: "TestMain_1", + CMD: "/bin/false", + Args: []string{"arg1", "arg2"}, + }, + }, + }, + } + + // Test Default Loggers + cf.setDefaultLoggerConfig() + if cf.DefaultLoggerConfig.Config.Engine == "" { + t.Logf("Setting the default logger config resulted in a empty engine") + t.Fail() + } + + // Test generated logging for processes from default logger + cf.setDefaultProcessLogger() + if cf.Processes.InitProcesses[0].LoggerConfig.Engine != cf.DefaultLoggerConfig.Config.Engine { + t.Logf("Trying to set the default logger engine for a process in the Init branch did not work") + t.Fail() + } + if cf.Processes.InitProcesses[0].LoggerConfig.ProcessName != cf.Processes.InitProcesses[0].Name { + t.Logf("Trying to set the default logger process_name for a process in the Init branch did not work") + t.Fail() + } + if cf.Processes.MainProcesses[0].LoggerConfig.Engine != cf.DefaultLoggerConfig.Config.Engine { + t.Logf("Trying to set the default logger for a process in the Main branch did not work") + t.Fail() + } + if cf.Processes.MainProcesses[0].LoggerConfig.ProcessName != cf.Processes.MainProcesses[0].Name { + t.Logf("Trying to set the default logger process_name for a process in the Main branch did not work") + t.Fail() + } + + // Test ProcessManager config generation + cf.setDefaultProcessManager() + if cf.ProcessManager.LoggerConfig.Engine != defaultProcessManager.LoggerConfig.Engine { + t.Logf("Process manager did not get expect default configuration set") + t.Fail() + } + + // visual readout + out, err := yaml.Marshal(cf) + if err != nil { + t.Logf("Failed to marshal the config to yaml. Error: %s", err) + t.Fail() + } + t.Log("\n", string(out)) +} + +func TestDefaultConfigNotRequired(t *testing.T) { + pmLogger := "proc_logging" + defaultLogger := "default_logger" + initLogger := "init_logger" + initLoggingName := "init_proc" + + cf := Config{ + Processes: Processes{ + InitProcesses: []*Process{ + &Process{ + Name: "TestInit_1", + CMD: "/bin/false", + Args: []string{"arg1", "arg2"}, + LoggerConfig: LoggingConfig{ + Engine: initLogger, + ProcessName: initLoggingName, + }, + }, + }, + }, + DefaultLoggerConfig: DefaultLoggerDetails{ + Config: LoggingConfig{ + Engine: defaultLogger, + }, + }, + ProcessManager: ProcessManager{ + LoggerConfig: LoggingConfig{ + Engine: pmLogger, + }, + }, + } + + // Test Default Loggers + cf.setDefaultLoggerConfig() + cf.setDefaultProcessManager() + cf.setDefaultProcessLogger() + + if cf.ProcessManager.LoggerConfig.Engine != pmLogger { + t.Logf("process_manager logging config was over written. Got: %s, Want: %s", cf.ProcessManager.LoggerConfig.Engine, pmLogger) + t.Fail() + } + if cf.DefaultLoggerConfig.Config.Engine != defaultLogger { + t.Logf("default logger engine was overwritten. Got: %s, Want: %s", cf.DefaultLoggerConfig.Config.Engine, defaultLogger) + t.Fail() + } + if cf.Processes.InitProcesses[0].LoggerConfig.ProcessName != initLoggingName { + t.Logf("init process logging name was overwritten. Got: %s, Want: %s", cf.Processes.InitProcesses[0].LoggerConfig.ProcessName, initLoggingName) + t.Fail() + } + if cf.Processes.InitProcesses[0].LoggerConfig.Engine != initLogger { + t.Logf("init process logging name was overwritten. 
Got: %s, Want: %s", cf.Processes.InitProcesses[0].LoggerConfig.Engine, initLogger) + t.Fail() + } +} + +func TestDefaultTimeoutForProcesses(t *testing.T) { + cf := Config{ + Processes: Processes{ + InitProcesses: []*Process{ + &Process{ + Name: "TestInit_1", + CMD: "/bin/false", + Args: []string{"arg1", "arg2"}, + }, + }, + MainProcesses: []*Process{ + &Process{ + Name: "TestMain_1", + CMD: "/bin/false", + Args: []string{"arg1", "arg2"}, + }, + }, + }, + } + + cf.setDefaultProcessTimeout() + + if cf.Processes.InitProcesses[0].TermTimeout != defaultProcTimeout { + t.Logf("Default timeout on init process was not set as expected. Got: %d, Want: %d.", cf.Processes.InitProcesses[0].TermTimeout, defaultProcTimeout) + t.Fail() + } + if cf.Processes.MainProcesses[0].TermTimeout != defaultProcTimeout { + t.Logf("Default timeout on main process was not set as expected. Got: %d, Want: %d.", cf.Processes.MainProcesses[0].TermTimeout, defaultProcTimeout) + t.Fail() + } +} + +func TestConfigString(t *testing.T) { + cf := Config{ + Processes: Processes{ + InitProcesses: []*Process{ + &Process{ + Name: "TestInit_1", + CMD: "/bin/false", + Args: []string{"arg1", "arg2"}, + LoggerConfig: LoggingConfig{ + Engine: "syslog", + ProcessName: "test_proc_name", + }, + }, + }, + }, + DefaultLoggerConfig: DefaultLoggerDetails{ + Config: LoggingConfig{ + Engine: "console", + }, + }, + ProcessManager: ProcessManager{ + LoggerConfig: LoggingConfig{ + Engine: "console", + }, + }, + } + + if fmt.Sprintf("%s", cf) == "" { + t.Logf("Failed to change config to a string") + t.Fail() + } + t.Logf("config as string:\n%s", cf) +} diff --git a/configfile/configfileexample.go b/configfile/configfileexample.go new file mode 100644 index 0000000..80782ae --- /dev/null +++ b/configfile/configfileexample.go @@ -0,0 +1,70 @@ +package configfile + +import ( + "fmt" + + yaml "gopkg.in/yaml.v2" +) + +// ExampleConfigFile will return a string with an example yaml file. +// All features should be in here to present to the user. +func ExampleConfigFile() (string, error) { + exampleInitProcesses := []*Process{ + { + Name: "Process1", + CMD: "/example/bin1", + Args: []string{"--arg1", "two"}, + CombindOutput: false, + LoggerConfig: LoggingConfig{ + Engine: "console", + }, + }, + { + Name: "Process2", + Args: []string{"--print", "extra"}, + }, + } + exampleMainProcesses := []*Process{ + { + Name: "Process1", + CMD: "/example/bin1", + Args: []string{"--arg1", "--arg2", "--arg3", "extra"}, + CombindOutput: false, + }, { + Name: "Process2", + CMD: "/example/bin2", + Args: []string{"--print", "extra"}, + }, + } + + exampleLoggerConfig := DefaultLoggerDetails{ + Config: LoggingConfig{ + Engine: "syslog", + Syslog: Syslog{ + ProgramName: "example_service", + Address: "logs.papertrail.com:16900", + }, + }, + } + + exampleProcessManagerConfig := ProcessManager{ + LoggerConfig: LoggingConfig{ + Engine: "syslog", + }, + } + exampleConfig := &Config{ + ProcessManager: exampleProcessManagerConfig, + Processes: Processes{ + InitProcesses: exampleInitProcesses, + MainProcesses: exampleMainProcesses, + }, + DefaultLoggerConfig: exampleLoggerConfig, + } + + out, err := yaml.Marshal(exampleConfig) + if err != nil { + return "", fmt.Errorf("Creating example failed. 
Error: %s", err) + } + + return string(out), nil +} diff --git a/configfile/defaultConfig.go b/configfile/defaultConfig.go new file mode 100644 index 0000000..35fe096 --- /dev/null +++ b/configfile/defaultConfig.go @@ -0,0 +1,13 @@ +package configfile + +var ( + defaultLoggingEngine = LoggingConfig{ + Engine: "console", + } + + defaultProcessManager = ProcessManager{ + LoggerConfig: defaultLoggingEngine, + } + + defaultProcTimeout = 30 +) diff --git a/configfile/logging.go b/configfile/logging.go new file mode 100644 index 0000000..88fa7b0 --- /dev/null +++ b/configfile/logging.go @@ -0,0 +1,34 @@ +package configfile + +// LoggingConfig is a struct that will hold the values of the logging +// configuration of the process or process manager +type LoggingConfig struct { + Engine string `yaml:"engine,omitempty"` + ProcessName string `yaml:"process_name,omitempty"` + Syslog Syslog `yaml:"syslog,omitempty"` + Logfile FileLogger `yaml:"file_logger,omitempty"` +} + +// DefaultLoggerDetails will hold the default logger configuration +type DefaultLoggerDetails struct { + Config LoggingConfig `yaml:"logging_config,omitempty"` +} + +// Syslog is used to send configuration to the syslog logger +type Syslog struct { + ProgramName string `yaml:"program_name"` + Address string `yaml:"address"` + ConnectionType string `yaml:"protocol,omitempty"` + CertificateBundlePath string `yaml:"cert_bundle_path,omitempty"` + ExtractLogLevel bool `yaml:"extract_log_level,omitempty"` + OverrideHostname string `yaml:"override_hostname,omitempty"` + AddContainerNameToTag bool `yaml:"append_container_name_to_tag,omitempty"` + AddContainerNameToHostname bool `yaml:"append_container_name_to_hostname,omitempty"` +} + +// FileLogger is a logger that will write to files +type FileLogger struct { + Filename string `yaml:"filepath"` + SizeLimit uint64 `yaml:"size_limit"` + HistoricalFiles int `yaml:"historical_files_limit"` +} diff --git a/configfile/processes.go b/configfile/processes.go new file mode 100644 index 0000000..5e40bf7 --- /dev/null +++ b/configfile/processes.go @@ -0,0 +1,19 @@ +package configfile + +// Processes holds all the processes that need to be executed. +type Processes struct { + InitProcesses []*Process `yaml:"init_processes,omitempty"` + MainProcesses []*Process `yaml:"main_processes"` +} + +// Process is a struct that consumes a yaml configration and holds config for a +// process that needs to be run. 
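+// A minimal YAML sketch of a single process entry (the keys follow the yaml
+// tags on the struct below; the values are illustrative only):
+//
+//   - name: my_service
+//     command: /my_service
+//     arguments:
+//       - "--port"
+//       - "8080"
+//     logging_config:
+//       engine: console
+//     combine_output: true
+//     termination_timeout_seconds: 30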
+type Process struct { + Name string `yaml:"name"` + CMD string `yaml:"command"` + Args []string `yaml:"arguments"` + // loggingEngine can be either Papertrail or ELK + LoggerConfig LoggingConfig `yaml:"logging_config"` + CombindOutput bool `yaml:"combine_output,omitempty"` + TermTimeout int `yaml:"termination_timeout_seconds,omitempty"` +} diff --git a/configfile/processmanager.go b/configfile/processmanager.go new file mode 100644 index 0000000..79d0641 --- /dev/null +++ b/configfile/processmanager.go @@ -0,0 +1,13 @@ +package configfile + +// ProcessManager hold configuration for the Process Manger itself +type ProcessManager struct { + LoggerConfig LoggingConfig `yaml:"logging_config"` + DebugLogging bool `yaml:"debug_logging,omitempty"` + DebugOptions PMDebugOptions `yaml:"debug_options,omitempty"` +} + +// PMDebugOptions holds configuration for debugging +type PMDebugOptions struct { + PrintGeneratedConfig bool `yaml:"show_generated_config"` +} diff --git a/configfile/templating/templating.go b/configfile/templating/templating.go new file mode 100644 index 0000000..dad4e85 --- /dev/null +++ b/configfile/templating/templating.go @@ -0,0 +1,99 @@ +// Package templating is used to allow the configuration files to have +// some dynamic configuration to them. +// It was shamelessly taken from +// https://github.com/tnozicka/goenvtemplator/blob/master/template_test.go +// However I didn't need most of the other stuff in the package +package templating + +import ( + "bytes" + "errors" + "fmt" + "os" + "text/template" +) + +type OptionalString struct { + ptr *string +} + +var funcMap = template.FuncMap{ + "env": Env, + "default": Default, + "required": Required, +} + +func (s OptionalString) String() string { + if s.ptr == nil { + return "" + } + return *s.ptr +} + +func Env(key string) OptionalString { + value, ok := os.LookupEnv(key) + if !ok { + return OptionalString{nil} + } + return OptionalString{&value} +} + +func Default(args ...interface{}) (string, error) { + for _, arg := range args { + if arg == nil { + continue + } + switch v := arg.(type) { + case string: + return v, nil + case *string: + if v != nil { + return *v, nil + } + case OptionalString: + if v.ptr != nil { + return *v.ptr, nil + } + default: + return "", fmt.Errorf("Default: unsupported type '%T'", v) + } + } + + return "", errors.New("Default: all arguments are nil") +} + +func Required(arg interface{}) (string, error) { + if arg == nil { + return "", errors.New("Required argument is missing") + } + + switch value := arg.(type) { + case string: + return value, nil + case *string: + if value != nil { + return *value, nil + } + case OptionalString: + if value.ptr != nil { + return *value.ptr, nil + } + default: + return "", fmt.Errorf("Requires: unsupported type '%T'", value) + } + return "", nil +} + +// GenerateTemplate will action all the functions on the configuration file +func GenerateTemplate(source []byte) ([]byte, error) { + tplt, err := template.New("configfile").Funcs(funcMap).Parse(string(source)) + if err != nil { + return nil, fmt.Errorf("failed to create template. Error: %s", err) + } + + var buffer bytes.Buffer + if err = tplt.Execute(&buffer, nil); err != nil { + return nil, fmt.Errorf("failed to transform template. 
Error: %s", err) + } + return buffer.Bytes(), nil +} diff --git a/configfile/templating/templating_test.go b/configfile/templating/templating_test.go new file mode 100644 index 0000000..62512bd --- /dev/null +++ b/configfile/templating/templating_test.go @@ -0,0 +1,50 @@ +package templating + +import ( + "os" + "testing" +) + +func TestGenerateTemplate(t *testing.T) { + os.Setenv("ALWAYS_THERE", "always_there") + + source := []byte(` +K={{ env "ALWAYS_THERE" }} +K={{ env "NONEXISTING" }} +K={{ .NONEXISTING }} +K={{ default .NonExisting "default value" }} +K={{ default (env "ALWAYS_THERE") }} +K={{ required (default ( .NotValid ) "Valid") }} +K={{ default (env "NONEXISTING") "default value" }} +`) + + correctOutput := []byte(` +K=always_there +K= +K= +K=default value +K=always_there +K=Valid +K=default value +`) + + result, err := GenerateTemplate(source) + if err != nil { + t.Fatal(err) + } + + if string(result) != string(correctOutput) { + t.Fatalf("Result:\n%s\n==== is not equal to correct template output:\n%s\n", result, correctOutput) + } +} + +func TestFailedRequired(t *testing.T) { + source := []byte(`{{ required ( .NotValid ) }}`) + _, err := GenerateTemplate(source) + + if err == nil { + t.Fail() + t.Log("Required failed to see pickup a failed value") + } + +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..bc5e1f3 --- /dev/null +++ b/go.mod @@ -0,0 +1,14 @@ +module github.com/morfien101/launch + +go 1.12 + +require ( + github.com/Flaque/filet v0.0.0-20190209224823-fc4d33cfcf93 + github.com/aws/aws-sdk-go v1.23.12 + github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae + github.com/satori/go.uuid v1.2.0 // indirect + github.com/silverstagtech/gotracer v0.2.0 + github.com/silverstagtech/srslog v0.2.1 + github.com/spf13/afero v1.2.2 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..5c0b1df --- /dev/null +++ b/go.sum @@ -0,0 +1,22 @@ +github.com/Flaque/filet v0.0.0-20190209224823-fc4d33cfcf93 h1:NnAUCP75PRm8yWE7+MZBIAR6PA9iwsBYEc6ZNYOy+AQ= +github.com/Flaque/filet v0.0.0-20190209224823-fc4d33cfcf93/go.mod h1:TK+jB3mBs+8ZMWhU5BqZKnZWJ1MrLo8etNVg51ueTBo= +github.com/aws/aws-sdk-go v1.23.12 h1:2UnxgNO6Y5J1OrkXS8XNp0UatDxD1bWHiDT62RDPggI= +github.com/aws/aws-sdk-go v1.23.12/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo= +github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/silverstagtech/gotracer v0.2.0 h1:80HX+VLmX1bdy1f8Hq8v/05Hx/QdyWhsoNhk/E9MUrQ= +github.com/silverstagtech/gotracer v0.2.0/go.mod h1:11IG1jPKZc+SNt7EIgdyorjFxuo8uf6thPeE7kPuMig= +github.com/silverstagtech/srslog v0.2.1 h1:6l3QyXNfaikdnEy6904b+9ddpr4Fau4Jki5PwU2f08E= +github.com/silverstagtech/srslog v0.2.1/go.mod h1:ZTWonwdt+RAZ+Dz2A9coegAGg9fCmaFCeohQsRUMBVQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +golang.org/x/text v0.3.0 
h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internallogger/internallogger.go b/internallogger/internallogger.go new file mode 100644 index 0000000..f16af7b --- /dev/null +++ b/internallogger/internallogger.go @@ -0,0 +1,118 @@ +// Package internallogger is used to log messages out for the process manager itself. +// There is a normal logger which logs to stdout and stderr and a debug logger which +// logs to stdout only if debug logging is turned on. +package internallogger + +import ( + "fmt" + + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/processlogger" +) + +const ( + // processManagerSource is used to tag the messages when they are sent on to the logging engine + // required. + processManagerSource = "launch_process_manager" +) + +//IntErrLogger is a logger that will log at Error level +type IntErrLogger interface { + Errorf(format string, args ...interface{}) + Errorln(s interface{}) +} + +// IntStdLogger is a logger that will minic fmt.Printf or fmt.Println +type IntStdLogger interface { + Printf(format string, args ...interface{}) + Println(s interface{}) +} + +// IntDebugLogger is a logger that will log at standard level but only +// if the debug toggle is turned on. +type IntDebugLogger interface { + Debugf(format string, args ...interface{}) + Debugln(s interface{}) + DebugOn(on bool) +} + +// IntLogger is a fully implemented internal logger. It must have Err, Std and Debug logging. +type IntLogger interface { + IntDebugLogger + IntStdLogger + IntErrLogger +} + +// InternalLogger is a logger that the process manager will use internally. +// This logger has a debug bool value which will dictate if the debug logging +// will be produced. +type InternalLogger struct { + debug bool + config configfile.LoggingConfig + logManager *processlogger.LogManager +} + +// New requires a copy of the config for logging and a log manager to forward logs to. 
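+//
+// A usage sketch (assumes a started *processlogger.LogManager named logManager
+// and a configfile.LoggingConfig named cfg; both names are illustrative):
+//
+//   pmlogger := internallogger.New(cfg, logManager)
+//   pmlogger.DebugOn(true)
+//   pmlogger.Printf("starting %d main processes", 2)
+//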
+// It will return a *InternalLogger +func New(config configfile.LoggingConfig, logManager *processlogger.LogManager) *InternalLogger { + return &InternalLogger{ + config: config, + logManager: logManager, + } +} + +// Printf mimics the functionality of fmt.Printf and sends the result to STDOUT +func (il *InternalLogger) Printf(format string, args ...interface{}) { + il.submit(il.newMsg(fmt.Sprintf(format, args...), processlogger.STDOUT)) +} + +// Println mimics the functionality of fmt.Println and sends the result to STDOUT +func (il *InternalLogger) Println(s interface{}) { + il.submit(il.newMsg(fmt.Sprintln(s), processlogger.STDOUT)) +} + +// Errorf mimics the functionality of fmt.Printf and sends the result to STDERR +func (il *InternalLogger) Errorf(format string, args ...interface{}) { + il.submit(il.newMsg(fmt.Sprintf(format, args...), processlogger.STDERR)) +} + +// Errorln mimics the functionality of fmt.Println and sends the result to STDERR +func (il *InternalLogger) Errorln(s interface{}) { + il.submit(il.newMsg(fmt.Sprintln(s), processlogger.STDERR)) +} + +// Debugf mimics the functionality of fmt.Printf and sends the result to STDOUT +// if the debug toggle is true +func (il *InternalLogger) Debugf(format string, args ...interface{}) { + if il.debug { + il.submit(il.newMsg(fmt.Sprintf(format, args...), processlogger.STDERR)) + } +} + +// Debugln mimics the functionality of fmt.Printf and sends the result to STDOUT +// if the debug toggle is true +func (il *InternalLogger) Debugln(s interface{}) { + if il.debug { + il.submit(il.newMsg(fmt.Sprintln(s), processlogger.STDERR)) + } +} + +// DebugOn is used to turn debug logging on and off. +func (il *InternalLogger) DebugOn(on bool) { + il.debug = on +} + +// newMsg creates a new LogMessage with the required resources and returns a pointer to it. +func (il *InternalLogger) newMsg(msg string, pipe processlogger.Pipe) *processlogger.LogMessage { + return &processlogger.LogMessage{ + Source: processManagerSource, + Pipe: pipe, + Config: il.config, + Message: msg, + } +} + +// submit wraps the Submit function of the logmanager which will consume the message and route it. +func (il *InternalLogger) submit(lMsg *processlogger.LogMessage) { + il.logManager.Submit(*lMsg) +} diff --git a/launch.yaml b/launch.yaml new file mode 100644 index 0000000..978307c --- /dev/null +++ b/launch.yaml @@ -0,0 +1,26 @@ +process_manager: + debug_logging: true + debug_options: + show_generated_config: true + logging_config: + engine: console +processes: + main_processes: + - name: TestBin1 + command: /testbin + arguments: + - -timeout + - 2 + - -no-env + - -log-json + - 5 +default_logger_config: + logging_config: + engine: syslog + syslog: + program_name: example_service + address: logs.papertrailapp.com:12345 + protocol: tcp+tls + override_hostname: toaster + detect_log_level: true + cert_bundle_path: papertrail_cert.crt \ No newline at end of file diff --git a/main.go b/main.go new file mode 100644 index 0000000..43e02b6 --- /dev/null +++ b/main.go @@ -0,0 +1,152 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/signal" + "runtime" + "strings" + "syscall" + + // Pull in all available loggers. 
+ _ "github.com/morfien101/launch/processlogger/allloggers" + + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/internallogger" + "github.com/morfien101/launch/processlogger" + "github.com/morfien101/launch/processmanager" +) + +const ( + // DefaultTimeout period for binaries + DefaultTimeout = 30 +) + +var ( + // version and timestamp are expected to be passed in at build time. + buildVersion = "0.1.0" + buildTimestamp = "" + + timeout = DefaultTimeout +) + +func main() { + flagHelp := flag.Bool("h", false, "Shows this help menu.") + flagVersion := flag.Bool("v", false, "Shows the version.") + flagVersionExtended := flag.Bool("version", false, "Shows extended version numbering.") + flagConfigExample := flag.Bool("example-config", false, "Displays and example configration.") + flagConfigFilePath := flag.String("f", "/launch.yaml", "Location of the config file to read.") + // Parse and process terminating flags + flag.Parse() + if *flagHelp { + flag.PrintDefaults() + return + } + if *flagVersion { + fmt.Println(buildVersion) + return + } + if *flagVersionExtended { + fmt.Printf("Version: %s\nBuild time: %s\nGo version: %s\n", buildVersion, buildTimestamp, runtime.Version()) + return + } + if *flagConfigExample { + out, err := configfile.ExampleConfigFile() + if err != nil { + fmt.Printf(`There was an error generating the configuration file example. +Please log an error with the maintainer. +The error was: %s`, err) + os.Exit(1) + } + fmt.Println(out) + return + } + + // Setup signal capture + signals := make(chan os.Signal, 1) + signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + + // Create a limited logger that will be thrown away once we fired up our actual loggers. + loggers := processlogger.New( + 10, + configfile.DefaultLoggerDetails{}, + ) + starterPMConfig := configfile.LoggingConfig{ + Engine: "console", + } + err := loggers.StartLoggers(configfile.Processes{}, starterPMConfig) + if err != nil { + fmt.Println(err) + } + pmlogger := internallogger.New(starterPMConfig, loggers) + + config, err := configfile.New(*flagConfigFilePath) + if err != nil { + pmlogger.Errorf("Failed to render the configuration. Error: %s", err) + terminate(1, loggers) + } + + pmlogger.Println("Starting full loggers") + + // Start logging engines + loggers = processlogger.New(10, config.DefaultLoggerConfig) + err = loggers.StartLoggers(config.Processes, config.ProcessManager.LoggerConfig) + if err != nil { + fmt.Println(err) + pmlogger.Errorf("Could not start full logging. Error: %s", err) + // Attempt to close what has been opened. + terminate(1, loggers) + } + + // Start the internal logger now that we know where to log to + pmlogger = internallogger.New(config.ProcessManager.LoggerConfig, loggers) + pmlogger.DebugOn(config.ProcessManager.DebugLogging) + pmlogger.Debugln("Debugging logging for the process manager has been turned on") + if config.ProcessManager.DebugOptions.PrintGeneratedConfig { + pmlogger.Debugf("Using generated config:\n%s", *config) + } + + // Get a new proccess manager + pm := processmanager.New(config.Processes, loggers, pmlogger, signals) + // Start init processes in order one by one + if output, err := pm.RunInitProcesses(); err != nil { + pmlogger.Errorf("An init process failed. Error: %s\n", err) + pmlogger.Println(output) + terminate(1, loggers) + } + // Start processes + wait, err := pm.RunMainProcesses() + if err != nil { + pmlogger.Errorf("Something went wrong starting the main processes. 
Error: %s", err) + terminate(1, loggers) + } + + // Wait for processes to finish + pmlogger.Debugln("Waiting for main processes to finish.") + endMessage := <-wait + pmlogger.Debugln("Finished waiting. Proceeding to shutdown loggers.") + pmlogger.Println("Final state: " + endMessage) + + // Shutdown the loggers. + terminate(0, loggers) +} + +// terminate will flush the loggers and then exit with the passed in code. +// If the loggers fail then we have no choice but to spit to the console. +func terminate(exitcode int, loggers *processlogger.LogManager) { + // Shutdown the loggers. + if errs := loggers.Shutdown(); len(errs) > 0 { + errString := func() string { + es := []string{} + for _, err := range errs { + es = append(es, err.Error()) + } + return strings.Join(es, ",") + } + log.Fatalf("Error shutting down loggers. Errors: %s" + errString()) + } + + os.Exit(exitcode) +} diff --git a/passwd b/passwd new file mode 100644 index 0000000..7b3e491 --- /dev/null +++ b/passwd @@ -0,0 +1 @@ +nobody:x:65534:65534:Nobody:/: \ No newline at end of file diff --git a/processlogger/allloggers/all.go b/processlogger/allloggers/all.go new file mode 100644 index 0000000..db35ac7 --- /dev/null +++ b/processlogger/allloggers/all.go @@ -0,0 +1,17 @@ +// Package allloggers is used to bring in all the packages that contain the loggers +// which we support. This allows the loggers to be written as a plugin and used +// when they are added to the list below. +// We need to do it this way because the compiler needs to know which packages to +// bring in. +package allloggers + +import ( + // Adding console logger + _ "github.com/morfien101/launch/processlogger/console" + // Adding devnull logger + _ "github.com/morfien101/launch/processlogger/devnull" + // Adding ELK logger + _ "github.com/morfien101/launch/processlogger/filelogger" + // Adding Syslog logger + _ "github.com/morfien101/launch/processlogger/syslog" +) diff --git a/processlogger/console/console.go b/processlogger/console/console.go new file mode 100644 index 0000000..c68430c --- /dev/null +++ b/processlogger/console/console.go @@ -0,0 +1,79 @@ +// Package console is a logger that prints to the console where the +// process manager is running. It is intented to be used for development and +// debugging purposes. +package console + +import ( + "fmt" + "os" + + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/processlogger" +) + +const ( + //LoggerTag will be used to call this package + LoggerTag = "console" +) + +// Console is a logger that will output to the local stdout and stderr +type Console struct{} + +// New will return a new pointer to a Console logger +func init() { + processlogger.RegisterLogger(LoggerTag, func() processlogger.Logger { + return &Console{} + }) +} + +// RegisterConfig does nothing here. +func (c *Console) RegisterConfig(_ configfile.LoggingConfig, _ configfile.DefaultLoggerDetails) error { + return nil +} + +// Start will start the logging engine. There is nothing to do in this package +func (c *Console) Start() error { + return nil +} + +// IsStarted will let the caller know if it needs to start this service +func (c *Console) IsStarted() bool { + return true +} + +// Shutdown does not really need to do anything in this package. +// It returns a chan bool preloaded with a true to signal that +// the connections are closed. 
+func (c *Console) Shutdown() chan error { + ch := make(chan error, 1) + ch <- nil + close(ch) + return ch +} + +// Submit will consume a processlogger.LogMessage and send it to the right pipe. +func (c *Console) Submit(msg processlogger.LogMessage) { + m := fmt.Sprintf("%s: %s", msg.Source, msg.Message) + if msg.Pipe == processlogger.STDERR { + c.stdErr(m) + } + if msg.Pipe == processlogger.STDOUT { + c.stdOut(m) + } +} + +// StdOut will copy the message to Stdout +func (c *Console) stdOut(msg string) error { + if _, err := os.Stdout.WriteString(msg); err != nil { + return err + } + return nil +} + +// StdErr will copy the message to Stderr +func (c *Console) stdErr(msg string) error { + if _, err := os.Stderr.WriteString(msg); err != nil { + return err + } + return nil +} diff --git a/processlogger/devnull/devnull.go b/processlogger/devnull/devnull.go new file mode 100644 index 0000000..7db6134 --- /dev/null +++ b/processlogger/devnull/devnull.go @@ -0,0 +1,47 @@ +// Package devnull is a black hole logger. It implements the interfaces to be a logger +// but will discard the data it gets. +package devnull + +import ( + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/processlogger" +) + +const ( + // LoggerTag is the tag that should be used in config files to access this logger + LoggerTag = "devnull" +) + +// DevNull is a discard logger. It will not process logs. Usefull for junk logs +type DevNull struct { + running bool +} + +func init() { + processlogger.RegisterLogger(LoggerTag, func() processlogger.Logger { + return &DevNull{} + }) +} + +// Shutdown satisfies the processlogger.Logger interface but effectively does nothing +func (d *DevNull) Shutdown() chan error { + shutdownChan := make(chan error, 1) + shutdownChan <- nil + close(shutdownChan) + d.running = false + return shutdownChan +} + +// RegisterConfig does nothing here. +func (d *DevNull) RegisterConfig(_ configfile.LoggingConfig, _ configfile.DefaultLoggerDetails) error { + return nil +} + +// Start satisfies the processlogger.Logger interface but effectively does nothing +func (d *DevNull) Start() error { + d.running = true + return nil +} + +// Submit satisfies the processlogger.Logger interface but effectively does nothing +func (d *DevNull) Submit(_ processlogger.LogMessage) {} diff --git a/processlogger/filelogger/filelogger.go b/processlogger/filelogger/filelogger.go new file mode 100644 index 0000000..40e2cac --- /dev/null +++ b/processlogger/filelogger/filelogger.go @@ -0,0 +1,86 @@ +// Package filelogger is use to push log messages to a file on disk. +// Given that the process manger is designed to work inside containers +// filelogger also manages the rotation of files. +// Configuration passed in dictates how many files to keep and how large +// they should be. +package filelogger + +import ( + "fmt" + "strings" + + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/processlogger" +) + +const ( + // LoggerTag is used to identify the logger + LoggerTag = "logfile" +) + +// FileLogManager is used to keep track of the current files that are used +// to write logs to. +type FileLogManager struct { + filetracker map[string]*rotateWriter +} + +var fileLogManager *FileLogManager + +func init() { + processlogger.RegisterLogger(LoggerTag, func() processlogger.Logger { + return &FileLogManager{} + }) +} + +// RegisterConfig will create a new file and router for each config passed in. 
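+//
+// A YAML sketch of a logging_config block that selects this engine (the keys
+// follow the configfile yaml tags; the path and limits are illustrative only):
+//
+//   logging_config:
+//     engine: logfile
+//     file_logger:
+//       filepath: /var/log/my_service.log
+//       size_limit: 10485760
+//       historical_files_limit: 2
+//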
+func (flm *FileLogManager) RegisterConfig(conf configfile.LoggingConfig, defaults configfile.DefaultLoggerDetails) error { + if _, ok := flm.filetracker[conf.Logfile.Filename]; ok { + return nil + } + wr, err := newRW(conf.Logfile) + flm.filetracker[conf.Logfile.Filename] = wr + if err != nil { + return err + } + + return nil +} + +// Start will create all the internal components that are required to run the logger +func (flm *FileLogManager) Start() error { + return nil +} + +// Shutdown will close all the files and return a chan error to signal completion +// and forward any errors +func (flm *FileLogManager) Shutdown() chan error { + errChan := make(chan error, 1) + + go func() { + errors := make([]string, 0) + addErr := func(err error) { + errors = append(errors, err.Error()) + } + for _, tracker := range flm.filetracker { + err := tracker.Close() + if err != nil { + addErr(err) + } + } + + if len(errors) > 0 { + errChan <- fmt.Errorf(strings.Join(errors, " | ")) + } else { + errChan <- nil + } + close(errChan) + }() + + return errChan +} + +// Submit will write a log message to a file that is dictated by the configuration +// sent with the processlogger.LogMessage +func (flm *FileLogManager) Submit(msg processlogger.LogMessage) { + flm.filetracker[msg.Config.Logfile.Filename].Write([]byte(msg.Message)) +} diff --git a/processlogger/filelogger/filelogger_test.go b/processlogger/filelogger/filelogger_test.go new file mode 100644 index 0000000..a76ed88 --- /dev/null +++ b/processlogger/filelogger/filelogger_test.go @@ -0,0 +1,74 @@ +package filelogger + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/c2h5oh/datasize" + "github.com/morfien101/launch/configfile" +) + +func TestDeleteOldFiles(t *testing.T) { + testLogContent := ` +logline1 +logline2 +logline3 +` + fileslimit := 10 + filenames := make([]string, fileslimit) + + dir, err := ioutil.TempDir("./", "") + if err != nil { + t.Fatalf("Could not create temp dir for test files.") + } + + defer os.RemoveAll(dir) + + for i := 0; i < fileslimit; i++ { + fName := fmt.Sprintf("./%s/file%02d.log", dir, i+1) + err := ioutil.WriteFile(fName, []byte(testLogContent), 0600) + if err != nil { + t.Fatalf("Could not create test file. %s", err) + } + filenames[i] = fName + } + var oneHundredMegs datasize.ByteSize + err = oneHundredMegs.UnmarshalText([]byte("100 mb")) + if err != nil { + t.Fatal(err) + } + config := configfile.FileLogger{ + Filename: "/tmp/file01.log", + SizeLimit: oneHundredMegs.Bytes(), + HistoricalFiles: 2, + } + rw, err := newRW(config) + if err != nil { + t.Fatal(err) + } + rw.historicalFilePaths = filenames + rw.deleteOldFiles() + + t.Log(rw.historicalFilePaths) + + errCheck := func(e error) { + t.Log(e) + t.Fail() + } + + for i := 0; i < fileslimit; i++ { + _, err := os.Stat(filenames[i]) + if i < 2 { + if err != nil { + errCheck(err) + } + continue + } + + if err == nil { + errCheck(err) + } + } +} diff --git a/processlogger/filelogger/rotationwriter.go b/processlogger/filelogger/rotationwriter.go new file mode 100644 index 0000000..d385124 --- /dev/null +++ b/processlogger/filelogger/rotationwriter.go @@ -0,0 +1,149 @@ +package filelogger + +import ( + "fmt" + "os" + "sync" + "time" + + "github.com/morfien101/launch/configfile" +) + +// rotateWriter can write and rotate a log file +type rotateWriter struct { + lock sync.Mutex + fp *os.File + config configfile.FileLogger + watchDogSignals chan bool + historicalFilePaths []string + currentFileSize uint64 + running bool +} + +// New makes a new rotateWriter. 
Return nil if an error occurs during setup.
+func newRW(conf configfile.FileLogger) (*rotateWriter, error) {
+	w := &rotateWriter{
+		config:              conf,
+		watchDogSignals:     make(chan bool, 100),
+		historicalFilePaths: make([]string, 0),
+		running:             true,
+	}
+	// rotate gives us the first file
+	err := w.rotate()
+	if err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// watchDog watches the file for size changes and rotates when required.
+// watchDog will also trigger clean-up tasks to remove old files.
+// watchDog is run as a goroutine.
+func (w *rotateWriter) watchDog() {
+	for {
+		select {
+		case _, ok := <-w.watchDogSignals:
+			// If the channel is closed we can just exit out.
+			if !ok {
+				return
+			}
+			// Check to see if the file is too large. If it is, rotate it.
+			// Rotation creates a new file, which means we may need to
+			// delete old files to stay within the historical file limit.
+			if w.tooLarge() {
+				err := w.rotate()
+				if err != nil {
+					w.panic(err)
+				}
+				w.deleteOldFiles()
+			}
+		}
+	}
+}
+
+// Loggers should not terminate the service.
+// Should we need to panic we should handle the situation as best we can,
+// trying to keep the service running.
+func (w *rotateWriter) panic(err error) {
+	// We have an error that we need to recover from.
+	// The best we can do here is print it to the console.
+	fmt.Println(err)
+}
+
+// tooLarge tells us if the number of bytes we have written is more than the
+// file size we want to handle.
+// This is inferred to avoid millions of os.Stat calls.
+func (w *rotateWriter) tooLarge() bool {
+	return w.currentFileSize > w.config.SizeLimit
+}
+
+func (w *rotateWriter) deleteOldFiles() {
+	keep := make([]string, w.config.HistoricalFiles)
+	for index, filename := range w.historicalFilePaths {
+		if index < w.config.HistoricalFiles {
+			keep[index] = filename
+			continue
+		}
+		err := os.Remove(filename)
+		if err != nil {
+			w.panic(err)
+		}
+	}
+	w.historicalFilePaths = keep
+}
+
+// Write satisfies the io.Writer interface.
+func (w *rotateWriter) Write(output []byte) (int, error) {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	n, err := w.fp.Write(output)
+	w.currentFileSize = w.currentFileSize + uint64(n)
+	return n, err
+}
+
+// Close closes out the current file.
+func (w *rotateWriter) Close() error {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	close(w.watchDogSignals)
+	w.running = false
+	return w.fp.Close()
+}
+
+// rotate performs the actual act of rotating and reopening the file.
+func (w *rotateWriter) rotate() error {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+
+	// Close existing file if open
+	if w.fp != nil {
+		err := w.fp.Close()
+		w.fp = nil
+		if err != nil {
+			return err
+		}
+	}
+	// Rename dest file if it already exists
+	_, err := os.Stat(w.config.Filename)
+	if err == nil {
+		newFileName := w.config.Filename + "." + time.Now().Format(time.RFC3339)
+		err = os.Rename(w.config.Filename, newFileName)
+		if err != nil {
+			return err
+		}
+		w.updateHistoricalFileNames(newFileName)
+	}
+
+	// Create a file.
+	w.fp, err = os.Create(w.config.Filename)
+	w.currentFileSize = 0
+	return err
+}
+
+func (w *rotateWriter) updateHistoricalFileNames(newFileName string) {
+	w.historicalFilePaths = append([]string{newFileName}, w.historicalFilePaths...)
+}
diff --git a/processlogger/interfaces.go b/processlogger/interfaces.go
new file mode 100644
index 0000000..3334d3b
--- /dev/null
+++ b/processlogger/interfaces.go
@@ -0,0 +1,19 @@
+package processlogger
+
+import "github.com/morfien101/launch/configfile"
+
+// Logger is a logger interface that can start, stop and send logs.
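+//
+// A minimal implementation and registration sketch (the "noop" logger and its
+// tag are hypothetical, shown only to illustrate how engines plug in; see the
+// console and devnull packages for real implementations):
+//
+//   type noop struct{}
+//
+//   func (n *noop) RegisterConfig(configfile.LoggingConfig, configfile.DefaultLoggerDetails) error { return nil }
+//   func (n *noop) Start() error { return nil }
+//   func (n *noop) Shutdown() chan error {
+//       ch := make(chan error, 1)
+//       ch <- nil
+//       close(ch)
+//       return ch
+//   }
+//   func (n *noop) Submit(LogMessage) {}
+//
+//   func init() {
+//       RegisterLogger("noop", func() Logger { return &noop{} })
+//   }
+//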
+// This is the shipper interface. +type Logger interface { + RegisterConfig(configfile.LoggingConfig, configfile.DefaultLoggerDetails) error + Start() error + Shutdown() chan error + Submit(LogMessage) +} + +// LogsManager is something that can start, stop and submit logs. +type LogsManager interface { + StartLoggers(configfile.Processes, configfile.LoggingConfig) error + Submit(log LogMessage) + Shutdown() []error +} diff --git a/processlogger/logManager.go b/processlogger/logManager.go new file mode 100644 index 0000000..21b239d --- /dev/null +++ b/processlogger/logManager.go @@ -0,0 +1,257 @@ +// Package processlogger is responsible for collecting logs that are submitted and +// forwarding them to the correct logging engine. The logging engines need to satisfiy +// the Logger interface. +// Loggers handle themselves and processlogger forwards on the log messages that they +// need to send. +// processlogger will only start the logging engines that it requires although all +// logging engines are available. +package processlogger + +import ( + "fmt" + "sync" + + "github.com/morfien101/launch/configfile" +) + +// Pipe describes if the message came out of stdout or stderr +type Pipe string + +const ( + // STDERR is used to indicate a message was from stderr + STDERR = Pipe("e") + // STDOUT is used to indicate a message was from stdout + STDOUT = Pipe("o") + // logBufferSize is how many logs can be in queue for each logging end point before we + // start dropping messages + logBufferSize = 100 +) + +// LogMessage is the box that needs to be created to ship a message to a +// log forwarder. +type LogMessage struct { + Source string + Pipe Pipe + Config configfile.LoggingConfig + Message string +} + +// LogManager is used to collect, route and submit logs to the correct logging engines. +type LogManager struct { + logChan chan LogMessage + workersList []*logworker + waitgroup sync.WaitGroup + defaultConfig configfile.DefaultLoggerDetails + availableLoggers map[string]Logger + activeLoggers map[string]Logger + activeLoggerQ map[string]chan *LogMessage + terminated bool +} + +// New will create a new LogManager +func New(loggingBuffer int, defaultConfig configfile.DefaultLoggerDetails) *LogManager { + lm := &LogManager{ + logChan: make(chan LogMessage, loggingBuffer), + workersList: make([]*logworker, 0), + defaultConfig: defaultConfig, + activeLoggers: make(map[string]Logger), + activeLoggerQ: make(map[string]chan *LogMessage), + } + lm.loadAvailableLoggers() + return lm +} + +func (lm *LogManager) loadAvailableLoggers() { + lm.availableLoggers = make(map[string]Logger) + // Load all the available loggers here + for name, regfunc := range registeredLoggers { + lm.availableLoggers[name] = regfunc() + } +} + +// StartLoggers will start all of the required loggers for this process +func (lm *LogManager) StartLoggers(processes configfile.Processes, PMConf configfile.LoggingConfig) error { + // Start the logger to the process manager itself. + // If we can't log ourselves then we need to error. + if err := lm.startLogger(PMConf); err != nil { + return err + } + + // Start the loggers for each process. + setup := func(pSlice []*configfile.Process) error { + for _, proc := range pSlice { + err := lm.startLogger(proc.LoggerConfig) + if err != nil { + return err + } + } + return nil + } + + // If we can't start the loggers then we need to error out. 
+ if err := setup(processes.InitProcesses); err != nil { + return err + } + if err := setup(processes.MainProcesses); err != nil { + return err + } + + // Now that we have a list of the loggers that are going to be used. + // We can start logger and start the router worker for the logger. + for id, logger := range lm.activeLoggers { + err := logger.Start() + if err != nil { + return err + } + lm.startLogRouter(id, logger) + } + + return nil +} + +func (lm *LogManager) startLogRouter(id string, logger Logger) { + c := make(chan *LogMessage, logBufferSize) + lm.activeLoggerQ[id] = c + + worker := newWorker(logger) + lm.workersList = append(lm.workersList, worker) + + go worker.route(c) +} + +func (lm *LogManager) startLogger(conf configfile.LoggingConfig) error { + if _, ok := lm.availableLoggers[conf.Engine]; !ok { + return fmt.Errorf("logging engine %s is not recognized. Please check your configuration file", conf.Engine) + } + + // We register the configuration for the loggers here. + // We need to tell the loggers to start later. + err := lm.availableLoggers[conf.Engine].RegisterConfig(conf, lm.defaultConfig) + if err != nil { + return err + } + if lm.activeLoggers[conf.Engine] != lm.availableLoggers[conf.Engine] { + lm.activeLoggers[conf.Engine] = lm.availableLoggers[conf.Engine] + } + return nil +} + +// Submit is used to push log messages in to the router queue. +// If the queue is full we will drop the message. +func (lm *LogManager) Submit(log LogMessage) { + if lm.terminated { + // We should send a metric or something here to show that we ditched + // a log. + return + } + select { + case lm.activeLoggerQ[log.Config.Engine] <- &log: + // We should also possibly log metrics for successful logs in queue + default: + // TODO + // We should be logging metrics here for each message that we have dropped. + errMsg := fmt.Errorf("Can't log because we are overflowing with logs. Log is from: %s", log.Source) + fmt.Println(errMsg) + } +} + +// Shutdown is used to gracefully shutdown all the loggers and log routers. +func (lm *LogManager) Shutdown() []error { + // There may be some lagging go funcs that are sending log messages. + // Unfortunately we can't know for sure. So we can only assume that + // the point we have called shutdown, all the important logs have arrived. + // Mark the log manager as terminated and that we can't accept more + // logs from this point on. + lm.terminated = true + + // Drain the queues + //close the channels for the loggers + for _, ch := range lm.activeLoggerQ { + close(ch) + } + // Collect the waitgroups. Wait for each one to + // free up + for _, worker := range lm.workersList { + worker.waitGroup().Wait() + } + + // Each logger needs to shutdown. + // So we need to loop through all the active loggers and call the shutdown function. + // Shutdown returns a channel that gets the error if there is one. + outputs := make(map[string]error) + mu := sync.Mutex{} + wg := &sync.WaitGroup{} + for id, logger := range lm.activeLoggers { + innerID := id + innerLogger := logger + wg.Add(1) + go func() { + defer wg.Done() + select { + case err, ok := <-innerLogger.Shutdown(): + if !ok { + // got no error. Safe to return + return + } + mu.Lock() + outputs[innerID] = err + mu.Unlock() + } + }() + } + // We can then wait for all of them to complete using the wait group. + wg.Wait() + // Then check to see if they returned any errors and pass that back to the caller. + // We return a list of errors because we are responsible for closeing multiple loggers. 
+ errList := make([]error, 0) + for key, err := range outputs { + if err != nil { + errList = append(errList, fmt.Errorf("Logger %s got an error on shutdown call. Error: %s", key, err)) + } + } + + // return once we are done + return errList +} + +// The logworker is used to route logs into the logger that they want to use. +type logworker struct { + myLogger Logger + wg *sync.WaitGroup +} + +func newWorker(logger Logger) *logworker { + return &logworker{ + myLogger: logger, + wg: &sync.WaitGroup{}, + } + +} + +// route is is the worker function. It will accept message as they come in on the input channel +// and call the Submit func for the logger that has been allocated to it. +func (lw *logworker) route(input chan *LogMessage) { + lw.wg.Add(1) + for { + select { + case log, ok := <-input: + if !ok { + // Queue is closed, no more logs expected therefore stopping. + lw.wg.Done() + return + } + // Push the message into the logger. + // The submit is a hand over to the logger. It is responsible for the + // log from this point on. + lw.myLogger.Submit(*log) + } + } +} + +// waitGroup gives the reference the sync wait group. +// This can be used to determine when this worker is finished. +// Its expected that the caller should also close the input channel to the worker +// to get the worker to free the wait group. +func (lw *logworker) waitGroup() *sync.WaitGroup { + return lw.wg +} diff --git a/processlogger/logManager_test.go b/processlogger/logManager_test.go new file mode 100644 index 0000000..473ce0a --- /dev/null +++ b/processlogger/logManager_test.go @@ -0,0 +1,148 @@ +package processlogger + +import ( + "testing" + "time" + + "github.com/morfien101/launch/configfile" + "github.com/silverstagtech/gotracer" +) + +type trace struct { + logger *gotracer.Tracer +} + +func (tr *trace) RegisterConfig(configfile.LoggingConfig, configfile.DefaultLoggerDetails) error { + return nil +} +func (tr *trace) Start() error { + return nil +} +func (tr *trace) Shutdown() chan error { + return nil +} +func (tr *trace) Submit(msg LogMessage) { + tr.logger.Send(msg.Message) +} + +func (tr *trace) Logs() []string { + return tr.logger.Show() +} + +func TestLogger(t *testing.T) { + // Registration needs to happen BEFORE the logger is created. + // This is to mimic init function calls which don't happen in tests + tracer1 := &trace{ + logger: gotracer.New(), + } + tracer2 := &trace{ + logger: gotracer.New(), + } + RegisterLogger("tracer1", func() Logger { + return tracer1 + }) + RegisterLogger("tracer2", func() Logger { + return tracer2 + }) + + proc1 := &configfile.Process{ + Name: "Test1", + LoggerConfig: configfile.LoggingConfig{ + Engine: "tracer1", + ProcessName: "test1", + }, + } + + proc2 := &configfile.Process{ + Name: "Test2", + LoggerConfig: configfile.LoggingConfig{ + Engine: "tracer2", + ProcessName: "test2", + }, + } + + conf := configfile.Config{ + ProcessManager: configfile.ProcessManager{ + LoggerConfig: configfile.LoggingConfig{ + Engine: "tracer1", + ProcessName: "test1", + }, + }, + Processes: configfile.Processes{ + InitProcesses: []*configfile.Process{}, + MainProcesses: []*configfile.Process{proc1, proc2}, + }, + DefaultLoggerConfig: configfile.DefaultLoggerDetails{}, + } + logManager := New(10, conf.DefaultLoggerConfig) + err := logManager.StartLoggers(conf.Processes, conf.ProcessManager.LoggerConfig) + if err != nil { + t.Logf("Got an error starting the logger. 
Error: %s", err) + } + + t.Logf("Available Loggers: %v", logManager.availableLoggers) + t.Logf("Active Loggers: %v", logManager.activeLoggers) + + msg1 := LogMessage{ + Source: "proc1", + Config: proc1.LoggerConfig, + Message: "Message 1", + } + msg2 := LogMessage{ + Source: "proc2", + Config: proc2.LoggerConfig, + Message: "Message 2", + } + + logManager.Submit(msg1) + logManager.Submit(msg2) + + t.Logf("Number of queues: %d", len(logManager.activeLoggerQ)) + + if len(logManager.activeLoggerQ) != 2 { + t.Logf("Failed to create correct number of channels for logs. Want %d, Got: %d", 2, len(logManager.activeLoggerQ)) + t.Fail() + } + // Give the logger a bit of time to flush the logs + // It can't be too much as we need to make sure its still fast enough + time.Sleep(time.Millisecond * 2) + + if tracer1.logger.Len() != 1 { + t.Logf("Tracer 1 does not have the correct number of messages. Want: %d, Got: %d", 1, tracer1.logger.Len()) + t.Fail() + } + if tracer2.logger.Len() != 1 { + t.Logf("Tracer 2 does not have the correct number of messages. Want: %d, Got: %d", 1, tracer2.logger.Len()) + t.Fail() + } +} + +func TestUndefinedLogger(t *testing.T) { + proc1 := &configfile.Process{ + Name: "Test1", + LoggerConfig: configfile.LoggingConfig{ + Engine: "tracer1", + ProcessName: "test1", + }, + } + conf := configfile.Config{ + ProcessManager: configfile.ProcessManager{ + LoggerConfig: configfile.LoggingConfig{ + Engine: "Potato", + ProcessName: "test1", + }, + }, + Processes: configfile.Processes{ + InitProcesses: []*configfile.Process{}, + MainProcesses: []*configfile.Process{proc1}, + }, + DefaultLoggerConfig: configfile.DefaultLoggerDetails{}, + } + logManager := New(10, conf.DefaultLoggerConfig) + err := logManager.StartLoggers(conf.Processes, conf.ProcessManager.LoggerConfig) + t.Logf("Bad logger error: %s", err) + if err == nil { + t.Logf("A process with a bad logger engine did not cause the logger to fail.") + t.Fail() + } +} diff --git a/processlogger/loggerRegistration.go b/processlogger/loggerRegistration.go new file mode 100644 index 0000000..142aa65 --- /dev/null +++ b/processlogger/loggerRegistration.go @@ -0,0 +1,17 @@ +package processlogger + +// RegisterFunc is a function that can register the loggers +type RegisterFunc func() Logger + +// registeredLoggers contains a map of all available loggers and a function +// that will initialize them. +var registeredLoggers map[string]RegisterFunc + +// RegisterLogger will register the loggers when the packages are +// read at init time. +func RegisterLogger(name string, regfunc RegisterFunc) { + if registeredLoggers == nil { + registeredLoggers = make(map[string]RegisterFunc) + } + registeredLoggers[name] = regfunc +} diff --git a/processlogger/syslog/levelextraction.go b/processlogger/syslog/levelextraction.go new file mode 100644 index 0000000..2407df2 --- /dev/null +++ b/processlogger/syslog/levelextraction.go @@ -0,0 +1,53 @@ +package syslog + +import ( + "encoding/json" + "fmt" + "strings" + + syslogger "github.com/silverstagtech/srslog" +) + +type level struct { + LVL string `json:"level"` +} + +func extractJSONlevel(jsonlog []byte) (syslogger.Priority, error) { + lvl := &level{} + err := json.Unmarshal(jsonlog, lvl) + if err != nil { + return syslogger.LOG_INFO, fmt.Errorf("Failed to read json log. 
Error: %s", err) + } + if lvl.LVL == "" { + return syslogger.LOG_INFO, fmt.Errorf("Failed to detect level in json log") + } + + return detectLevel(strings.ToLower(lvl.LVL)), nil +} + +func detectLevel(level string) syslogger.Priority { + switch level { + case "emerg": + return syslogger.LOG_EMERG + case "alert": + return syslogger.LOG_ALERT + case "crit": + return syslogger.LOG_CRIT + case "err": + return syslogger.LOG_ERR + case "error": + return syslogger.LOG_ERR + case "warning": + return syslogger.LOG_WARNING + case "warn": + return syslogger.LOG_WARNING + case "notice": + return syslogger.LOG_NOTICE + case "info": + return syslogger.LOG_INFO + case "debug": + return syslogger.LOG_DEBUG + default: + return syslogger.LOG_INFO + } +} diff --git a/processlogger/syslog/levelextraction_test.go b/processlogger/syslog/levelextraction_test.go new file mode 100644 index 0000000..c3efbcf --- /dev/null +++ b/processlogger/syslog/levelextraction_test.go @@ -0,0 +1,120 @@ +package syslog + +import ( + "testing" + + syslogger "github.com/silverstagtech/srslog" +) + +func TestLevelExtraction(t *testing.T) { + tests := []struct { + name string + JSONBlob []byte + expectedLevel syslogger.Priority + }{ + { + name: "info", + JSONBlob: []byte(`{"test_key":"test_value","level":"info"}`), + expectedLevel: syslogger.LOG_INFO, + }, + { + name: "INFO", + JSONBlob: []byte(`{"test_key":"test_value","level":"INFO"}`), + expectedLevel: syslogger.LOG_INFO, + }, + { + name: "Info", + JSONBlob: []byte(`{"test_key":"test_value","level":"Info"}`), + expectedLevel: syslogger.LOG_INFO, + }, + { + name: "notice", + JSONBlob: []byte(`{"test_key":"test_value","level":"notice"}`), + expectedLevel: syslogger.LOG_NOTICE, + }, + { + name: "warning", + JSONBlob: []byte(`{"test_key":"test_value","level":"warning"}`), + expectedLevel: syslogger.LOG_WARNING, + }, + { + name: "warn", + JSONBlob: []byte(`{"test_key":"test_value","level":"warn"}`), + expectedLevel: syslogger.LOG_WARNING, + }, + { + name: "err", + JSONBlob: []byte(`{"test_key":"test_value","level":"err"}`), + expectedLevel: syslogger.LOG_ERR, + }, + { + name: "error", + JSONBlob: []byte(`{"test_key":"test_value","level":"error"}`), + expectedLevel: syslogger.LOG_ERR, + }, + { + name: "crit", + JSONBlob: []byte(`{"test_key":"test_value","level":"crit"}`), + expectedLevel: syslogger.LOG_CRIT, + }, + { + name: "alert", + JSONBlob: []byte(`{"test_key":"test_value","level":"alert"}`), + expectedLevel: syslogger.LOG_ALERT, + }, + { + name: "emerg", + JSONBlob: []byte(`{"test_key":"test_value","level":"emerg"}`), + expectedLevel: syslogger.LOG_EMERG, + }, + { + name: "debug", + JSONBlob: []byte(`{"test_key":"test_value","level":"debug"}`), + expectedLevel: syslogger.LOG_DEBUG, + }, + { + name: "unknown", + JSONBlob: []byte(`{"test_key":"test_value","level":"not_valid"}`), + expectedLevel: syslogger.LOG_INFO, + }, + } + + for _, test := range tests { + out, err := extractJSONlevel(test.JSONBlob) + if err != nil { + t.Logf("Test failed to see level, error %s", err) + t.Fail() + } + if test.expectedLevel != out { + t.Logf("Log level for test %s returned is not expected. 
Want: %v, Got: %v", test.name, test.expectedLevel, out) + t.Fail() + } + t.Logf("%s translates to log level: %v", test.name, out) + } +} + +func benchExtractJSON(b *testing.B, JSONBlob []byte) syslogger.Priority { + var lvl syslogger.Priority + for n := 0; n < b.N; n++ { + lvl, _ = extractJSONlevel(JSONBlob) + } + return lvl +} + +func BenchmarkExtractJSONLevelSmall(b *testing.B) { + JSONBlob := []byte(`{"test_key_1":"test_value_1","test_key_2":"test_value_2","level":"crit"}`) + result := benchExtractJSON(b, JSONBlob) + b.Logf("Blob size: %d. Last result: %v", len(JSONBlob), result) +} + +func BenchmarkExtractJSONLevelMed(b *testing.B) { + JSONBlob := []byte(`{"test_key_1":"test_value_1","test_key_2":"test_value_2","test_key_text":"this is a log messsage. It needs to be a bit long to make sure that we don't slow down too much while extracting the level","level":"crit"}`) + result := benchExtractJSON(b, JSONBlob) + b.Logf("Blob size: %d. Last result: %v", len(JSONBlob), result) +} + +func BenchmarkExtractJSONLevelLarge(b *testing.B) { + JSONBlob := []byte(`{"test_key_1":"test_value_1","test_key_2":"test_value_2","test_key_text_1":"this is a log messsage. It needs to be a bit long to make sure that we don't slow down too much while extracting the level","test_key_text_2":"this is a log messsage. It needs to be a bit long to make sure that we don't slow down too much while extracting the level","test_key_text_3":"this is a log messsage. It needs to be a bit long to make sure that we don't slow down too much while extracting the level","level":"crit"}`) + result := benchExtractJSON(b, JSONBlob) + b.Logf("Blob size: %d. Last result: %v", len(JSONBlob), result) +} diff --git a/processlogger/syslog/syslog.go b/processlogger/syslog/syslog.go new file mode 100644 index 0000000..6b3d5d2 --- /dev/null +++ b/processlogger/syslog/syslog.go @@ -0,0 +1,246 @@ +package syslog + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/processlogger" + syslogger "github.com/silverstagtech/srslog" +) + +const ( + tlsConnection = "tcp+tls" + tcpConnection = "tcp" + udpConnection = "udp" + // LoggerTag will be used to call this package + LoggerTag = "syslog" + defaultProtocol = tlsConnection +) + +var ( + validDialers = map[string]bool{ + tlsConnection: true, + tcpConnection: true, + udpConnection: true, + } +) + +func isValidDialer(s string) bool { + _, ok := validDialers[s] + return ok +} + +func init() { + processlogger.RegisterLogger(LoggerTag, func() processlogger.Logger { + return &Syslog{ + protocol: defaultProtocol, + loggingFacility: syslogger.LOG_DAEMON, + } + }) +} + +// Syslog is responsible for logging to a syslog endpoint +type Syslog struct { + location string + protocol string + tlsconfig *tls.Config + config configfile.LoggingConfig + defaults configfile.DefaultLoggerDetails + logwriter *syslogger.Writer + running bool + loggingFacility syslogger.Priority + + // hostname is the name that you want to appear in syslog message + hostname string + // basename is the containers hostname and can be appended to the hostname + // then sending the log. Useful when you want to see what container is sending + // the logs. + basename string +} + +// IsStarted will tell the caller if this logger needs to be started. 
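+// Start uses it so that calling Start more than once does not reopen the connection.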
+func (sl *Syslog) isStarted() bool { + return sl.running +} + +func (sl *Syslog) readCertificates() (*x509.CertPool, error) { + if sl.defaults.Config.Syslog.CertificateBundlePath == "" { + return nil, fmt.Errorf("No certificate bundle specified") + } + certbundle, err := ioutil.ReadFile(sl.defaults.Config.Syslog.CertificateBundlePath) + if err != nil { + return nil, err + } + + roots := x509.NewCertPool() + ok := roots.AppendCertsFromPEM(certbundle) + if !ok { + return nil, fmt.Errorf("failed to parse the given certificate bundle") + } + + return roots, nil +} + +// RegisterConfig does nothing here. +func (sl *Syslog) RegisterConfig(config configfile.LoggingConfig, defaults configfile.DefaultLoggerDetails) error { + sl.config = config + sl.defaults = defaults + + if sl.defaults.Config.Syslog.ConnectionType == "" { + sl.defaults.Config.Syslog.ConnectionType = defaultProtocol + } + if !isValidDialer(sl.defaults.Config.Syslog.ConnectionType) { + return fmt.Errorf("%s is not a valid protocol to connect to syslog", sl.defaults.Config.Syslog.ConnectionType) + } + + if sl.defaults.Config.Syslog.ConnectionType == tlsConnection { + roots, err := sl.readCertificates() + if err != nil { + return err + } + sl.tlsconfig = &tls.Config{ + RootCAs: roots, + } + } + + var err error + sl.basename, err = os.Hostname() + if err != nil { + sl.basename = "not_available" + } + if sl.defaults.Config.Syslog.OverrideHostname != "" { + sl.hostname = sl.defaults.Config.Syslog.OverrideHostname + } + + return nil +} + +// Start connects the logging engine and links the writer pipe to be able to call StdOut and StdErr. +// Start is safe to be called multiple times. +func (sl *Syslog) Start() error { + if sl.isStarted() { + return nil + } + + // Dial with TLS + var writer *syslogger.Writer + var err error + switch sl.defaults.Config.Syslog.ConnectionType { + case tlsConnection: + writer, err = syslogger.DialWithTLSConfig( + tlsConnection, + sl.defaults.Config.Syslog.Address, + syslogger.LOG_INFO|syslogger.LOG_KERN, + sl.defaults.Config.Syslog.ProgramName, + sl.tlsconfig, + ) + case tcpConnection, udpConnection: + writer, err = syslogger.Dial( + sl.defaults.Config.Syslog.ConnectionType, + sl.defaults.Config.Syslog.Address, + syslogger.LOG_INFO|syslogger.LOG_KERN, + sl.defaults.Config.Syslog.ProgramName, + ) + default: + err = fmt.Errorf("Invalid logger type detected") + } + + if err != nil { + return fmt.Errorf("failed to connect to Syslog server because: %s", err) + } + + sl.logwriter = writer + sl.running = true + return nil +} + +// send will send the supplied text to syslog server. +func (sl *Syslog) send(facility, priority syslogger.Priority, tag, hostname, text string) error { + _, err := sl.logwriter.WriteWithOverrides(facility, priority, hostname, tag, text) + if err != nil { + return err + } + return nil +} + +func (sl *Syslog) appendBaseName(in string) string { + return in + sl.basename +} + +// Submit will consume a processlogger.LogMessage and route it to the correct writer. 
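+// Per message syslog settings take precedence over the defaults for the tag and
+// hostname. Messages from stdout are sent as LOG_INFO and messages from stderr as
+// LOG_CRIT, unless ExtractLogLevel is set and a level can be read from a JSON message.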
+func (sl *Syslog) Submit(msg processlogger.LogMessage) {
+	// We need to know what is sending the log
+	var tag string
+	if msg.Config.Syslog.ProgramName != "" {
+		tag = msg.Config.Syslog.ProgramName
+	} else if sl.defaults.Config.Syslog.ProgramName != "" {
+		tag = sl.defaults.Config.Syslog.ProgramName
+	} else {
+		tag = msg.Config.ProcessName
+	}
+
+	if sl.defaults.Config.Syslog.AddContainerNameToTag || msg.Config.Syslog.AddContainerNameToTag {
+		tag = sl.appendBaseName(tag)
+	}
+
+	// We need a hostname
+	var hostname string
+	if msg.Config.Syslog.OverrideHostname != "" {
+		hostname = msg.Config.Syslog.OverrideHostname
+	} else if sl.defaults.Config.Syslog.OverrideHostname != "" {
+		hostname = sl.defaults.Config.Syslog.OverrideHostname
+	} else {
+		hostname = sl.basename
+	}
+
+	if sl.defaults.Config.Syslog.AddContainerNameToHostname || msg.Config.Syslog.AddContainerNameToHostname {
+		hostname = sl.appendBaseName(hostname)
+	}
+
+	// How critical is the log
+	var level syslogger.Priority
+	switch msg.Pipe {
+	case processlogger.STDOUT:
+		level = syslogger.LOG_INFO
+	case processlogger.STDERR:
+		level = syslogger.LOG_CRIT
+	}
+
+	// Can we detect the criticality from the log?
+	if msg.Config.Syslog.ExtractLogLevel {
+		detectedlevel, err := extractJSONlevel([]byte(msg.Message))
+		if err == nil {
+			level = detectedlevel
+		}
+		// If extraction fails there is nowhere to report the error to at the moment;
+		// the process manager should really consume it. The default level for the
+		// pipe is used in that case.
+	}
+	// Send the log
+	sl.send(sl.loggingFacility, level, tag, hostname, msg.Message)
+}
+
+// Shutdown will try to close the connection to the syslog server. This is a best effort close.
+// A chan error is returned; a nil value is sent on it once the connection is closed.
+func (sl *Syslog) Shutdown() chan error {
+	c := make(chan error, 1)
+	go func() {
+		// If it fails so fast that the logger didn't start, then it will be nil.
+		// Nothing started, nothing to close.
+		if sl.logwriter == nil {
+			c <- nil
+			return
+		}
+		err := sl.logwriter.Close()
+		if err != nil {
+			c <- fmt.Errorf("failed to close syslog connection. Error: %s", err)
+			return
+		}
+		c <- nil
+	}()
+	return c
+}
diff --git a/processlogger/syslog/syslog_test.go b/processlogger/syslog/syslog_test.go
new file mode 100644
index 0000000..e7a6b99
--- /dev/null
+++ b/processlogger/syslog/syslog_test.go
@@ -0,0 +1,25 @@
+package syslog
+
+import (
+	"testing"
+
+	syslogger "github.com/silverstagtech/srslog"
+)
+
+func TestSyslogConnection(t *testing.T) {
+	writer, err := syslogger.Dial(
+		"udp",
+		"127.0.0.2:514",
+		syslogger.LOG_INFO|syslogger.LOG_KERN,
+		"launch_test",
+	)
+	if err != nil {
+		t.Logf("Failed to connect. 
Error: %s", err) + t.Fail() + } + + err = writer.Info("Hello from Go") + if err != nil { + t.Logf("Failed to write message") + } +} diff --git a/processmanager/process.go b/processmanager/process.go new file mode 100644 index 0000000..7537990 --- /dev/null +++ b/processmanager/process.go @@ -0,0 +1,32 @@ +package processmanager + +import ( + "os" + "os/exec" + "sync" + + "github.com/morfien101/launch/internallogger" + + "github.com/morfien101/launch/configfile" +) + +// Process is used to hold config and state of a process +type Process struct { + pmlogger internallogger.IntLogger + sync.RWMutex + config *configfile.Process + exiting bool + exited bool + exitcode int + shutdown chan bool + sigChan chan os.Signal + loggerTag string + proc *exec.Cmd + closePipesChan chan bool +} + +func (p *Process) running() bool { + p.RLock() + defer p.RUnlock() + return p.exiting +} diff --git a/processmanager/processmanager.go b/processmanager/processmanager.go new file mode 100644 index 0000000..4fe15d4 --- /dev/null +++ b/processmanager/processmanager.go @@ -0,0 +1,380 @@ +// Package processmanager is responsible for running the processes defined in the configuration. +package processmanager + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "sync" + "syscall" + "time" + + "github.com/morfien101/launch/configfile" + "github.com/morfien101/launch/internallogger" + "github.com/morfien101/launch/processlogger" +) + +const ( + initProcess = "init" + mainProcess = "main" +) + +// ProcessManger holds the config and state of the running processes. +type ProcessManger struct { + config configfile.Processes + logger *processlogger.LogManager + pmlogger internallogger.IntLogger + Signals chan os.Signal + mainProcesses []*Process + EndList []*processEnd + wg sync.WaitGroup + tumble chan bool + shuttingDown bool +} + +type processEnd struct { + Name string `json:"name"` + ProcessType string `json:"type"` + Error error `json:"runtime_error,omitempty"` + ExitCode int `json:"exit_code"` +} + +// New will create a ProcessManager with the supplied config and return it +func New( + config configfile.Processes, + logManager *processlogger.LogManager, + pmlogger internallogger.IntLogger, + signalChan chan os.Signal, +) *ProcessManger { + pm := &ProcessManger{ + config: config, + logger: logManager, + pmlogger: pmlogger, + Signals: signalChan, + tumble: make(chan bool, 1), + } + + // Once we get a single process fail we shutdown everything. + // A SIGTERM is sent to the master signal channel + pmlogger.Debugln("Starting terminator go func for when signals arrive") + go pm.terminator() + + return pm +} + +func (pm *ProcessManger) signalRecplicator() { + for { + select { + case signalIn := <-pm.Signals: + f := func(procs []*Process) { + for _, proc := range procs { + if !proc.running() { + proc.sigChan <- signalIn + } + } + } + f(pm.mainProcesses) + + } + } +} + +func (pm *ProcessManger) terminator() { + for { + select { + case _ = <-pm.tumble: + // If the process manager is already shutting down we will get a few + // signals from the processes as they turn off. We can safely discard + // them since we expect them. + if pm.shuttingDown { + continue + } + // If we are not in shutdown mode trigger it. Them mark it as shutting down. + pm.shuttingDown = true + // We only need to send signals to propagate if we have started some mains. 
+ if pm.mainProcesses != nil { + pm.Signals <- syscall.SIGTERM + } + } + } +} + +// RunInitProcesses will run all of the processes that are under the +// init processes configuration. All init processes will be run sequentially +// in the order supplied and MUST return success before the next is started. +// If a process does not return successfully an error is returned and further +// processing will stop. +func (pm *ProcessManger) RunInitProcesses() (string, error) { + pm.pmlogger.Println("Starting Init Processes") + // Start the main processes + for _, procConfig := range pm.config.InitProcesses { + // Create a process object + proc := &Process{ + config: procConfig, + pmlogger: pm.pmlogger, + sigChan: make(chan os.Signal, 1), + } + pm.pmlogger.Debugf("Attempting to run %s.\n", proc.config.CMD) + // setup logging hooks + // If this fails we can't carry on. + err := pm.setupProcess(proc) + if err != nil { + return "", err + } + signalReplicatorFuncChan := make(chan struct{}, 1) + destroySignalReplicator := func() { + signalReplicatorFuncChan <- struct{}{} + } + go func() { + for { + select { + case sig := <-pm.Signals: + proc.sigChan <- sig + case <-signalReplicatorFuncChan: + return + } + } + }() + + // Run the process + endstate := proc.runProcess(initProcess) + pm.pmlogger.Debugf("Finished running %s.\n", proc.config.CMD) + pm.EndList = append(pm.EndList, &endstate) + if endstate.Error != nil { + pm.pmlogger.Debugln("The last init command failed. Stack will now tumble.") + pm.tumble <- true + + err := fmt.Errorf("Process %s failed. Error reported: %s", procConfig.Name, endstate.Error) + output := make(chan string, 1) + pm.exitStatusPrinter(output) + + destroySignalReplicator() + + return <-output, err + } + destroySignalReplicator() + } + return "", nil +} + +// RunMainProcesses will start the processes listed in sequential order. +// Processes are expected to start and stay running. A failure of one +// will cause all the ProcessManger to send termination signals to all +// remaining and subsqently kill the processes manager. +func (pm *ProcessManger) RunMainProcesses() (chan string, error) { + // We need to wait for signals and repeat them into the processes + pm.pmlogger.Debugln("Starting signal catcher go func") + go pm.signalRecplicator() + + pm.pmlogger.Println("Starting Main Processes") + // Start the main processes + for _, procConfig := range pm.config.MainProcesses { + // Create a process object + pm.wg.Add(1) + pm.pmlogger.Debugf("Adding %s to the list of main processes.\n", procConfig.CMD) + proc := &Process{ + config: procConfig, + pmlogger: pm.pmlogger, + sigChan: make(chan os.Signal, 1), + shutdown: make(chan bool, 1), + } + pm.mainProcesses = append(pm.mainProcesses, proc) + // setup logging hooks + // If this fails we can't carry on. 
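+		// setupProcess also wires the child's stdout and stderr into the log manager
+		// before the process is started.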
+		err := pm.setupProcess(proc)
+		if err != nil {
+			return nil, err
+		}
+		// Run the process
+		go func() {
+			pm.pmlogger.Debugf("Starting %s.\n", proc.config.CMD)
+			endstate := proc.runProcess(mainProcess)
+			pm.EndList = append(pm.EndList, &endstate)
+			pm.pmlogger.Debugf("%s has terminated.\n", proc.config.CMD)
+			pm.tumble <- true
+			pm.wg.Done()
+		}()
+	}
+
+	exitStatusTextChan := make(chan string, 1)
+	go pm.waitMain(exitStatusTextChan)
+
+	return exitStatusTextChan, nil
+}
+
+func (pm *ProcessManger) waitMain(output chan string) {
+	pm.pmlogger.Debugln("Starting wait on waitgroup for main processes.")
+	pm.wg.Wait()
+	pm.pmlogger.Debugln("Passed waitgroup for main processes.")
+	pm.exitStatusPrinter(output)
+}
+
+func (pm *ProcessManger) exitStatusPrinter(output chan string) {
+	b, err := json.Marshal(pm.EndList)
+	if err != nil {
+		pm.pmlogger.Debugf("Error generating end state. Error: %s\n", err)
+	}
+	output <- string(b)
+}
+
+// setupProcess creates the process object and links its stdout and stderr to the
+// process logger.
+// An error is returned if anything fails.
+func (pm *ProcessManger) setupProcess(proc *Process) error {
+	proc.proc = exec.Command(proc.config.CMD, proc.config.Args...)
+
+	procStdOut, err := proc.proc.StdoutPipe()
+	if err != nil {
+		return fmt.Errorf("Failed to connect to stdout pipe. Error: %s", err)
+	}
+
+	procStdErr, err := proc.proc.StderrPipe()
+	if err != nil {
+		return fmt.Errorf("Failed to connect to stderr pipe. Error: %s", err)
+	}
+
+	proc.closePipesChan = pm.redirectOutput(procStdOut, procStdErr, proc.config.LoggerConfig)
+
+	return nil
+}
+
+// redirectOutput takes the pipes of the process and redirects them to the logger
+// configured for the process.
+func (pm *ProcessManger) redirectOutput(stdout, stderr io.ReadCloser, config configfile.LoggingConfig) chan bool {
+	closePipetrigger := make(chan bool, 1)
+	go func() {
+		<-closePipetrigger
+		stdout.Close()
+		stderr.Close()
+	}()
+
+	stdOutScanner := bufio.NewScanner(stdout)
+	stdErrScanner := bufio.NewScanner(stderr)
+
+	newLog := func(from processlogger.Pipe, msg string) processlogger.LogMessage {
+		return processlogger.LogMessage{
+			Source:  config.ProcessName,
+			Pipe:    from,
+			Config:  config,
+			Message: msg,
+		}
+	}
+
+	go func() {
+		for stdOutScanner.Scan() {
+			pm.logger.Submit(newLog(processlogger.STDOUT, stdOutScanner.Text()+"\n"))
+		}
+	}()
+	go func() {
+		for stdErrScanner.Scan() {
+			pm.logger.Submit(newLog(processlogger.STDERR, stdErrScanner.Text()+"\n"))
+		}
+	}()
+
+	return closePipetrigger
+}
+
+// runProcess will spawn a child process and return only once that child
+// has exited, either cleanly or with an error.
+func (p *Process) runProcess(processType string) processEnd {
+	finalState := processEnd{
+		Name:        p.config.CMD,
+		ProcessType: processType,
+		ExitCode:    -1,
+	}
+
+	if err := p.proc.Start(); err != nil {
+		p.exitcode = 1
+		finalState.Error = err
+		finalState.ExitCode = 1
+		return finalState
+	}
+
+	// Wait for the process to finish
+	done := make(chan error, 1)
+	go func() {
+		done <- p.proc.Wait()
+		// Close the pipes that redirect std out and err
+		p.closePipesChan <- true
+	}()
+
+	// Wait for signals
+	go func() {
+		p.pmlogger.Debugf("starting signal watch for %s\n", p.config.Name)
+		exitTimeout := make(chan bool, 1)
+		for {
+			select {
+			case signal := <-p.sigChan:
+
+				// Collect signals and pass them on to the main command that we are running.
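+				// SIGTERM and SIGINT also arm the forceful kill timer below; any other
+				// signal is only forwarded.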
+ p.pmlogger.Printf("Got signal %s, forwarding onto %s\n", signal, p.config.Name) + err := p.proc.Process.Signal(signal) + if err != nil { + // Failed to send signal to process + done <- fmt.Errorf("Failed to send signal %s to running instance of %s. Allowing crash when process manager dies", signal, p.config.CMD) + } + + if signal == syscall.SIGINT || signal == syscall.SIGTERM { + // Signals SIGTERM and SIGINT will cause the app to stop. We need to timeout after a specified time. + if !p.exiting { + go func() { + // This will always fire. It is used to break this for loop in the timeout case + // below. The value passed down the channel will determine if any action needs + // to take place. + p.pmlogger.Debugf("Starting forceful termination timer for %s\n", p.config.Name) + time.AfterFunc(time.Duration(p.config.TermTimeout)*time.Second, func() { + exitTimeout <- p.running() + }) + }() + } + p.Lock() + p.exiting = true + p.Unlock() + } + case timeout := <-exitTimeout: + // If a process is still running after X seconds then we just terminate it. + if timeout { + p.pmlogger.Printf("Forcefully killing process %s because termination timeout has been reached.\n", p.config.Name) + err := p.proc.Process.Kill() + if err != nil { + done <- fmt.Errorf("Failed to terminate %s", p.config.CMD) + } + } + break + } + } + }() + + // Wait here to get an err. + // It could be nil which would indicate that the process exited without an error. + // It could be an exec.ExitError which would indicate that the process terminated badly. + // We will try to get the error but its not possible in all OSs. + // This should work on Linux and Windows. See below for more details: + // https://stackoverflow.com/questions/10385551/get-exit-code-go + finalState.Error = <-done + p.exited = true + + if exiterr, ok := finalState.Error.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + + // This works on both Unix and Windows. Although package + // syscall is generally platform dependent, WaitStatus is + // defined for both Unix and Windows and in both cases has + // an ExitStatus() method with the same signature. + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + exitstatus := status.ExitStatus() + if exitstatus == -1 { + exitstatus = 1 + } + p.exitcode = exitstatus + } else { + p.pmlogger.Debugf("Could not determine actual exit code for %s. 
Assuming 1 because it failed.\n", p.config.Name) + p.exitcode = 1 + } + } + + finalState.ExitCode = p.exitcode + + return finalState +} diff --git a/testbin/go.mod b/testbin/go.mod new file mode 100644 index 0000000..839ba32 --- /dev/null +++ b/testbin/go.mod @@ -0,0 +1,5 @@ +module github.com/morfien101/launch/testbin + +go 1.12 + +require github.com/silverstagtech/randomstring v0.1.1 diff --git a/testbin/go.sum b/testbin/go.sum new file mode 100644 index 0000000..304746a --- /dev/null +++ b/testbin/go.sum @@ -0,0 +1,2 @@ +github.com/silverstagtech/randomstring v0.1.1 h1:T8lqM0GZtnO+wcVA+OeX923h8tuCK6RFLJBs+PKdR5s= +github.com/silverstagtech/randomstring v0.1.1/go.mod h1:Q8BctodC+zlFJmmJIHZv/jLP+ouctRVE2QxaflDriZo= diff --git a/testbin/main.go b/testbin/main.go new file mode 100644 index 0000000..145e0fe --- /dev/null +++ b/testbin/main.go @@ -0,0 +1,142 @@ +// testbin is used to test the process manager and is not +// included in the building of launch +package main + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "os" + "os/signal" + "syscall" + "time" + + "github.com/silverstagtech/randomstring" +) + +const ( + //VERSION is the used to display a version number + VERSION = "0.1.0" +) + +var ( + spamout = flag.Bool("spam", false, "Send progressivly more out to STDOUT. Use with -stdout and -stderr") + turnOnSTDOUT = flag.Bool("stdout", false, "enabled stdout spamming.") + turnOnSTDERR = flag.Bool("stderr", false, "enabled stderr spamming.") + noenvflag = flag.Bool("no-env", false, "Don't display the environment variables.") + noNewLineFlag = flag.Bool("no-newline", false, "Remove new line from stdout and stderr output.") + echoFlag = flag.String("id", "", "Prints this on execution") + timeoutSeconds = flag.Int("timeout", 10, "How long to wait before dying in seconds.") + exitBad = flag.Int("exit-bad", 0, "Exit with an exitcode of 1 to indicate a bad exit code.") + ignoreSignals = flag.Bool("ignore-signals", false, "Ignore the signals that the process gets.") + logjson = flag.Int("log-json", 0, "Log some random json messages. 
The number says how many logs you want.") + helpflag = flag.Bool("h", false, "Show the help menu") + versionflag = flag.Bool("v", false, "Displays a version number.") +) + +func main() { + flag.Parse() + if *helpflag { + flag.PrintDefaults() + os.Exit(0) + } + if *versionflag { + fmt.Println(VERSION) + os.Exit(0) + } + // Start + log.Printf("Starting %s version %s", os.Args[0], VERSION) + + signals := make(chan os.Signal, 1) + timeout := make(chan bool, 1) + done := make(chan string, 1) + + signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + + if len(*echoFlag) != 0 { + fmt.Println(*echoFlag) + } + + fmt.Printf("got arguments: %s\n", os.Args) + + if !*noenvflag { + fmt.Println("Below is the environment variables that I can see.") + for _, env := range os.Environ() { + fmt.Println(env) + } + } + + fmt.Println("Waiting for a signal or timeout...") + go func() { + for { + select { + case signal := <-signals: + msg := fmt.Sprintf("Got signal %s", signal) + if *ignoreSignals { + fmt.Println(msg, "but, told to ignoring it.") + continue + } + done <- fmt.Sprintln(msg) + case <-timeout: + done <- fmt.Sprintln("Timed out") + } + return + } + }() + time.AfterFunc(time.Second*time.Duration(*timeoutSeconds), func() { + timeout <- true + }) + + if *spamout { + fmt.Println("Starting spam generators...") + if *turnOnSTDOUT { + go spammer("STDOUT", *noNewLineFlag, os.Stdout) + } + if *turnOnSTDERR { + go spammer("STDERR", *noNewLineFlag, os.Stderr) + } + } + + if *logjson > 0 { + for i := 0; i < *logjson; i++ { + log, err := generateJSONLog() + if err != nil { + fmt.Println(err) + } else { + fmt.Println(log) + } + } + } + + fmt.Println(<-done) + + if *exitBad > 0 { + os.Stderr.WriteString(fmt.Sprintf("Exiting badly because of -bad-exit flag. Using exitcode %d", *exitBad)) + os.Exit(*exitBad) + } +} + +func generateJSONLog() (string, error) { + r, _ := randomstring.Generate(4, 4, 4, 4, 64) + output := struct { + Name string `json:"name"` + Timestamp string `json:"time_stamp"` + Severity string `json:"level"` + SomeRandom string `json:"some_random"` + SomeStatic string `json:"some_static"` + }{ + Name: "container-bootrapper testbin", + Timestamp: time.Now().String(), + Severity: "crit", + SomeStatic: "look_for_me", + SomeRandom: r, + } + b, err := json.Marshal(output) + if err != nil { + return fmt.Sprintf(`{"msg":"Error generating log","error":"%s"}`, err), nil + } + + return string(b), nil + +} diff --git a/testbin/spammer.go b/testbin/spammer.go new file mode 100644 index 0000000..41a0db5 --- /dev/null +++ b/testbin/spammer.go @@ -0,0 +1,54 @@ +package main + +import ( + "fmt" + "os" + "strings" +) + +func spammer(lable string, nonewline bool, target *os.File) { + spam := newSpamCan(10, lable, nonewline) + for { + target.WriteString(spam.getSpam()) + } +} + +type spamCan struct { + offset int + spamIncrement int + currentSpam []string + endLineToken string + lable string +} + +func newSpamCan(increment int, lable string, noNewLine bool) *spamCan { + sc := &spamCan{ + offset: 33, + currentSpam: make([]string, 0), + spamIncrement: increment, + lable: lable, + endLineToken: "", + } + if !noNewLine { + sc.endLineToken = "\n" + } + return sc +} + +func (spam *spamCan) nextOffset() int { + if spam.offset < 125 { + spam.offset++ + } else { + spam.offset = 33 + } + + return spam.offset +} + +func (spam *spamCan) getSpam() string { + for i := 0; i < spam.spamIncrement; i++ { + nextChar := string(byte(spam.nextOffset())) + spam.currentSpam = append(spam.currentSpam, nextChar) + } + return 
fmt.Sprintf("%s - %s%s", spam.lable, strings.Join(spam.currentSpam, ""), spam.endLineToken) +}
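As an illustration of the logger plugin surface, a do-nothing engine that satisfies the processlogger.Logger interface and registers itself at init time could look like the sketch below. The nooplogger package, the noop engine name, and the behavior shown are assumptions made for this example only.

// Package nooplogger is an illustrative sketch of a logging engine; the name and
// behavior are assumptions for the example.
package nooplogger

import (
	"github.com/morfien101/launch/configfile"
	"github.com/morfien101/launch/processlogger"
)

// LoggerTag is the engine name that would be used in the configuration file.
const LoggerTag = "noop"

func init() {
	// Registration happens at package init time, in the same way as the syslog engine.
	processlogger.RegisterLogger(LoggerTag, func() processlogger.Logger {
		return &noop{}
	})
}

// noop accepts log messages and silently drops them.
type noop struct{}

// RegisterConfig has no settings to store for this engine.
func (n *noop) RegisterConfig(configfile.LoggingConfig, configfile.DefaultLoggerDetails) error {
	return nil
}

// Start has nothing to open.
func (n *noop) Start() error { return nil }

// Shutdown reports a clean close immediately.
func (n *noop) Shutdown() chan error {
	c := make(chan error, 1)
	c <- nil
	return c
}

// Submit discards the message.
func (n *noop) Submit(processlogger.LogMessage) {}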