Skip to content

Commit

Permalink
backup: remove deprecated hook support (#12066)
Browse files Browse the repository at this point in the history
* backup: remove deprecated hook support

Signed-off-by: deepthi <[email protected]>

* backup: document hook removal in release notes

Signed-off-by: deepthi <[email protected]>

* backup: improve logging

Signed-off-by: deepthi <[email protected]>

Signed-off-by: deepthi <[email protected]>
  • Loading branch information
deepthi authored Jan 11, 2023
1 parent d91a9ad commit 7931087
Show file tree
Hide file tree
Showing 7 changed files with 13 additions and 157 deletions.
2 changes: 0 additions & 2 deletions build.env
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,6 @@ mkdir -p "$VTDATAROOT"
# TODO(mberlin): Which of these can be deleted?
ln -snf "$PWD/go/vt/zkctl/zksrv.sh" bin/zksrv.sh
ln -snf "$PWD/test/vthook-test.sh" vthook/test.sh
ln -snf "$PWD/test/vthook-test_backup_error" vthook/test_backup_error
ln -snf "$PWD/test/vthook-test_backup_transform" vthook/test_backup_transform

# install git hooks

Expand Down
11 changes: 8 additions & 3 deletions doc/releasenotes/16_0_0_summary.md
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,11 @@ is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess

- `vtctlclient OnlineDDL ... [complete|retry|cancel|cancel-all]` returns empty result on success instead of number of shard affected.

- VTTablet flag `--backup_storage_hook` has been removed, use one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.

- vtbackup flag `--backup_storage_hook` has been removed, use one of the builtin compression algorithms or `--external-compressor` and `--external-decompressor` instead.


### MySQL Compatibility

#### Transaction Isolation Level
Expand Down Expand Up @@ -261,7 +266,7 @@ This will allow users to start a transaction with these characteristics.

Vitess now supports views in sharded keyspace. Views are not created on the underlying database but are logically stored
in vschema.
Any query using view will get re-rewritten as derived table during query planning.
Any query using a view will get re-written as a derived table during query planning.
VSchema Example

```json
Expand All @@ -284,9 +289,9 @@ The flag `lock-shard-timeout` has been deprecated. Please use the newly introduc

### VTTestServer

#### Improvement
#### Performance Improvement

Creating a database with vttestserver was taking ~45 seconds. This can be problematic in test environments where testcases do a lot of `create` and `drop` database.
In an effort to minimize the database creation time, we have changed the value of `tablet_refresh_interval` to 10s while instantiating vtcombo during vttestserver initialization. We have also made this configurable so that it can be reduced further if desired.
For any production cluster the default value of this flag is still [1 minute](https://vitess.io/docs/15.0/reference/programs/vtgate/). Reducing this values might put more stress on Topo Server (since we now read from Topo server more often) but for testing purposes
For any production cluster the default value of this flag is still [1 minute](https://vitess.io/docs/15.0/reference/programs/vtgate/). Reducing this value might put more stress on Topo Server (since we now read from Topo server more often) but for testing purposes
this shouldn't be a concern.
12 changes: 1 addition & 11 deletions go/vt/mysqlctl/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,16 +73,8 @@ var (
// but none of them are complete.
ErrNoCompleteBackup = errors.New("backup(s) found but none are complete")

// backupStorageHook contains the hook name to use to process
// backup files. If not set, we will not process the files. It is
// only used at backup time. Then it is put in the manifest,
// and when decoding a backup, it is read from the manifest,
// and used as the transform hook name again.
backupStorageHook string

// backupStorageCompress can be set to false to not use gzip
// on the backups. Usually would be set if a hook is used, and
// the hook compresses the data.
// on the backups.
backupStorageCompress = true

// backupCompressBlockSize is the splitting size for each
Expand All @@ -104,8 +96,6 @@ func init() {
}

func registerBackupFlags(fs *pflag.FlagSet) {
fs.StringVar(&backupStorageHook, "backup_storage_hook", backupStorageHook, "if set, we send the contents of the backup files through this hook.")
_ = fs.MarkDeprecated("backup_storage_hook", "consider using one of the builtin compression algorithms or --external-compressor and --external-decompressor instead.")
fs.BoolVar(&backupStorageCompress, "backup_storage_compress", backupStorageCompress, "if set, the backup files will be compressed.")
fs.IntVar(&backupCompressBlockSize, "backup_storage_block_size", backupCompressBlockSize, "if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000).")
fs.IntVar(&backupCompressBlocks, "backup_storage_number_blocks", backupCompressBlocks, "if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, at once, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression.")
Expand Down
2 changes: 1 addition & 1 deletion go/vt/mysqlctl/backupengine.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ type BackupParams struct {
// Concurrency is the value of -concurrency flag given to Backup command
// It determines how many files are processed in parallel
Concurrency int
// Extra env variables for pre-backup and post-backup transform hooks
// Extra env variables used while stopping and starting mysqld
HookExtraEnv map[string]string
// TopoServer, Keyspace and Shard are used to discover primary tablet
TopoServer *topo.Server
Expand Down
69 changes: 3 additions & 66 deletions go/vt/mysqlctl/builtinbackupengine.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sync2"
"vitess.io/vitess/go/vt/concurrency"
"vitess.io/vitess/go/vt/hook"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/mysqlctl/backupstorage"
Expand Down Expand Up @@ -73,8 +72,7 @@ type BuiltinBackupEngine struct {
}

// builtinBackupManifest represents the backup. It lists all the files, the
// Position that the backup was taken at, and the transform hook used,
// if any.
// Position that the backup was taken at, the compression engine used, etc.
type builtinBackupManifest struct {
// BackupManifest is an anonymous embedding of the base manifest struct.
BackupManifest
Expand All @@ -88,9 +86,6 @@ type builtinBackupManifest struct {
// FileEntries contains all the files in the backup
FileEntries []FileEntry

// TransformHook that was used on the files, if any.
TransformHook string

// SkipCompress is true if the backup files were NOT run through gzip.
// The field is expressed as a negative because it will come through as
// false for backups that were created before the field existed, and those
Expand Down Expand Up @@ -181,7 +176,8 @@ func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) {
// ExecuteBackup runs a backup based on given params. This could be a full or incremental backup.
// The function returns a boolean that indicates if the backup is usable, and an overall error.
func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) {
params.Logger.Infof("Hook: %v, Compress: %v", backupStorageHook, backupStorageCompress)
params.Logger.Infof("Executing Backup at %v for keyspace/shard %v/%v on tablet %v, concurrency: %v, compress: %v, incrementalFromPos: %v",
params.BackupTime, params.Keyspace, params.Shard, params.TabletAlias, params.Concurrency, backupStorageCompress, params.IncrementalFromPos)

if isIncrementalBackup(params) {
return be.executeIncrementalBackup(ctx, params, bh)
Expand Down Expand Up @@ -301,11 +297,6 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par
// and an overall error.
func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) {

params.Logger.Infof("Hook: %v, Compress: %v", backupStorageHook, backupStorageCompress)
if backupStorageHook != "" {
log.Warning("Flag --backup_storage_hook has been deprecated, consider using one of the builtin compression algorithms or --external-compressor and --external-decompressor instead.")
}

if params.IncrementalFromPos != "" {
return be.executeIncrementalBackup(ctx, params, bh)
}
Expand Down Expand Up @@ -551,7 +542,6 @@ func (be *BuiltinBackupEngine) backupFiles(

// Builtin-specific fields
FileEntries: fes,
TransformHook: backupStorageHook,
SkipCompress: !backupStorageCompress,
CompressionEngine: CompressionEngineName,
}
Expand Down Expand Up @@ -686,19 +676,6 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara

var writer io.Writer = bw

// Create the external write pipe, if any.
var pipe io.WriteCloser
var wait hook.WaitFunc
if backupStorageHook != "" {
h := hook.NewHook(backupStorageHook, []string{"-operation", "write"})
h.ExtraEnv = params.HookExtraEnv
pipe, wait, _, err = h.ExecuteAsWritePipe(writer)
if err != nil {
return vterrors.Wrapf(err, "'%v' hook returned error", backupStorageHook)
}
writer = pipe
}

// Create the gzip compression pipe, if necessary.
var compressor io.WriteCloser
if backupStorageCompress {
Expand Down Expand Up @@ -727,20 +704,6 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara
}
}

// Close the hook pipe if necessary.
if pipe != nil {
if err := pipe.Close(); err != nil {
return vterrors.Wrap(err, "cannot close hook pipe")
}
stderr, err := wait()
if stderr != "" {
params.Logger.Infof("'%v' hook returned stderr: %v", backupStorageHook, stderr)
}
if err != nil {
return vterrors.Wrapf(err, "'%v' returned error", backupStorageHook)
}
}

// Close the backupPipe to finish writing on destination.
if err = bw.Close(); err != nil {
return vterrors.Wrapf(err, "cannot flush destination: %v", name)
Expand Down Expand Up @@ -820,10 +783,6 @@ func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params Restor
return nil, err
}

if bm.TransformHook != "" {
log.Warning("Flag --backup_storage_hook has been deprecated, consider using one of the builtin compression algorithms or --external-compressor and --external-decompressor instead.")
}

var err error
if bm.Incremental {
err = be.executeRestoreIncrementalBackup(ctx, params, bh, bm)
Expand Down Expand Up @@ -919,17 +878,6 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa
dst := bufio.NewWriterSize(dstFile, writerBufferSize)
var reader io.Reader = bp

// Create the external read pipe, if any.
var wait hook.WaitFunc
if bm.TransformHook != "" {
h := hook.NewHook(bm.TransformHook, []string{"-operation", "read"})
h.ExtraEnv = params.HookExtraEnv
reader, wait, _, err = h.ExecuteAsReadPipe(reader)
if err != nil {
return vterrors.Wrapf(err, "'%v' hook returned error", bm.TransformHook)
}
}

// Create the uncompresser if needed.
if !bm.SkipCompress {
var decompressor io.ReadCloser
Expand Down Expand Up @@ -974,17 +922,6 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa
return vterrors.Wrap(err, "failed to copy file contents")
}

// Close the Pipe.
if wait != nil {
stderr, err := wait()
if stderr != "" {
log.Infof("'%v' hook returned stderr: %v", bm.TransformHook, stderr)
}
if err != nil {
return vterrors.Wrapf(err, "'%v' returned error", bm.TransformHook)
}
}

// Check the hash.
hash := bp.HashString()
if hash != fe.Hash {
Expand Down
20 changes: 0 additions & 20 deletions test/vthook-test_backup_error

This file was deleted.

54 changes: 0 additions & 54 deletions test/vthook-test_backup_transform

This file was deleted.

0 comments on commit 7931087

Please sign in to comment.