Skip to content

Commit

Permalink
Merge #63678 #64022
Browse files Browse the repository at this point in the history
63678: cliccl/debug_backup.go: fix export backup table with multiple ranges r=pbardea a=Elliebababa

Previously, `MakeBackupTableEntry` exported the SST files of only a
single range of a table. This was problematic if the table had
multiple ranges.

This patch changes the struct of `BackupTableEntry` to allow
grouping sst files of a table over ranges.

Release note (bug fix): Fixed a bug in `debug backup export` that
occurred when inspecting a table with multiple ranges.

64022: sql/pgwire: fix encoding of decimals with trailing 0s r=otan a=rafiss

fixes #56907

The dscale is defined as "the number of digits (in base 10)
visible after the decimal separator," so a negative
dscale makes no sense and is not valid.

This was preventing NPGSQL from being able to decode the binary decimals
that CockroachDB was sending.

Release note (bug fix): The binary encoding of decimals no longer will
have negative `dscale` values. This was preventing Npgsql from being
able to read some binary decimals from CockroachDB.

Co-authored-by: elliebababa <[email protected]>
Co-authored-by: Rafi Shamim <[email protected]>
  • Loading branch information
3 people committed Apr 22, 2021
3 parents 8088e62 + c8b74b1 + f7fe860 commit 21e954f
Show file tree
Hide file tree
Showing 8 changed files with 268 additions and 84 deletions.
15 changes: 11 additions & 4 deletions pkg/ccl/backupccl/targets.go
Original file line number Diff line number Diff line change
Expand Up @@ -444,13 +444,16 @@ func selectTargets(
return matched.Descs, matched.RequestedDBs, nil, nil
}

// EntryFiles is a group of sst files of a backup table range
type EntryFiles []roachpb.ImportRequest_File

// BackupTableEntry wraps information of a table retrieved
// from backup manifests.
// exported to cliccl for exporting data directly from backup sst.
type BackupTableEntry struct {
Desc catalog.TableDescriptor
Span roachpb.Span
Files []roachpb.ImportRequest_File
Files []EntryFiles
}

// MakeBackupTableEntry looks up the descriptor of fullyQualifiedTableName
Expand Down Expand Up @@ -525,11 +528,15 @@ func MakeBackupTableEntry(
return BackupTableEntry{}, errors.Wrapf(err, "making spans for table %s", fullyQualifiedTableName)
}

res := BackupTableEntry{
backupTableEntry := BackupTableEntry{
tbDesc,
tablePrimaryIndexSpan,
entry[0].Files,
make([]EntryFiles, 0),
}

return res, nil
for _, e := range entry {
backupTableEntry.Files = append(backupTableEntry.Files, e.Files)
}

return backupTableEntry, nil
}
173 changes: 96 additions & 77 deletions pkg/ccl/cliccl/debug_backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,16 @@ import (
"github.com/spf13/cobra"
)

var externalIODir string
var exportTableName string
var readTime string
var destination string
var format string
var nullas string
// debugBackupArgs captures the parameters of the `debug backup` command.
var debugBackupArgs struct {
externalIODir string

exportTableName string
readTime string
destination string
format string
nullas string
}

func init() {

Expand Down Expand Up @@ -105,42 +109,42 @@ func init() {

backupFlags := backupCmds.Flags()
backupFlags.StringVarP(
&externalIODir,
&debugBackupArgs.externalIODir,
cliflags.ExternalIODir.Name,
cliflags.ExternalIODir.Shorthand,
"", /*value*/
cliflags.ExternalIODir.Usage())

exportDataCmd.Flags().StringVarP(
&exportTableName,
&debugBackupArgs.exportTableName,
cliflags.ExportTableTarget.Name,
cliflags.ExportTableTarget.Shorthand,
"", /*value*/
cliflags.ExportTableTarget.Usage())

exportDataCmd.Flags().StringVarP(
&readTime,
&debugBackupArgs.readTime,
cliflags.ReadTime.Name,
cliflags.ReadTime.Shorthand,
"", /*value*/
cliflags.ReadTime.Usage())

exportDataCmd.Flags().StringVarP(
&destination,
&debugBackupArgs.destination,
cliflags.ExportDestination.Name,
cliflags.ExportDestination.Shorthand,
"", /*value*/
cliflags.ExportDestination.Usage())

exportDataCmd.Flags().StringVarP(
&format,
&debugBackupArgs.format,
cliflags.ExportTableFormat.Name,
cliflags.ExportTableFormat.Shorthand,
"csv", /*value*/
cliflags.ExportTableFormat.Usage())

exportDataCmd.Flags().StringVarP(
&nullas,
&debugBackupArgs.nullas,
cliflags.ExportCSVNullas.Name,
cliflags.ExportCSVNullas.Shorthand,
"null", /*value*/
Expand All @@ -165,10 +169,10 @@ func newBlobFactory(ctx context.Context, dialing roachpb.NodeID) (blobs.BlobClie
if dialing != 0 {
return nil, errors.Errorf("accessing node %d during nodelocal access is unsupported for CLI inspection; only local access is supported with nodelocal://self", dialing)
}
if externalIODir == "" {
externalIODir = filepath.Join(server.DefaultStorePath, "extern")
if debugBackupArgs.externalIODir == "" {
debugBackupArgs.externalIODir = filepath.Join(server.DefaultStorePath, "extern")
}
return blobs.NewLocalClient(externalIODir)
return blobs.NewLocalClient(debugBackupArgs.externalIODir)
}

func externalStorageFromURIFactory(
Expand Down Expand Up @@ -302,10 +306,10 @@ func runListIncrementalCmd(cmd *cobra.Command, args []string) error {

func runExportDataCmd(cmd *cobra.Command, args []string) error {

if exportTableName == "" {
if debugBackupArgs.exportTableName == "" {
return errors.New("export data requires table name specified by --table flag")
}
fullyQualifiedTableName := strings.ToLower(exportTableName)
fullyQualifiedTableName := strings.ToLower(debugBackupArgs.exportTableName)
manifestPaths := args

ctx := context.Background()
Expand All @@ -318,9 +322,9 @@ func runExportDataCmd(cmd *cobra.Command, args []string) error {
manifests = append(manifests, manifest)
}

endTime, err := evalAsOfTimestamp(readTime)
endTime, err := evalAsOfTimestamp(debugBackupArgs.readTime)
if err != nil {
return errors.Wrapf(err, "eval as of timestamp %s", readTime)
return errors.Wrapf(err, "eval as of timestamp %s", debugBackupArgs.readTime)
}

codec := keys.TODOSQLCodec
Expand Down Expand Up @@ -364,90 +368,52 @@ func evalAsOfTimestamp(readTime string) (hlc.Timestamp, error) {

func showData(
ctx context.Context, entry backupccl.BackupTableEntry, endTime hlc.Timestamp, codec keys.SQLCodec,
) (err error) {

iters, cleanup, err := makeIters(ctx, entry)
if err != nil {
return errors.Wrapf(err, "make iters")
}
defer func() {
cleanupErr := cleanup()
if err == nil {
err = cleanupErr
}
}()

iter := storage.MakeMultiIterator(iters)
defer iter.Close()

rf, err := makeRowFetcher(ctx, entry, codec)
if err != nil {
return errors.Wrapf(err, "make row fetcher")
}
defer rf.Close(ctx)

startKeyMVCC, endKeyMVCC := storage.MVCCKey{Key: entry.Span.Key}, storage.MVCCKey{Key: entry.Span.EndKey}
kvFetcher := row.MakeBackupSSTKVFetcher(startKeyMVCC, endKeyMVCC, iter, endTime)

if err := rf.StartScanFrom(ctx, &kvFetcher); err != nil {
return errors.Wrapf(err, "row fetcher starts scan")
}
) error {

buf := bytes.NewBuffer([]byte{})
var writer *csv.Writer
if format != "csv" {
if debugBackupArgs.format != "csv" {
return errors.Newf("only exporting to csv format is supported")
}

buf := bytes.NewBuffer([]byte{})
if destination == "" {
if debugBackupArgs.destination == "" {
writer = csv.NewWriter(os.Stdout)
} else {
writer = csv.NewWriter(buf)
}

for {
datums, _, _, err := rf.NextRowDecoded(ctx)
if err != nil {
return errors.Wrapf(err, "decode row")
}
if datums == nil {
break
}
row := make([]string, datums.Len())
for i, datum := range datums {
if datum == tree.DNull {
row[i] = nullas
} else {
row[i] = datum.String()
}
}
if err := writer.Write(row); err != nil {
rf, err := makeRowFetcher(ctx, entry, codec)
if err != nil {
return errors.Wrapf(err, "make row fetcher")
}
defer rf.Close(ctx)

for _, files := range entry.Files {
if err := processEntryFiles(ctx, rf, files, entry.Span, endTime, writer); err != nil {
return err
}
writer.Flush()
}

if destination != "" {
dir, file := filepath.Split(destination)
if debugBackupArgs.destination != "" {
dir, file := filepath.Split(debugBackupArgs.destination)
store, err := externalStorageFromURIFactory(ctx, dir, security.RootUserName())
if err != nil {
return errors.Wrapf(err, "unable to open store to write files: %s", destination)
return errors.Wrapf(err, "unable to open store to write files: %s", debugBackupArgs.destination)
}
if err = store.WriteFile(ctx, file, bytes.NewReader(buf.Bytes())); err != nil {
_ = store.Close()
return err
}
return store.Close()
}
return err
return nil
}

func makeIters(
ctx context.Context, entry backupccl.BackupTableEntry,
ctx context.Context, files backupccl.EntryFiles,
) ([]storage.SimpleMVCCIterator, func() error, error) {
iters := make([]storage.SimpleMVCCIterator, len(entry.Files))
dirStorage := make([]cloud.ExternalStorage, len(entry.Files))
for i, file := range entry.Files {
iters := make([]storage.SimpleMVCCIterator, len(files))
dirStorage := make([]cloud.ExternalStorage, len(files))
for i, file := range files {
var err error
clusterSettings := cluster.MakeClusterSettings()
dirStorage[i], err = cloudimpl.MakeExternalStorage(ctx, file.Dir, base.ExternalIODirConfig{},
Expand Down Expand Up @@ -512,6 +478,59 @@ func makeRowFetcher(
return rf, nil
}

// processEntryFiles scans the KVs contained in one group of backup SST files
// (one EntryFiles per backed-up table range), restricted to span, decodes rows
// as of endTime using rf, and writes each decoded row to writer in CSV form.
// NULL datums are rendered as the --nullas flag value. The named error result
// lets the deferred iterator cleanup surface its error when the scan itself
// succeeded.
func processEntryFiles(
	ctx context.Context,
	rf row.Fetcher,
	files backupccl.EntryFiles,
	span roachpb.Span,
	endTime hlc.Timestamp,
	writer *csv.Writer,
) (err error) {
	iters, cleanup, err := makeIters(ctx, files)
	// NOTE(review): the deferred cleanup must tolerate a nil cleanup func —
	// if makeIters fails before constructing its closure, calling it
	// unconditionally would panic and mask the real error.
	defer func() {
		if cleanup == nil {
			return
		}
		if cleanupErr := cleanup(); err == nil {
			err = cleanupErr
		}
	}()
	if err != nil {
		return errors.Wrapf(err, "make iters")
	}

	iter := storage.MakeMultiIterator(iters)
	defer iter.Close()

	// Restrict the scan to the table span handed in by the caller.
	startKeyMVCC, endKeyMVCC := storage.MVCCKey{Key: span.Key}, storage.MVCCKey{Key: span.EndKey}
	kvFetcher := row.MakeBackupSSTKVFetcher(startKeyMVCC, endKeyMVCC, iter, endTime)

	if err := rf.StartScanFrom(ctx, &kvFetcher); err != nil {
		return errors.Wrapf(err, "row fetcher starts scan")
	}

	for {
		datums, _, _, err := rf.NextRowDecoded(ctx)
		if err != nil {
			return errors.Wrapf(err, "decode row")
		}
		// A nil row signals that the scan is exhausted.
		if datums == nil {
			break
		}
		rowDisplay := make([]string, datums.Len())
		for i, datum := range datums {
			if datum == tree.DNull {
				rowDisplay[i] = debugBackupArgs.nullas
			} else {
				rowDisplay[i] = datum.String()
			}
		}
		if err := writer.Write(rowDisplay); err != nil {
			return err
		}
		// Flush per row so output streams promptly when writing to stdout.
		writer.Flush()
	}
	return nil
}

type backupMetaDisplayMsg backupccl.BackupManifest
type backupFileDisplayMsg backupccl.BackupManifest_File

Expand Down
Loading

0 comments on commit 21e954f

Please sign in to comment.