From b32889201e072ebafd193694bd233d01945c24d8 Mon Sep 17 00:00:00 2001 From: Krzesimir Nowak Date: Wed, 8 May 2024 11:29:19 +0200 Subject: [PATCH 1/4] *: Rename to github.com/flatcar/azure-vhd-utils --- README.md | 4 ++-- go.mod | 2 +- upload/detectEmptyRanges.go | 8 ++++---- upload/metadata/metaData.go | 5 +++-- upload/upload.go | 9 +++++---- upload/uploadableRanges.go | 4 ++-- vhdInspectCmdHandler.go | 8 ++++---- vhdUploadCmdHandler.go | 11 ++++++----- vhdcore/bat/blockAllocationTable.go | 2 +- vhdcore/bat/blockAllocationTableFactory.go | 4 ++-- vhdcore/block/bitmap/factory.go | 4 ++-- vhdcore/block/block.go | 6 +++--- vhdcore/block/dataReadError.go | 2 +- vhdcore/block/differencingDiskBlockFactory.go | 6 +++--- vhdcore/block/differencingDiskBlockReader.go | 6 +++--- vhdcore/block/dynamicDiskBlockFactory.go | 6 +++--- vhdcore/block/dynamicDiskBlockReader.go | 6 +++--- vhdcore/block/factory.go | 2 +- vhdcore/block/factoryParams.go | 8 ++++---- vhdcore/block/fixedDiskBlockFactory.go | 4 ++-- vhdcore/block/fixedDiskBlockReader.go | 4 ++-- vhdcore/block/sectorFactory.go | 4 ++-- vhdcore/diskstream/diskstream.go | 10 +++++----- vhdcore/footer/diskGeometry.go | 2 +- vhdcore/footer/factory.go | 6 +++--- vhdcore/footer/footer.go | 4 ++-- vhdcore/footer/vhdFooterSerializer.go | 4 ++-- vhdcore/header/factory.go | 8 ++++---- vhdcore/header/header.go | 6 +++--- vhdcore/header/parentlocator/factory.go | 2 +- vhdcore/header/parentlocator/parentLocator.go | 2 +- vhdcore/reader/binaryReader.go | 2 +- vhdcore/reader/vhdReader.go | 2 +- vhdcore/validator/validator.go | 4 ++-- vhdcore/vhdfile/vhdFile.go | 10 +++++----- vhdcore/vhdfile/vhdFileFactory.go | 8 ++++---- vhdcore/writer/vhdWriter.go | 2 +- 37 files changed, 95 insertions(+), 92 deletions(-) diff --git a/README.md b/README.md index 9deef515..493cf23c 100644 --- a/README.md +++ b/README.md @@ -6,12 +6,12 @@ This project provides a Go package to read Virtual Hard Disk (VHD) file, a CLI i An implementation of VHD [VHD specification](https://technet.microsoft.com/en-us/virtualization/bb676673.aspx) can be found in the [vhdcore](/vhdcore) package. -[![Go Report Card](https://goreportcard.com/badge/github.com/Microsoft/azure-vhd-utils)](https://goreportcard.com/report/github.com/Microsoft/azure-vhd-utils) +[![Go Report Card](https://goreportcard.com/badge/github.com/flatcar/azure-vhd-utils)](https://goreportcard.com/report/github.com/flatcar/azure-vhd-utils) # Installation > Note: You must have Go installed on your machine, at version 1.11 or greater. 
[https://golang.org/dl/](https://golang.org/dl/) - go get github.com/Microsoft/azure-vhd-utils + go get github.com/flatcar/azure-vhd-utils # Features diff --git a/go.mod b/go.mod index f9d96900..2e83b0a9 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/Microsoft/azure-vhd-utils +module github.com/flatcar/azure-vhd-utils require ( github.com/Azure/azure-sdk-for-go v5.0.0-beta.0.20161118192335-3b1282355199+incompatible diff --git a/upload/detectEmptyRanges.go b/upload/detectEmptyRanges.go index f7d33aaa..a7c1f9c6 100644 --- a/upload/detectEmptyRanges.go +++ b/upload/detectEmptyRanges.go @@ -5,10 +5,10 @@ import ( "io" "math" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/block/bitmap" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" ) // DataWithRange type describes a range and data associated with the range. diff --git a/upload/metadata/metaData.go b/upload/metadata/metaData.go index 6bfcf5c4..2c56e60b 100644 --- a/upload/metadata/metaData.go +++ b/upload/metadata/metaData.go @@ -11,8 +11,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Microsoft/azure-vhd-utils/upload/progress" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" + + "github.com/flatcar/azure-vhd-utils/upload/progress" + "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream" ) // The key of the page blob metadata collection entry holding VHD metadata as json. diff --git a/upload/upload.go b/upload/upload.go index 4478b893..504064ad 100644 --- a/upload/upload.go +++ b/upload/upload.go @@ -7,10 +7,11 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Microsoft/azure-vhd-utils/upload/concurrent" - "github.com/Microsoft/azure-vhd-utils/upload/progress" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" + + "github.com/flatcar/azure-vhd-utils/upload/concurrent" + "github.com/flatcar/azure-vhd-utils/upload/progress" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream" ) // DiskUploadContext type describes VHD upload context, this includes the disk stream to read from, the ranges of diff --git a/upload/uploadableRanges.go b/upload/uploadableRanges.go index 3c2d07e4..5f3e1b7f 100644 --- a/upload/uploadableRanges.go +++ b/upload/uploadableRanges.go @@ -1,8 +1,8 @@ package upload import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream" ) // LocateUploadableRanges detects the uploadable ranges in a VHD stream, size of each range is at most pageSizeInBytes. 
diff --git a/vhdInspectCmdHandler.go b/vhdInspectCmdHandler.go index d0715a91..3cf3a019 100644 --- a/vhdInspectCmdHandler.go +++ b/vhdInspectCmdHandler.go @@ -9,10 +9,10 @@ import ( "strconv" "text/template" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/block/bitmap" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/vhdfile" "gopkg.in/urfave/cli.v1" ) diff --git a/vhdUploadCmdHandler.go b/vhdUploadCmdHandler.go index fec3df9e..d8aa95c6 100644 --- a/vhdUploadCmdHandler.go +++ b/vhdUploadCmdHandler.go @@ -10,12 +10,13 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Microsoft/azure-vhd-utils/upload" - "github.com/Microsoft/azure-vhd-utils/upload/metadata" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" - "github.com/Microsoft/azure-vhd-utils/vhdcore/validator" "gopkg.in/urfave/cli.v1" + + "github.com/flatcar/azure-vhd-utils/upload" + "github.com/flatcar/azure-vhd-utils/upload/metadata" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream" + "github.com/flatcar/azure-vhd-utils/vhdcore/validator" ) func vhdUploadCmdHandler() cli.Command { diff --git a/vhdcore/bat/blockAllocationTable.go b/vhdcore/bat/blockAllocationTable.go index c5e6e835..509adf70 100644 --- a/vhdcore/bat/blockAllocationTable.go +++ b/vhdcore/bat/blockAllocationTable.go @@ -3,7 +3,7 @@ package bat import ( "math" - "github.com/Microsoft/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore" ) // BlockAllocationTable type represents the Block Allocation Table (BAT) of the disk, BAT served as diff --git a/vhdcore/bat/blockAllocationTableFactory.go b/vhdcore/bat/blockAllocationTableFactory.go index 04d8c29a..7036b5a0 100644 --- a/vhdcore/bat/blockAllocationTableFactory.go +++ b/vhdcore/bat/blockAllocationTableFactory.go @@ -1,8 +1,8 @@ package bat import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/header" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/header" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // BlockAllocationTableFactory type is used to create BlockAllocationTable instance by reading BAT diff --git a/vhdcore/block/bitmap/factory.go b/vhdcore/block/bitmap/factory.go index 5a9af9b0..801d4e9e 100644 --- a/vhdcore/block/bitmap/factory.go +++ b/vhdcore/block/bitmap/factory.go @@ -1,8 +1,8 @@ package bitmap import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/bat" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/bat" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // Factory type is used to create BitMap instance by reading 'bitmap section' of a block. 
diff --git a/vhdcore/block/block.go b/vhdcore/block/block.go index c24aad10..ea4039a2 100644 --- a/vhdcore/block/block.go +++ b/vhdcore/block/block.go @@ -3,9 +3,9 @@ package block import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/block/bitmap" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" ) // Block type represents Block of a vhd. A block of a dynamic or differential vhd starts with a diff --git a/vhdcore/block/dataReadError.go b/vhdcore/block/dataReadError.go index 8b5bbd3b..dc9d0f4f 100644 --- a/vhdcore/block/dataReadError.go +++ b/vhdcore/block/dataReadError.go @@ -3,7 +3,7 @@ package block import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" ) // DataReadError is the error type representing block data read error. diff --git a/vhdcore/block/differencingDiskBlockFactory.go b/vhdcore/block/differencingDiskBlockFactory.go index e70bb288..96791cd6 100644 --- a/vhdcore/block/differencingDiskBlockFactory.go +++ b/vhdcore/block/differencingDiskBlockFactory.go @@ -1,9 +1,9 @@ package block import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/block/bitmap" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" ) // DifferencingDiskBlockFactory is a type which is used for following purposes diff --git a/vhdcore/block/differencingDiskBlockReader.go b/vhdcore/block/differencingDiskBlockReader.go index 59cc7b62..b01b6981 100644 --- a/vhdcore/block/differencingDiskBlockReader.go +++ b/vhdcore/block/differencingDiskBlockReader.go @@ -1,9 +1,9 @@ package block import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/bat" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/bat" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // DifferencingDiskBlockReader type satisfies BlockDataReader interface, diff --git a/vhdcore/block/dynamicDiskBlockFactory.go b/vhdcore/block/dynamicDiskBlockFactory.go index 92eee11f..63df9f48 100644 --- a/vhdcore/block/dynamicDiskBlockFactory.go +++ b/vhdcore/block/dynamicDiskBlockFactory.go @@ -1,9 +1,9 @@ package block import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/block/bitmap" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" ) // DynamicDiskBlockFactory is a type which is used for following purposes diff --git a/vhdcore/block/dynamicDiskBlockReader.go b/vhdcore/block/dynamicDiskBlockReader.go index 0a0e699d..e4000c8d 100644 --- a/vhdcore/block/dynamicDiskBlockReader.go +++ b/vhdcore/block/dynamicDiskBlockReader.go @@ -3,9 +3,9 @@ package block import ( "io" - "github.com/Microsoft/azure-vhd-utils/vhdcore/bat" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/bat" + 
"github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // DynamicDiskBlockReader type satisfies BlockDataReader interface, diff --git a/vhdcore/block/factory.go b/vhdcore/block/factory.go index cd3fe8dd..091804a5 100644 --- a/vhdcore/block/factory.go +++ b/vhdcore/block/factory.go @@ -1,7 +1,7 @@ package block import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" ) // Factory interface that all block factories specific to disk type (fixed, diff --git a/vhdcore/block/factoryParams.go b/vhdcore/block/factoryParams.go index 0fb06120..953da150 100644 --- a/vhdcore/block/factoryParams.go +++ b/vhdcore/block/factoryParams.go @@ -1,10 +1,10 @@ package block import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/bat" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/header" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/bat" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/header" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // FactoryParams represents type of the parameter for different disk block diff --git a/vhdcore/block/fixedDiskBlockFactory.go b/vhdcore/block/fixedDiskBlockFactory.go index 348953f3..6b7e6475 100644 --- a/vhdcore/block/fixedDiskBlockFactory.go +++ b/vhdcore/block/fixedDiskBlockFactory.go @@ -4,8 +4,8 @@ import ( "log" "math" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" ) // FixedDiskBlockFactory is a type which is used for following purposes diff --git a/vhdcore/block/fixedDiskBlockReader.go b/vhdcore/block/fixedDiskBlockReader.go index cd285399..1b190ba8 100644 --- a/vhdcore/block/fixedDiskBlockReader.go +++ b/vhdcore/block/fixedDiskBlockReader.go @@ -3,8 +3,8 @@ package block import ( "io" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // FixedDiskBlockReader type satisfies BlockDataReader interface, diff --git a/vhdcore/block/sectorFactory.go b/vhdcore/block/sectorFactory.go index d0a55841..9d503bb0 100644 --- a/vhdcore/block/sectorFactory.go +++ b/vhdcore/block/sectorFactory.go @@ -3,8 +3,8 @@ package block import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // SectorFactory type is used to create Sector instance by reading 512 byte sector from block's 'data section'. 
diff --git a/vhdcore/diskstream/diskstream.go b/vhdcore/diskstream/diskstream.go index de2e697e..902510f7 100644 --- a/vhdcore/diskstream/diskstream.go +++ b/vhdcore/diskstream/diskstream.go @@ -4,11 +4,11 @@ import ( "errors" "io" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/block" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/vhdfile" ) // DiskStream provides a logical stream over a VHD file. diff --git a/vhdcore/footer/diskGeometry.go b/vhdcore/footer/diskGeometry.go index 92174b48..13355bc3 100644 --- a/vhdcore/footer/diskGeometry.go +++ b/vhdcore/footer/diskGeometry.go @@ -3,7 +3,7 @@ package footer import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore" ) // DiskGeometry represents the cylinder, heads and sectors (CHS) per track. diff --git a/vhdcore/footer/factory.go b/vhdcore/footer/factory.go index b48377d2..645c0057 100644 --- a/vhdcore/footer/factory.go +++ b/vhdcore/footer/factory.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // Factory type is used to create Footer instance by reading vhd footer section. diff --git a/vhdcore/footer/footer.go b/vhdcore/footer/footer.go index 90515b56..9cf77c93 100644 --- a/vhdcore/footer/footer.go +++ b/vhdcore/footer/footer.go @@ -4,8 +4,8 @@ import ( "bytes" "time" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" ) // Footer represents the footer of the vhd, the size of the footer is 512 bytes. diff --git a/vhdcore/footer/vhdFooterSerializer.go b/vhdcore/footer/vhdFooterSerializer.go index acf43146..58e90cc1 100644 --- a/vhdcore/footer/vhdFooterSerializer.go +++ b/vhdcore/footer/vhdFooterSerializer.go @@ -1,8 +1,8 @@ package footer import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/writer" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/writer" ) // SerializeFooter returns the given VhdFooter instance as byte slice of length 512 bytes. 
diff --git a/vhdcore/header/factory.go b/vhdcore/header/factory.go index 1ca1d679..baf3594a 100644 --- a/vhdcore/header/factory.go +++ b/vhdcore/header/factory.go @@ -5,10 +5,10 @@ import ( "strings" "time" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/header/parentlocator" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/header/parentlocator" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // Factory type is used to create VhdHeader instance by reading vhd header section. diff --git a/vhdcore/header/header.go b/vhdcore/header/header.go index 12535fb9..a982ad07 100644 --- a/vhdcore/header/header.go +++ b/vhdcore/header/header.go @@ -3,9 +3,9 @@ package header import ( "time" - "github.com/Microsoft/azure-vhd-utils/vhdcore" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/header/parentlocator" + "github.com/flatcar/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/header/parentlocator" ) // Header represents the header of the vhd, size of the header is 1024 bytes. diff --git a/vhdcore/header/parentlocator/factory.go b/vhdcore/header/parentlocator/factory.go index 654c0af4..4539cb50 100644 --- a/vhdcore/header/parentlocator/factory.go +++ b/vhdcore/header/parentlocator/factory.go @@ -2,7 +2,7 @@ package parentlocator import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // Factory type is used to create ParentLocator instance by reading one entry diff --git a/vhdcore/header/parentlocator/parentLocator.go b/vhdcore/header/parentlocator/parentLocator.go index 4358a95c..734b90ac 100644 --- a/vhdcore/header/parentlocator/parentLocator.go +++ b/vhdcore/header/parentlocator/parentLocator.go @@ -1,7 +1,7 @@ package parentlocator import ( - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" "log" "strings" ) diff --git a/vhdcore/reader/binaryReader.go b/vhdcore/reader/binaryReader.go index 59f1174c..a1b3b96f 100644 --- a/vhdcore/reader/binaryReader.go +++ b/vhdcore/reader/binaryReader.go @@ -3,7 +3,7 @@ package reader import ( "encoding/binary" "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" + "github.com/flatcar/azure-vhd-utils/vhdcore/common" "io" ) diff --git a/vhdcore/reader/vhdReader.go b/vhdcore/reader/vhdReader.go index d9ec5774..41c70f2b 100644 --- a/vhdcore/reader/vhdReader.go +++ b/vhdcore/reader/vhdReader.go @@ -6,7 +6,7 @@ import ( "time" "unsafe" - "github.com/Microsoft/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore" ) // VhdReader is the reader used by various components responsible for reading different diff --git a/vhdcore/validator/validator.go b/vhdcore/validator/validator.go index 218a76e3..7ebbf829 100644 --- a/vhdcore/validator/validator.go +++ b/vhdcore/validator/validator.go @@ -3,8 +3,8 @@ package validator import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" - "github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile" + "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream" + "github.com/flatcar/azure-vhd-utils/vhdcore/vhdfile" ) // oneTB is one TeraByte diff --git a/vhdcore/vhdfile/vhdFile.go 
b/vhdcore/vhdfile/vhdFile.go index bac323d9..bb8390da 100644 --- a/vhdcore/vhdfile/vhdFile.go +++ b/vhdcore/vhdfile/vhdFile.go @@ -3,11 +3,11 @@ package vhdfile import ( "fmt" - "github.com/Microsoft/azure-vhd-utils/vhdcore/bat" - "github.com/Microsoft/azure-vhd-utils/vhdcore/block" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/header" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/bat" + "github.com/flatcar/azure-vhd-utils/vhdcore/block" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/header" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // VhdFile represents a VHD. diff --git a/vhdcore/vhdfile/vhdFileFactory.go b/vhdcore/vhdfile/vhdFileFactory.go index febc9db6..ed54c885 100644 --- a/vhdcore/vhdfile/vhdFileFactory.go +++ b/vhdcore/vhdfile/vhdFileFactory.go @@ -4,10 +4,10 @@ import ( "os" "path/filepath" - "github.com/Microsoft/azure-vhd-utils/vhdcore/bat" - "github.com/Microsoft/azure-vhd-utils/vhdcore/footer" - "github.com/Microsoft/azure-vhd-utils/vhdcore/header" - "github.com/Microsoft/azure-vhd-utils/vhdcore/reader" + "github.com/flatcar/azure-vhd-utils/vhdcore/bat" + "github.com/flatcar/azure-vhd-utils/vhdcore/footer" + "github.com/flatcar/azure-vhd-utils/vhdcore/header" + "github.com/flatcar/azure-vhd-utils/vhdcore/reader" ) // FileFactory is a type to create VhdFile representing VHD in the local machine diff --git a/vhdcore/writer/vhdWriter.go b/vhdcore/writer/vhdWriter.go index 754fdb67..fb33b62b 100644 --- a/vhdcore/writer/vhdWriter.go +++ b/vhdcore/writer/vhdWriter.go @@ -7,7 +7,7 @@ import ( "time" "unsafe" - "github.com/Microsoft/azure-vhd-utils/vhdcore" + "github.com/flatcar/azure-vhd-utils/vhdcore" ) // VhdWriter is the writer used by various components responsible for writing header and From 00c4b1dd3dc64b4752dc66e3651e691d7a84fb87 Mon Sep 17 00:00:00 2001 From: Krzesimir Nowak Date: Wed, 22 May 2024 15:41:56 +0200 Subject: [PATCH 2/4] *: Run gofmt --- upload/concurrent/balancer.go | 10 --- upload/concurrent/pool.go | 7 -- upload/concurrent/request.go | 1 - upload/concurrent/worker.go | 9 +-- upload/detectEmptyRanges.go | 4 -- upload/metadata/metaData.go | 10 --- upload/progress/computeStats.go | 4 -- upload/progress/readerWithProgress.go | 4 -- upload/progress/status.go | 14 ---- upload/upload.go | 5 -- upload/uploadableRanges.go | 1 - vhdInspectCmdHandler.go | 2 - vhdUploadCmdHandler.go | 8 --- vhdcore/bat/blockAllocationTable.go | 6 -- vhdcore/bat/blockAllocationTableFactory.go | 3 - vhdcore/bat/blockAllocationTableParseError.go | 4 -- vhdcore/block/bitmap/bitmap.go | 5 -- vhdcore/block/bitmap/factory.go | 3 - vhdcore/block/bitmap/parseError.go | 4 -- vhdcore/block/block.go | 5 -- vhdcore/block/dataReadError.go | 4 -- vhdcore/block/dataReader.go | 1 - vhdcore/block/differencingDiskBlockFactory.go | 8 --- vhdcore/block/differencingDiskBlockReader.go | 3 - vhdcore/block/dynamicDiskBlockFactory.go | 8 --- vhdcore/block/dynamicDiskBlockReader.go | 3 - vhdcore/block/factory.go | 1 - vhdcore/block/factoryParams.go | 1 - vhdcore/block/fixedDiskBlockFactory.go | 9 --- vhdcore/block/fixedDiskBlockReader.go | 3 - vhdcore/block/sector.go | 1 - vhdcore/block/sectorFactory.go | 4 -- vhdcore/block/sectorReadError.go | 3 - vhdcore/common/indexRange.go | 71 +++++++------------ vhdcore/common/utils.go | 4 -- vhdcore/common/uuid.go | 4 -- vhdcore/constants.go | 7 -- vhdcore/diskstream/diskstream.go | 13 ---- 
vhdcore/footer/diskGeometry.go | 5 -- vhdcore/footer/diskType.go | 2 - vhdcore/footer/factory.go | 20 ------ vhdcore/footer/footer.go | 3 - vhdcore/footer/hostOsType.go | 2 - vhdcore/footer/parseError.go | 4 -- vhdcore/footer/vhdCreatorVersion.go | 2 - vhdcore/footer/vhdFeature.go | 2 - vhdcore/footer/vhdFileFormatVersion.go | 4 -- vhdcore/footer/vhdFooterSerializer.go | 1 - vhdcore/header/factory.go | 16 ----- vhdcore/header/header.go | 1 - vhdcore/header/parentlocator/factory.go | 8 --- vhdcore/header/parentlocator/parentLocator.go | 2 - .../header/parentlocator/parentLocators.go | 4 -- vhdcore/header/parentlocator/parseError.go | 4 -- vhdcore/header/parentlocator/platformCode.go | 2 - vhdcore/header/parseError.go | 4 -- vhdcore/header/vhdHeaderVersion.go | 4 -- vhdcore/innererror/innerError.go | 1 - vhdcore/reader/binaryReader.go | 15 ---- vhdcore/reader/vhdReader.go | 5 -- vhdcore/validator/validator.go | 3 - vhdcore/vhdCookie.go | 9 --- vhdcore/vhdTimeStamp.go | 4 -- vhdcore/vhdfile/vhdFile.go | 4 -- vhdcore/vhdfile/vhdFileFactory.go | 6 -- vhdcore/writer/binaryWriter.go | 13 ---- vhdcore/writer/vhdWriter.go | 7 -- 67 files changed, 26 insertions(+), 388 deletions(-) diff --git a/upload/concurrent/balancer.go b/upload/concurrent/balancer.go index 03e7cb6e..959ba1cc 100644 --- a/upload/concurrent/balancer.go +++ b/upload/concurrent/balancer.go @@ -6,7 +6,6 @@ import ( ) // Balancer is a type that can balance load among a set of workers -// type Balancer struct { errorChan chan error // The channel used by all workers to report error requestHandledChan chan *Worker // The channel used by a worker to signal balancer that a work has been executed @@ -18,11 +17,9 @@ type Balancer struct { } // The size of work channel associated with each worker this balancer manages. -// const workerQueueSize int = 3 // NewBalancer creates a new instance of Balancer that needs to balance load between 'workerCount' workers -// func NewBalancer(workerCount int) *Balancer { balancer := &Balancer{ workerCount: workerCount, @@ -34,7 +31,6 @@ func NewBalancer(workerCount int) *Balancer { } // Init initializes all channels and start the workers. -// func (b *Balancer) Init() { b.errorChan = make(chan error, 0) b.requestHandledChan = make(chan *Worker, 0) @@ -49,7 +45,6 @@ func (b *Balancer) Init() { // TearDownWorkers sends a force quit signal to all workers, which case worker to quit as soon as possible, // workers won't drain it's request channel in this case. -// func (b *Balancer) TearDownWorkers() { close(b.tearDownChan) } @@ -58,7 +53,6 @@ func (b *Balancer) TearDownWorkers() { // with least load. This method returns two channels, a channel to communicate error from any worker back to // the consumer of balancer and second channel is used by the balancer to signal consumer that all workers has // been finished executing. -// func (b *Balancer) Run(requestChan <-chan *Request) (<-chan error, <-chan bool) { // Request dispatcher go func() { @@ -95,7 +89,6 @@ func (b *Balancer) Run(requestChan <-chan *Request) (<-chan error, <-chan bool) // closeWorkersRequestChannel closes the Request channel of all workers, this indicates that no // more work will not be send the channel so that the workers can gracefully exit after handling // any pending work in the channel. 
-// func (b *Balancer) closeWorkersRequestChannel() { for i := 0; i < b.workerCount; i++ { close((b.pool.Workers[i]).RequestsToHandleChan) @@ -105,7 +98,6 @@ func (b *Balancer) closeWorkersRequestChannel() { // dispatch dispatches the request to the worker with least load. If all workers are completely // busy (i.e. there Pending request count is currently equal to the maximum load) then this // method will poll until one worker is available. -// func (b *Balancer) dispatch(request *Request) { for { if b.pool.Workers[0].Pending >= workerQueueSize { @@ -125,7 +117,6 @@ func (b *Balancer) dispatch(request *Request) { // completed is called when a worker finishes one work, it updates the load status of the given the // worker. -// func (b *Balancer) completed(worker *Worker) { b.pool.Lock() worker.Pending-- @@ -136,7 +127,6 @@ func (b *Balancer) completed(worker *Worker) { // WorkersCurrentLoad returns the load of the workers this balancer manages as comma separated string // values where each value consists of worker id (Worker.Id property) and pending requests associated // with the worker. -// func (b *Balancer) WorkersCurrentLoad() string { return b.pool.WorkersCurrentLoad() } diff --git a/upload/concurrent/pool.go b/upload/concurrent/pool.go index 00cf9b6b..8be3ba9c 100644 --- a/upload/concurrent/pool.go +++ b/upload/concurrent/pool.go @@ -10,27 +10,23 @@ import ( // The priority is the number of pending works assigned to the worker. Lower the pending // work count higher the priority. Pool embeds sync.RWMutex to support concurrent heap // operation. -// type Pool struct { sync.RWMutex // If consumer want to use workers in a concurrent environment Workers []*Worker // The workers } // Len returns number of workers in the pool. -// func (p *Pool) Len() int { return len(p.Workers) } // Less returns true if priority of Worker instance at index i is less than priority of Worker // instance at j, lower the pending value higher the priority -// func (p *Pool) Less(i, j int) bool { return p.Workers[i].Pending < p.Workers[j].Pending } // Swap swaps the Worker instances at the given indices i and j -// func (p Pool) Swap(i, j int) { p.Workers[i], p.Workers[j] = p.Workers[j], p.Workers[i] p.Workers[i].Index = i @@ -40,7 +36,6 @@ func (p Pool) Swap(i, j int) { // Push is used by heap.Push implementation, to add a worker w to a Pool pool, we call // heap.Push(&pool, w) which invokes this method to add the worker to the end of collection // then it fix the heap by moving the added item to its correct position. -// func (p *Pool) Push(x interface{}) { n := len(p.Workers) worker := x.(*Worker) @@ -52,7 +47,6 @@ func (p *Pool) Push(x interface{}) { // p, we call w := heap.Pop(&p).(*Worker), which swap the min priority worker at the beginning // of the pool with the end of item, fix the heap and then invokes this method for popping the // worker from the end. -// func (p *Pool) Pop() interface{} { n := len(p.Workers) w := (*p).Workers[n-1] @@ -64,7 +58,6 @@ func (p *Pool) Pop() interface{} { // WorkersCurrentLoad returns the load of the workers as comma separated string values, where // each value consists of worker id (Worker.Id property) and pending requests associated with // the worker. 
-// func (p *Pool) WorkersCurrentLoad() string { var buffer bytes.Buffer buffer.WriteString("Load [") diff --git a/upload/concurrent/request.go b/upload/concurrent/request.go index 8f9ef99f..116b4e10 100644 --- a/upload/concurrent/request.go +++ b/upload/concurrent/request.go @@ -1,7 +1,6 @@ package concurrent // Request represents a work that Worker needs to execute -// type Request struct { ID string // The Id of the work (for debugging purposes) Work func() error // The work to be executed by a worker diff --git a/upload/concurrent/worker.go b/upload/concurrent/worker.go index 99c340ad..4e07bc75 100644 --- a/upload/concurrent/worker.go +++ b/upload/concurrent/worker.go @@ -3,7 +3,6 @@ package concurrent import "fmt" // Worker represents a type which can listen for work from a channel and run them -// type Worker struct { RequestsToHandleChan chan *Request // The buffered channel of works this worker needs to handle Pending int // The number of pending requests this worker needs to handle (i.e. worker load) @@ -16,13 +15,11 @@ type Worker struct { } // The maximum number of times a work needs to be retried before reporting failure on errorChan. -// const maxRetryCount int = 5 // NewWorker creates a new instance of the worker with the given work channel size. // errorChan is the channel to report the failure in addressing a work request after all // retries, each time a work is completed (failure or success) doneChan will be signalled -// func NewWorker(id int, workChannelSize int, pool *Pool, errorChan chan<- error, requestHandledChan chan<- *Worker, workerFinishedChan chan<- *Worker) *Worker { return &Worker{ ID: id, @@ -36,12 +33,11 @@ func NewWorker(id int, workChannelSize int, pool *Pool, errorChan chan<- error, // Run starts a go-routine that read work from work-queue associated with the worker and executes one // at a time. The go-routine returns/exit once one of the following condition is met: -// 1. The work-queue is closed and drained and there is no work to steal from peers worker's work-queue -// 2. A signal is received in the tearDownChan channel parameter +// 1. The work-queue is closed and drained and there is no work to steal from peers worker's work-queue +// 2. A signal is received in the tearDownChan channel parameter // // After executing each work, this method sends report to Worker::requestHandledChan channel // If a work fails after maximum retry, this method sends report to Worker::errorChan channel -// func (w *Worker) Run(tearDownChan <-chan bool) { go func() { defer func() { @@ -105,7 +101,6 @@ func (w *Worker) Run(tearDownChan <-chan bool) { // tryStealWork will try to steal a work from peer worker if available. If all peer channels are // empty then return nil -// func (w *Worker) tryStealWork() *Request { for _, w1 := range w.pool.Workers { request, ok := <-w1.RequestsToHandleChan diff --git a/upload/detectEmptyRanges.go b/upload/detectEmptyRanges.go index a7c1f9c6..b24e2ce0 100644 --- a/upload/detectEmptyRanges.go +++ b/upload/detectEmptyRanges.go @@ -12,7 +12,6 @@ import ( ) // DataWithRange type describes a range and data associated with the range. -// type DataWithRange struct { Range *common.IndexRange Data []byte @@ -22,7 +21,6 @@ type DataWithRange struct { // ranges and update the uploadableRanges slice by removing the empty ranges. This method returns the updated ranges. // The empty range detection required only for Fixed disk, if the stream is a expandable disk stream this method simply // returns the parameter uploadableRanges as it is. 
-// func DetectEmptyRanges(diskStream *diskstream.DiskStream, uploadableRanges []*common.IndexRange) ([]*common.IndexRange, error) { if diskStream.GetDiskType() != footer.DiskTypeFixed { return uploadableRanges, nil @@ -68,7 +66,6 @@ L: // to report the non-empty range indices and error channel - used to report any error while performing empty detection. // int channel will be closed on a successful completion, the caller must not expect any more value in the // int channel if the error channel is signaled. -// func LocateNonEmptyRangeIndices(stream *diskstream.DiskStream, ranges []*common.IndexRange) (<-chan int32, <-chan error) { indexChan := make(chan int32, 0) errorChan := make(chan error, 0) @@ -101,7 +98,6 @@ func LocateNonEmptyRangeIndices(stream *diskstream.DiskStream, ranges []*common. } // isAllZero returns true if the given byte slice contain all zeros -// func isAllZero(buf []byte) bool { l := len(buf) j := 0 diff --git a/upload/metadata/metaData.go b/upload/metadata/metaData.go index 2c56e60b..358d95d1 100644 --- a/upload/metadata/metaData.go +++ b/upload/metadata/metaData.go @@ -17,18 +17,15 @@ import ( ) // The key of the page blob metadata collection entry holding VHD metadata as json. -// const metaDataKey = "diskmetadata" // MetaData is the type representing metadata associated with an Azure page blob holding the VHD. // This will be stored as a JSON string in the page blob metadata collection with key 'diskmetadata'. -// type MetaData struct { FileMetaData *FileMetaData `json:"fileMetaData"` } // FileMetaData represents the metadata of a VHD file. -// type FileMetaData struct { FileName string `json:"fileName"` FileSize int64 `json:"fileSize"` @@ -38,7 +35,6 @@ type FileMetaData struct { } // ToJSON returns MetaData as a json string. -// func (m *MetaData) ToJSON() (string, error) { b, err := json.Marshal(m) if err != nil { @@ -48,7 +44,6 @@ func (m *MetaData) ToJSON() (string, error) { } // ToMap returns the map representation of the MetaData which can be stored in the page blob metadata colleciton -// func (m *MetaData) ToMap() (map[string]string, error) { v, err := m.ToJSON() if err != nil { @@ -60,7 +55,6 @@ func (m *MetaData) ToMap() (map[string]string, error) { // NewMetaDataFromLocalVHD creates a MetaData instance that should be associated with the page blob // holding the VHD. The parameter vhdPath is the path to the local VHD. -// func NewMetaDataFromLocalVHD(vhdPath string) (*MetaData, error) { fileStat, err := getFileStat(vhdPath) if err != nil { @@ -91,7 +85,6 @@ func NewMetaDataFromLocalVHD(vhdPath string) (*MetaData, error) { // NewMetadataFromBlob returns MetaData instance associated with a Azure page blob, if there is no // MetaData associated with the blob it returns nil value for MetaData -// func NewMetadataFromBlob(blobClient storage.BlobStorageClient, containerName, blobName string) (*MetaData, error) { allMetadata, err := blobClient.GetBlobMetadata(containerName, blobName) if err != nil { @@ -113,7 +106,6 @@ func NewMetadataFromBlob(blobClient storage.BlobStorageClient, containerName, bl // CompareMetaData compares the MetaData associated with the remote page blob and local VHD file. If both metadata // are same this method returns an empty error slice else a non-empty error slice with each error describing // the metadata entry that mismatched. 
-// func CompareMetaData(remote, local *MetaData) []error { var metadataErrors = make([]error, 0) if !bytes.Equal(remote.FileMetaData.MD5Hash, local.FileMetaData.MD5Hash) { @@ -151,7 +143,6 @@ func CompareMetaData(remote, local *MetaData) []error { } // getFileStat returns os.FileInfo of a file. -// func getFileStat(filePath string) (os.FileInfo, error) { fd, err := os.Open(filePath) if err != nil { @@ -163,7 +154,6 @@ func getFileStat(filePath string) (os.FileInfo, error) { // calculateMD5Hash compute the MD5 checksum of a disk stream, it writes the compute progress in stdout // If there is an error in reading file, then the MD5 compute will stop and it return error. -// func calculateMD5Hash(diskStream *diskstream.DiskStream) ([]byte, error) { progressStream := progress.NewReaderWithProgress(diskStream, diskStream.GetSize(), 1*time.Second) defer progressStream.Close() diff --git a/upload/progress/computeStats.go b/upload/progress/computeStats.go index 12623222..c55cb03f 100644 --- a/upload/progress/computeStats.go +++ b/upload/progress/computeStats.go @@ -1,7 +1,6 @@ package progress // ComputeStats type supports computing running average with a given window size. -// type ComputeStats struct { history []float64 size int @@ -10,7 +9,6 @@ type ComputeStats struct { // NewComputeStats returns a new instance of ComputeStats. The parameter size is the // maximum number of values that ComputeStats track at any given point of time. -// func NewComputeStats(size int) *ComputeStats { return &ComputeStats{ history: make([]float64, size), @@ -21,7 +19,6 @@ func NewComputeStats(size int) *ComputeStats { // NewComputestateDefaultSize returns a new instance of ComputeStats that can tracks // maximum of 60 values. -// func NewComputestateDefaultSize() *ComputeStats { return NewComputeStats(60) } @@ -29,7 +26,6 @@ func NewComputestateDefaultSize() *ComputeStats { // ComputeAvg adds the given value to a list containing set of previous values added // and returns the average of the values in the list. If the values list reached the // maximum size then oldest value will be removed -// func (s *ComputeStats) ComputeAvg(current float64) float64 { s.history[s.p] = current s.p++ diff --git a/upload/progress/readerWithProgress.go b/upload/progress/readerWithProgress.go index 178e8e88..7addbb6e 100644 --- a/upload/progress/readerWithProgress.go +++ b/upload/progress/readerWithProgress.go @@ -6,7 +6,6 @@ import ( ) // ReaderWithProgress wraps an io.ReadCloser, it track and report the read progress. -// type ReaderWithProgress struct { ProgressChan <-chan *Record innerReadCloser io.ReadCloser @@ -17,7 +16,6 @@ type ReaderWithProgress struct { // read progress needs to be tracked, sizeInBytes is the total size of the inner stream in bytes, // progressIntervalInSeconds is the interval at which the read progress needs to be send to ProgressChan channel. // After using the this reader, it must be closed by calling Close method to avoid goroutine leak. -// func NewReaderWithProgress(inner io.ReadCloser, sizeInBytes int64, progressIntervalInSeconds time.Duration) *ReaderWithProgress { r := &ReaderWithProgress{} r.innerReadCloser = inner @@ -28,7 +26,6 @@ func NewReaderWithProgress(inner io.ReadCloser, sizeInBytes int64, progressInter // Read reads up to len(b) bytes from the inner stream. It returns the number of bytes read and an error, if any. // EOF is signaled when no more data to read and n will set to 0. 
-// func (r *ReaderWithProgress) Read(p []byte) (n int, err error) { n, err = r.innerReadCloser.Read(p) if err == nil { @@ -38,7 +35,6 @@ func (r *ReaderWithProgress) Read(p []byte) (n int, err error) { } // Close closes the inner stream and stop reporting read progress in the ProgressChan chan. -// func (r *ReaderWithProgress) Close() error { err := r.innerReadCloser.Close() r.progressStatus.Close() diff --git a/upload/progress/status.go b/upload/progress/status.go index 887342f5..0adb8eaf 100644 --- a/upload/progress/status.go +++ b/upload/progress/status.go @@ -6,7 +6,6 @@ import ( // Status can be used by a collection of workers (reporters) to report the amount of work done when they need, // Status compute the overall progress at regular interval and report it. -// type Status struct { bytesProcessedCountChan chan int64 doneChan chan bool @@ -18,7 +17,6 @@ type Status struct { } // Record type is used by the ProgressStatus to report the progress at regular interval. -// type Record struct { PercentComplete float64 AverageThroughputMbPerSecond float64 @@ -27,18 +25,15 @@ type Record struct { } // oneMB is one MegaByte -// const oneMB = float64(1048576) // nanosecondsInOneSecond is 1 second expressed as nano-second unit -// const nanosecondsInOneSecond = 1000 * 1000 * 1000 // NewStatus creates a new instance of Status. reporterCount is the number of concurrent goroutines that want to // report processed bytes count, alreadyProcessedBytes is the bytes already processed if any, the parameter // totalBytes is the total number of bytes that the reports will be process eventually, the parameter computeStats // is used to calculate the running average. -// func NewStatus(reportersCount int, alreadyProcessedBytes, totalBytes int64, computeStats *ComputeStats) *Status { return &Status{ bytesProcessedCountChan: make(chan int64, reportersCount), @@ -51,7 +46,6 @@ func NewStatus(reportersCount int, alreadyProcessedBytes, totalBytes int64, comp } // ReportBytesProcessedCount method is used to report the number of bytes processed. -// func (s *Status) ReportBytesProcessedCount(count int64) { s.bytesProcessedCountChan <- count } @@ -59,7 +53,6 @@ func (s *Status) ReportBytesProcessedCount(count int64) { // Run starts counting the reported processed bytes count and compute the progress, this method returns a channel, // the computed progress will be send to this channel in regular interval. Once done with using ProgressStatus // instance, you must call Dispose method otherwise there will be go routine leak. -// func (s *Status) Run() <-chan *Record { go s.bytesProcessedCountReceiver() var outChan = make(chan *Record, 0) @@ -70,14 +63,12 @@ func (s *Status) Run() <-chan *Record { // Close disposes this ProgressStatus instance, an attempt to invoke ReportBytesProcessedCount method on a closed // instance will be panic. Close also stops sending progress to the channel returned by Run method. Not calling // Close will cause goroutine leak. -// func (s *Status) Close() { close(s.bytesProcessedCountChan) } // bytesProcessedCountReceiver read the channel containing the collection of reported bytes count and update the total // bytes processed. This method signal doneChan when there is no more data to read. 
-// func (s *Status) bytesProcessedCountReceiver() { for c := range s.bytesProcessedCountChan { s.bytesProcessed += c @@ -87,7 +78,6 @@ func (s *Status) bytesProcessedCountReceiver() { // progressRecordSender compute the progress information at regular interval and send it to channel outChan which is // returned by the Run method -// func (s *Status) progressRecordSender(outChan chan<- *Record) { progressRecord := &Record{} tickerChan := time.NewTicker(500 * time.Millisecond) @@ -114,25 +104,21 @@ Loop: } // remainingMB returns remaining bytes to be processed as MB. -// func (s *Status) remainingMB() float64 { return float64(s.totalBytes-s.bytesProcessed) / oneMB } // percentComplete returns the percentage of bytes processed out of total bytes. -// func (s *Status) percentComplete() float64 { return float64(100.0) * (float64(s.bytesProcessed) / float64(s.totalBytes)) } // processTime returns the Duration representing the time taken to process the bytes so far. -// func (s *Status) processTime() time.Duration { return time.Since(s.startTime) } // throughputMBs returns the throughput in MB -// func (s *Status) throughputMBs() float64 { return float64(s.bytesProcessed) / oneMB / s.processTime().Seconds() } diff --git a/upload/upload.go b/upload/upload.go index 504064ad..5b086214 100644 --- a/upload/upload.go +++ b/upload/upload.go @@ -17,7 +17,6 @@ import ( // DiskUploadContext type describes VHD upload context, this includes the disk stream to read from, the ranges of // the stream to read, the destination blob and it's container, the client to communicate with Azure storage and // the number of parallel go-routines to use for upload. -// type DiskUploadContext struct { VhdStream *diskstream.DiskStream // The stream whose ranges needs to be uploaded AlreadyProcessedBytes int64 // The size in bytes already uploaded @@ -31,13 +30,11 @@ type DiskUploadContext struct { } // oneMB is one MegaByte -// const oneMB = float64(1048576) // Upload uploads the disk ranges described by the parameter cxt, this parameter describes the disk stream to // read from, the ranges of the stream to read, the destination blob and it's container, the client to communicate // with Azure storage and the number of parallel go-routines to use for upload. -// func Upload(cxt *DiskUploadContext) error { // Get the channel that contains stream of disk data to upload dataWithRangeChan, streamReadErrChan := GetDataWithRanges(cxt.VhdStream, cxt.UploadableRanges) @@ -137,7 +134,6 @@ L: // It returns two channels, a data channel to stream the disk ranges and a channel to send any error while reading // the disk. On successful completion the data channel will be closed. the caller must not expect any more value in // the data channel if the error channel is signaled. -// func GetDataWithRanges(stream *diskstream.DiskStream, ranges []*common.IndexRange) (<-chan *DataWithRange, <-chan error) { dataWithRangeChan := make(chan *DataWithRange, 0) errorChan := make(chan error, 0) @@ -166,7 +162,6 @@ func GetDataWithRanges(stream *diskstream.DiskStream, ranges []*common.IndexRang // readAndPrintProgress reads the progress records from the given progress channel and output it. It reads the // progress record until the channel is closed. 
-// func readAndPrintProgress(progressChan <-chan *progress.Record, resume bool) { var spinChars = [4]rune{'\\', '|', '/', '-'} s := time.Time{} diff --git a/upload/uploadableRanges.go b/upload/uploadableRanges.go index 5f3e1b7f..a377f470 100644 --- a/upload/uploadableRanges.go +++ b/upload/uploadableRanges.go @@ -13,7 +13,6 @@ import ( // // Note that this method will not check whether ranges of a fixed disk contains zeros, hence inorder to filter out such // ranges from the uploadable ranges, caller must use LocateNonEmptyRangeIndices method. -// func LocateUploadableRanges(stream *diskstream.DiskStream, rangesToSkip []*common.IndexRange, pageSizeInBytes int64) ([]*common.IndexRange, error) { var err error var diskRanges = make([]*common.IndexRange, 0) diff --git a/vhdInspectCmdHandler.go b/vhdInspectCmdHandler.go index 3cf3a019..41fad48e 100644 --- a/vhdInspectCmdHandler.go +++ b/vhdInspectCmdHandler.go @@ -17,14 +17,12 @@ import ( ) // FixedDiskBlocksInfo type describes general block information of a fixed disk -// type FixedDiskBlocksInfo struct { BlockSize int64 BlockCount int64 } // ExpandableDiskBlocksInfo type describes general block information of a expandable disk -// type ExpandableDiskBlocksInfo struct { BlockDataSize int64 BlockBitmapSize int32 diff --git a/vhdUploadCmdHandler.go b/vhdUploadCmdHandler.go index d8aa95c6..084f9c04 100644 --- a/vhdUploadCmdHandler.go +++ b/vhdUploadCmdHandler.go @@ -177,7 +177,6 @@ func vhdUploadCmdHandler() cli.Command { } // printErrorsAndFatal prints the errors in a slice one by one and then exit -// func printErrorsAndFatal(errs []error) { fmt.Println() for _, e := range errs { @@ -187,7 +186,6 @@ func printErrorsAndFatal(errs []error) { } // ensureVHDSanity ensure is VHD is valid for Azure. -// func ensureVHDSanity(localVHDPath string) { if err := validator.ValidateVhd(localVHDPath); err != nil { log.Fatal(err) @@ -203,7 +201,6 @@ func ensureVHDSanity(localVHDPath string) { // in which the page blob resides, parameter blobName is name for the page blob // This method attempt to fetch the metadata only if MD5Hash is not set for the page blob, this method panic if the // MD5Hash is already set or if the custom metadata is absent. 
-// func getBlobMetaData(client storage.BlobStorageClient, containerName, blobName string) *metadata.MetaData { md5Hash := getBlobMD5Hash(client, containerName, blobName) if md5Hash != "" { @@ -222,7 +219,6 @@ func getBlobMetaData(client storage.BlobStorageClient, containerName, blobName s } // getLocalVHDMetaData returns the metadata of a local VHD -// func getLocalVHDMetaData(localVHDPath string) *metadata.MetaData { localMetaData, err := metadata.NewMetaDataFromLocalVHD(localVHDPath) if err != nil { @@ -235,7 +231,6 @@ func getLocalVHDMetaData(localVHDPath string) *metadata.MetaData { // The parameter client is the Azure blob service client, parameter containerName is the name of an existing container // in which the page blob needs to be created, parameter blobName is name for the new page blob, size is the size of // the new page blob in bytes and parameter vhdMetaData is the custom metadata to be associacted with the page blob -// func createBlob(client storage.BlobStorageClient, containerName, blobName string, size int64, vhdMetaData *metadata.MetaData) { if err := client.PutPageBlob(containerName, blobName, size, nil); err != nil { log.Fatal(err) @@ -247,7 +242,6 @@ func createBlob(client storage.BlobStorageClient, containerName, blobName string } // setBlobMD5Hash sets MD5 hash of the blob in it's properties -// func setBlobMD5Hash(client storage.BlobStorageClient, containerName, blobName string, vhdMetaData *metadata.MetaData) { if vhdMetaData.FileMetaData.MD5Hash != nil { blobHeaders := storage.BlobHeaders{ @@ -262,7 +256,6 @@ func setBlobMD5Hash(client storage.BlobStorageClient, containerName, blobName st // getAlreadyUploadedBlobRanges returns the range slice containing ranges of a page blob those are already uploaded. // The parameter client is the Azure blob service client, parameter containerName is the name of an existing container // in which the page blob resides, parameter blobName is name for the page blob -// func getAlreadyUploadedBlobRanges(client storage.BlobStorageClient, containerName, blobName string) []*common.IndexRange { existingRanges, err := client.GetPageRanges(containerName, blobName) if err != nil { @@ -278,7 +271,6 @@ func getAlreadyUploadedBlobRanges(client storage.BlobStorageClient, containerNam // getBlobMD5Hash returns the MD5Hash associated with a blob // The parameter client is the Azure blob service client, parameter containerName is the name of an existing container // in which the page blob resides, parameter blobName is name for the page blob -// func getBlobMD5Hash(client storage.BlobStorageClient, containerName, blobName string) string { properties, err := client.GetBlobProperties(containerName, blobName) if err != nil { diff --git a/vhdcore/bat/blockAllocationTable.go b/vhdcore/bat/blockAllocationTable.go index 509adf70..bc888477 100644 --- a/vhdcore/bat/blockAllocationTable.go +++ b/vhdcore/bat/blockAllocationTable.go @@ -20,7 +20,6 @@ import ( // the 'block bitmap section'. Each bit in the bitmap indicates the state of the corresponding sector // in 'data section', 1 indicates sector contains valid data, 0 indicates the sector have never been // modified. -// type BlockAllocationTable struct { BATEntriesCount uint32 BAT []uint32 @@ -30,7 +29,6 @@ type BlockAllocationTable struct { // NewBlockAllocationTable creates an instance of BlockAllocationTable, BAT is the block allocation table, // each entry in this table is the absolute sector offset to a block, blockSize is the size of block's // 'data section' in bytes. 
-// func NewBlockAllocationTable(blockSize uint32, bat []uint32) *BlockAllocationTable { return &BlockAllocationTable{BATEntriesCount: uint32(len(bat)), blockSize: blockSize, BAT: bat} } @@ -41,7 +39,6 @@ func NewBlockAllocationTable(blockSize uint32, bat []uint32) *BlockAllocationTab // required to store the bitmap. // As per vhd specification sectors per block must be power of two. The sector length is always 512 bytes. // This means the block size will be power of two as well e.g. 512 * 2^3, 512 * 2^4, 512 * 2^5 etc.. -// func (b *BlockAllocationTable) GetBitmapSizeInBytes() int32 { return int32(b.blockSize / uint32(vhdcore.VhdSectorLength) / 8) } @@ -58,20 +55,17 @@ func (b *BlockAllocationTable) GetSectorPaddedBitmapSizeInBytes() int32 { // GetBitmapAddress returns the address of the 'block bitmap section' of a given block. Address is the // absolute byte offset of the 'block bitmap section'. A block consists of 'block bitmap section' and // 'data section' -// func (b *BlockAllocationTable) GetBitmapAddress(blockIndex uint32) int64 { return int64(b.BAT[blockIndex]) * vhdcore.VhdSectorLength } // GetBlockDataAddress returns the address of the 'data section' of a given block. Address is the absolute // byte offset of the 'data section'. A block consists of 'block bitmap section' and 'data section' -// func (b *BlockAllocationTable) GetBlockDataAddress(blockIndex uint32) int64 { return b.GetBitmapAddress(blockIndex) + int64(b.GetSectorPaddedBitmapSizeInBytes()) } // HasData returns true if the given block has not yet expanded hence contains no data. -// func (b *BlockAllocationTable) HasData(blockIndex uint32) bool { return blockIndex != vhdcore.VhdNoDataInt && b.BAT[blockIndex] != vhdcore.VhdNoDataInt } diff --git a/vhdcore/bat/blockAllocationTableFactory.go b/vhdcore/bat/blockAllocationTableFactory.go index 7036b5a0..dfa45bba 100644 --- a/vhdcore/bat/blockAllocationTableFactory.go +++ b/vhdcore/bat/blockAllocationTableFactory.go @@ -7,7 +7,6 @@ import ( // BlockAllocationTableFactory type is used to create BlockAllocationTable instance by reading BAT // section of the disk which follows the header -// type BlockAllocationTableFactory struct { vhdReader *reader.VhdReader vhdHeader *header.Header @@ -17,7 +16,6 @@ type BlockAllocationTableFactory struct { // to create BlockAllocationTable instance by reading BAT section of the Vhd. // vhdReader is the reader to be used to read the entry, vhdHeader is the header structure representing // the disk header. -// func NewBlockAllocationFactory(vhdReader *reader.VhdReader, vhdHeader *header.Header) *BlockAllocationTableFactory { return &BlockAllocationTableFactory{ vhdReader: vhdReader, @@ -27,7 +25,6 @@ func NewBlockAllocationFactory(vhdReader *reader.VhdReader, vhdHeader *header.He // Create creates a BlockAllocationTable instance by reading the BAT section of the disk. // This function return error if any error occurs while reading or parsing the BAT entries. -// func (f *BlockAllocationTableFactory) Create() (*BlockAllocationTable, error) { var err error batEntriesCount := f.vhdHeader.MaxTableEntries diff --git a/vhdcore/bat/blockAllocationTableParseError.go b/vhdcore/bat/blockAllocationTableParseError.go index 985b7f7e..ce9326d6 100644 --- a/vhdcore/bat/blockAllocationTableParseError.go +++ b/vhdcore/bat/blockAllocationTableParseError.go @@ -3,20 +3,17 @@ package bat import "fmt" // BlockAllocationTableParseError is the error type representing BAT parse error. 
-// type BlockAllocationTableParseError struct { BATItemIndex uint32 err error } // Error returns the string representation of the BlockAllocationTableParseError instance. -// func (e *BlockAllocationTableParseError) Error() string { return fmt.Sprintf("Parse BAT entry at '%d' failed: "+e.err.Error(), e.BATItemIndex) } // GetInnerErr returns the inner error, this method satisfies InnerErr interface -// func (e *BlockAllocationTableParseError) GetInnerErr() error { return e.err } @@ -24,7 +21,6 @@ func (e *BlockAllocationTableParseError) GetInnerErr() error { // NewBlockAllocationTableParseError returns a new BlockAllocationTableParseError instance. // The parameter batItemIndex represents BAT index failed to parse // The parameter err is the underlying error for parse failure. -// func NewBlockAllocationTableParseError(batItemIndex uint32, err error) error { return &BlockAllocationTableParseError{ BATItemIndex: batItemIndex, diff --git a/vhdcore/block/bitmap/bitmap.go b/vhdcore/block/bitmap/bitmap.go index c2bd6749..45bcb2ff 100644 --- a/vhdcore/block/bitmap/bitmap.go +++ b/vhdcore/block/bitmap/bitmap.go @@ -3,7 +3,6 @@ package bitmap import "fmt" // BitMap type represents 'bitmap section' of a block. -// type BitMap struct { data []byte Length int32 @@ -11,14 +10,12 @@ type BitMap struct { // NewBitMapFromByteSlice creates a new BitMap, b is the byte slice that needs to be used as bitmap // source. The caller should not reuse this byte slice anymore. -// func NewBitMapFromByteSlice(b []byte) *BitMap { return &BitMap{data: b, Length: int32(len(b)) * 8} } // NewBitMapFromByteSliceCopy creates a new BitMap, b is the byte slice that needs to be used as bitmap // source. The caller can reuse the byte slice as this method creates a copy of it. -// func NewBitMapFromByteSliceCopy(b []byte) *BitMap { data := make([]byte, len(b)) copy(data, b) @@ -26,7 +23,6 @@ func NewBitMapFromByteSliceCopy(b []byte) *BitMap { } // Set sets the bit at the given index. It returns error if idx < 0 or idx >= bitsCount. -// func (b *BitMap) Set(idx int32, value bool) error { if idx < 0 || idx >= b.Length { return fmt.Errorf("The index %d is out of boundary", idx) @@ -45,7 +41,6 @@ func (b *BitMap) Set(idx int32, value bool) error { } // Get returns the value of the bit at the given index. It returns error if idx < 0 or idx >= bitsCount. -// func (b *BitMap) Get(idx int32) (bool, error) { if idx < 0 || idx >= b.Length { return false, fmt.Errorf("The index %d is out of boundary", idx) diff --git a/vhdcore/block/bitmap/factory.go b/vhdcore/block/bitmap/factory.go index 801d4e9e..789f7a57 100644 --- a/vhdcore/block/bitmap/factory.go +++ b/vhdcore/block/bitmap/factory.go @@ -6,7 +6,6 @@ import ( ) // Factory type is used to create BitMap instance by reading 'bitmap section' of a block. -// type Factory struct { vhdReader *reader.VhdReader blockAllocationTable *bat.BlockAllocationTable @@ -16,7 +15,6 @@ type Factory struct { // the 'bitmap section' of a block. vhdReader is the reader to read the disk, blockAllocationTable wraps // the disk's BAT table, which has one entry per block, this is used to retrieve the absolute offset to // the beginning of the 'bitmap section' of a block and the size of the 'bitmap section'. 
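The Set/Get bounds checks quoted above translate into ordinary bit twiddling. A stripped-down sketch, assuming a most-significant-bit-first order within each byte (the patch does not show which bit order the real BitMap uses):

    package main

    import (
        "errors"
        "fmt"
    )

    // A stripped-down bitmap in the spirit of the BitMap type above: one bit per sector.
    type bitMap struct {
        data   []byte
        length int32 // number of bits
    }

    func newBitMap(b []byte) *bitMap { return &bitMap{data: b, length: int32(len(b)) * 8} }

    func (m *bitMap) set(idx int32, value bool) error {
        if idx < 0 || idx >= m.length {
            return errors.New("index out of boundary")
        }
        mask := byte(1) << uint(7-idx%8) // assumption: most-significant bit first
        if value {
            m.data[idx/8] |= mask
        } else {
            m.data[idx/8] &^= mask
        }
        return nil
    }

    func (m *bitMap) get(idx int32) (bool, error) {
        if idx < 0 || idx >= m.length {
            return false, errors.New("index out of boundary")
        }
        return m.data[idx/8]&(byte(1)<<uint(7-idx%8)) != 0, nil
    }

    func main() {
        m := newBitMap(make([]byte, 2)) // 16 bits
        _ = m.set(3, true)
        fmt.Println(m.get(3)) // true <nil>
    }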
-// func NewFactory(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllocationTable) *Factory { return &Factory{vhdReader: vhdReader, blockAllocationTable: blockAllocationTable} } @@ -24,7 +22,6 @@ func NewFactory(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllo // Create creates a BitMap instance by reading block's 'bitmap section', block is the index of the // block entry in the BAT whose 'bitmap section' needs to be read. // This function return error if any error occurs while reading or parsing the block's bitmap. -// func (f *Factory) Create(blockIndex uint32) (*BitMap, error) { bitmapAbsoluteByteOffset := f.blockAllocationTable.GetBitmapAddress(blockIndex) bitmapSizeInBytes := f.blockAllocationTable.GetBitmapSizeInBytes() diff --git a/vhdcore/block/bitmap/parseError.go b/vhdcore/block/bitmap/parseError.go index 047b1243..cb0b00c8 100644 --- a/vhdcore/block/bitmap/parseError.go +++ b/vhdcore/block/bitmap/parseError.go @@ -3,20 +3,17 @@ package bitmap import "fmt" // ParseError is the error type representing parsing error of a block's bitmap. -// type ParseError struct { BlockIndex uint32 err error } // Error returns the string representation of the BitmapParseError instance. -// func (e *ParseError) Error() string { return fmt.Sprintf("Parse Bitmap section of block '%d' failed: "+e.err.Error(), e.BlockIndex) } // GetInnerErr returns the inner error, this method satisfies InnerErr interface -// func (e *ParseError) GetInnerErr() error { return e.err } @@ -24,7 +21,6 @@ func (e *ParseError) GetInnerErr() error { // NewParseError returns a new ParseError instance. // The parameter blockIndex represents index of the block whose bitmap failed to parse // The parameter err is the underlying error for parse failure. -// func NewParseError(blockIndex uint32, err error) error { return &ParseError{ BlockIndex: blockIndex, diff --git a/vhdcore/block/block.go b/vhdcore/block/block.go index ea4039a2..0b99f5f6 100644 --- a/vhdcore/block/block.go +++ b/vhdcore/block/block.go @@ -11,7 +11,6 @@ import ( // Block type represents Block of a vhd. A block of a dynamic or differential vhd starts with a // 'bitmap' section followed by the 'data' section, in case of fixed vhd the entire block is used // to store the 'data'. -// type Block struct { // BlockIndex is the index of the block, block indices are consecutive values starting from 0 // for the first block. @@ -46,7 +45,6 @@ type Block struct { // Data returns the block data, the content of entire block in case of fixed vhd and the content // of block's data section in case of dynamic and differential vhd. -// func (b *Block) Data() ([]byte, error) { if b.blockData == nil { var err error @@ -60,20 +58,17 @@ func (b *Block) Data() ([]byte, error) { // GetSector returns an instance of Sector representing a sector with the given Id in this block. // The parameter sectorIndex is the index of the sector in this block to read. -// func (b *Block) GetSector(sectorIndex uint32) (*Sector, error) { return b.blockFactory.GetSector(b, sectorIndex) } // GetSectorCount returns the number of sectors in the block. -// func (b *Block) GetSectorCount() int64 { return b.LogicalRange.Length() / vhdcore.VhdSectorLength } // String returns formatted representation of the block // This satisfies Stringer interface. 
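bitmap.Factory.Create boils down to one positioned read: fetch GetBitmapSizeInBytes bytes at GetBitmapAddress(blockIndex). A sketch of that read over a plain io.ReaderAt; the offsets, sizes and the in-memory "disk" are invented for illustration, and the real VhdReader API may differ:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // readBlockBitmap sketches the read done by bitmap.Factory.Create: fetch the
    // bitmap bytes at the block's bitmap address taken from the BAT.
    func readBlockBitmap(disk io.ReaderAt, bitmapAddr int64, bitmapSize int32) ([]byte, error) {
        buf := make([]byte, bitmapSize)
        if _, err := disk.ReadAt(buf, bitmapAddr); err != nil {
            return nil, err
        }
        return buf, nil
    }

    func main() {
        disk := bytes.NewReader(make([]byte, 1024)) // fake 1 KiB "disk"
        bm, err := readBlockBitmap(disk, 512, 64)   // bitmap at offset 512, 64 bytes long
        fmt.Println(len(bm), err)                   // 64 <nil>
    }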
-// func (b *Block) String() string { return fmt.Sprintf("Block:%d", b.BlockIndex) } diff --git a/vhdcore/block/dataReadError.go b/vhdcore/block/dataReadError.go index dc9d0f4f..12f4c084 100644 --- a/vhdcore/block/dataReadError.go +++ b/vhdcore/block/dataReadError.go @@ -7,7 +7,6 @@ import ( ) // DataReadError is the error type representing block data read error. -// type DataReadError struct { BlockIndex uint32 DiskType footer.DiskType @@ -15,13 +14,11 @@ type DataReadError struct { } // Error returns the string representation of the BlockDataReadError instance. -// func (e *DataReadError) Error() string { return fmt.Sprintf("Error in Reading block '%d', DiskType - %s : %s", e.BlockIndex, e.DiskType, e.err) } // GetInnerErr returns the inner error, this method satisfies InnerErr interface -// func (e *DataReadError) GetInnerErr() error { return e.err } @@ -29,7 +26,6 @@ func (e *DataReadError) GetInnerErr() error { // NewDataReadError returns a new DataReadError instance. // The parameter blockIndex represents index of the block whose bitmap failed to parse // The parameter err is the underlying error for parse failure. -// func NewDataReadError(blockIndex uint32, diskType footer.DiskType, err error) error { return &DataReadError{ BlockIndex: blockIndex, diff --git a/vhdcore/block/dataReader.go b/vhdcore/block/dataReader.go index 3cea537c..843f5e8e 100644 --- a/vhdcore/block/dataReader.go +++ b/vhdcore/block/dataReader.go @@ -2,7 +2,6 @@ package block // DataReader interface that all block readers specific to disk type (fixed, // dynamic, differencing) needs to satisfy. -// type DataReader interface { // Read reads the disk block identified by the parameter block // diff --git a/vhdcore/block/differencingDiskBlockFactory.go b/vhdcore/block/differencingDiskBlockFactory.go index 96791cd6..05948a04 100644 --- a/vhdcore/block/differencingDiskBlockFactory.go +++ b/vhdcore/block/differencingDiskBlockFactory.go @@ -12,7 +12,6 @@ import ( // To get the block size of the block in differencing disk // To get a Sector instance representing sector of differencing disk's block // To get the logical footer range of fixed disk generated from the differencing disk and it's parents. -// type DifferencingDiskBlockFactory struct { params *FactoryParams bitmapFactory *bitmap.Factory @@ -24,7 +23,6 @@ type DifferencingDiskBlockFactory struct { // NewDifferencingDiskBlockFactory creates a DifferencingDiskBlockFactory instance which can be used to // create a Block objects representing differential disk block of a size specified in header BlockSize // field parameter vhdFile represents the differencing disk. -// func NewDifferencingDiskBlockFactory(params *FactoryParams) *DifferencingDiskBlockFactory { blockFactory := &DifferencingDiskBlockFactory{params: params} @@ -43,20 +41,17 @@ func NewDifferencingDiskBlockFactory(params *FactoryParams) *DifferencingDiskBlo } // GetBlockCount returns the number of blocks in the differential disk. -// func (f *DifferencingDiskBlockFactory) GetBlockCount() int64 { return int64(f.params.BlockAllocationTable.BATEntriesCount) } // GetBlockSize returns the size of the 'data section' of block in bytes in the differential disk. -// func (f *DifferencingDiskBlockFactory) GetBlockSize() int64 { return int64(f.params.VhdHeader.BlockSize) } // GetFooterRange returns the logical range of the footer when converting this differential vhd to // fixed logical range of footer is the absolute start and end byte offset of the footer. 
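The footer range described here is simple arithmetic: when a dynamic or differencing disk is exposed as a fixed one, the 512-byte footer sits immediately after the last data block. A small worked example with hypothetical numbers:

    package main

    import "fmt"

    const vhdFooterSize = 512 // bytes

    // footerRange gives the logical placement of the footer when a dynamic or
    // differencing VHD is exposed as a fixed one: right after the last data block.
    func footerRange(blockCount, blockSize int64) (start, end int64) {
        start = blockCount * blockSize
        end = start + vhdFooterSize - 1
        return
    }

    func main() {
        // Hypothetical 1 GiB dynamic disk with 2 MiB blocks.
        fmt.Println(footerRange(512, 2*1024*1024)) // 1073741824 1073742335
    }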
-// func (f *DifferencingDiskBlockFactory) GetFooterRange() *common.IndexRange { return common.NewIndexRangeFromLength(f.GetBlockCount()*f.GetBlockSize(), vhdcore.VhdFooterSize) } @@ -65,7 +60,6 @@ func (f *DifferencingDiskBlockFactory) GetFooterRange() *common.IndexRange { // identifies the block. If the block to be read is marked as empty in the differencing disk BAT then this // method will query parent disk for the same block. This function return error if the block cannot be created // due to any read error. -// func (f *DifferencingDiskBlockFactory) Create(blockIndex uint32) (*Block, error) { if !f.params.BlockAllocationTable.HasData(blockIndex) { if f.cachedBlock == nil || f.cachedBlock.BlockIndex != blockIndex { @@ -104,7 +98,6 @@ func (f *DifferencingDiskBlockFactory) Create(blockIndex uint32) (*Block, error) // read is marked as empty in the block's bitmap then this method will query parent disk for the same sector. // This function return error if the sector cannot be created due to any read error or if the requested sector // index is invalid. -// func (f *DifferencingDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) { blockIndex := block.BlockIndex if block.IsEmpty { @@ -132,7 +125,6 @@ func (f *DifferencingDiskBlockFactory) GetSector(block *Block, sectorIndex uint3 // GetBitmapFactory returns an instance of BitmapFactory that can be used to create the bitmap of a block // by reading block from differencing disk. -// func (f *DifferencingDiskBlockFactory) GetBitmapFactory() *bitmap.Factory { return f.bitmapFactory } diff --git a/vhdcore/block/differencingDiskBlockReader.go b/vhdcore/block/differencingDiskBlockReader.go index b01b6981..08983f89 100644 --- a/vhdcore/block/differencingDiskBlockReader.go +++ b/vhdcore/block/differencingDiskBlockReader.go @@ -9,7 +9,6 @@ import ( // DifferencingDiskBlockReader type satisfies BlockDataReader interface, // implementation of BlockDataReader::Read by this type can read the 'data' section // of a differencing disk's block. 
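The parent-fallback rule spelled out for Create and GetSector can be summarised in a few lines. This is a sketch of the decision only; hasData, readChild and readParent are hypothetical stand-ins for the BAT check and the child/parent disk readers:

    package main

    import "fmt"

    // readDifferencingBlock sketches the fallback rule described above.
    func readDifferencingBlock(idx uint32, hasData func(uint32) bool,
        readChild, readParent func(uint32) ([]byte, error)) ([]byte, error) {
        if hasData(idx) {
            return readChild(idx) // block allocated in the differencing (child) disk
        }
        return readParent(idx) // never written in the child: data lives in the parent chain
    }

    func main() {
        hasData := func(i uint32) bool { return i%2 == 0 } // pretend even blocks are allocated
        child := func(uint32) ([]byte, error) { return []byte("child"), nil }
        parent := func(uint32) ([]byte, error) { return []byte("parent"), nil }

        for _, i := range []uint32{0, 1} {
            b, _ := readDifferencingBlock(i, hasData, child, parent)
            fmt.Println(i, string(b)) // 0 child / 1 parent
        }
    }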
-// type DifferencingDiskBlockReader struct { vhdReader *reader.VhdReader blockAllocationTable *bat.BlockAllocationTable @@ -22,7 +21,6 @@ type DifferencingDiskBlockReader struct { // The parameter vhdReader is the reader to read the disk // The parameter blockAllocationTable represents the disk's BAT // The parameter blockSizeInBytes is the size of the differencing disk block -// func NewDifferencingDiskBlockReader(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllocationTable, blockSizeInBytes uint32) *DifferencingDiskBlockReader { return &DifferencingDiskBlockReader{ vhdReader: vhdReader, @@ -34,7 +32,6 @@ func NewDifferencingDiskBlockReader(vhdReader *reader.VhdReader, blockAllocation // Read reads the data in a block of a differencing disk // The parameter block represents the block whose 'data' section to read -// func (r *DifferencingDiskBlockReader) Read(block *Block) ([]byte, error) { blockIndex := block.BlockIndex if !r.blockAllocationTable.HasData(blockIndex) { diff --git a/vhdcore/block/dynamicDiskBlockFactory.go b/vhdcore/block/dynamicDiskBlockFactory.go index 63df9f48..9a8c1839 100644 --- a/vhdcore/block/dynamicDiskBlockFactory.go +++ b/vhdcore/block/dynamicDiskBlockFactory.go @@ -12,7 +12,6 @@ import ( // To get the block size of the block in dynamic disk // To get a Sector instance representing sector of dynamic disk's block // To get the logical footer range of fixed disk generated from the dynamic disk -// type DynamicDiskBlockFactory struct { params *FactoryParams bitmapFactory *bitmap.Factory @@ -24,7 +23,6 @@ type DynamicDiskBlockFactory struct { // NewDynamicDiskFactory creates a DynamicDiskBlockFactory instance which can be used to create a // Block objects representing dynamic disk block of a size specified in header BlockSize field // parameter params contains header, footer, BAT of dynamic disk and reader to read the disk. -// func NewDynamicDiskFactory(params *FactoryParams) *DynamicDiskBlockFactory { blockFactory := &DynamicDiskBlockFactory{params: params} @@ -42,27 +40,23 @@ func NewDynamicDiskFactory(params *FactoryParams) *DynamicDiskBlockFactory { } // GetBlockCount returns the number of blocks in the dynamic disk. -// func (f *DynamicDiskBlockFactory) GetBlockCount() int64 { return int64(f.params.BlockAllocationTable.BATEntriesCount) } // GetBlockSize returns the size of the 'data section' of block in bytes in the dynamic disk. -// func (f *DynamicDiskBlockFactory) GetBlockSize() int64 { return int64(f.params.VhdHeader.BlockSize) } // GetFooterRange returns the logical range of the footer when converting this dynamic vhd to fixed // logical range of footer is the absolute start and end byte offset of the footer. -// func (f *DynamicDiskBlockFactory) GetFooterRange() *common.IndexRange { return common.NewIndexRangeFromLength(f.GetBlockCount()*f.GetBlockSize(), vhdcore.VhdFooterSize) } // Create returns an instance of Block which represents a dynamic disk block, the parameter blockIndex // identifies the block. This function return error if the block cannot be created due to any read error. 
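The logical range assigned to each block follows directly from the block size: block i covers bytes i*blockSize through i*blockSize+blockSize-1. A tiny worked example with an illustrative 2 MiB block size:

    package main

    import "fmt"

    // blockLogicalRange: block i of a disk with blockSize-byte blocks covers the
    // inclusive logical byte range [i*blockSize, i*blockSize+blockSize-1].
    func blockLogicalRange(blockIndex uint32, blockSize int64) (start, end int64) {
        start = int64(blockIndex) * blockSize
        end = start + blockSize - 1
        return
    }

    func main() {
        fmt.Println(blockLogicalRange(3, 2*1024*1024)) // 6291456 8388607
    }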
-// func (f *DynamicDiskBlockFactory) Create(blockIndex uint32) (*Block, error) { if f.cachedDynamicBlock == nil || f.cachedDynamicBlock.BlockIndex != blockIndex { logicalRange := common.NewIndexRangeFromLength(int64(blockIndex)*f.GetBlockSize(), f.GetBlockSize()) @@ -93,7 +87,6 @@ func (f *DynamicDiskBlockFactory) Create(blockIndex uint32) (*Block, error) { // GetSector returns an instance of Sector in a dynamic disk, parameter block object identifying the block // containing the sector, the parameter sectorIndex identifies the sector in the block. This function return // error if the sector cannot be created due to any read error or if the requested sector index is invalid. -// func (f *DynamicDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) { blockIndex := block.BlockIndex if block.IsEmpty { @@ -105,7 +98,6 @@ func (f *DynamicDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (* // GetBitmapFactory returns an instance of BitmapFactory that can be used to create the bitmap of a block // by reading block from dynamic disk. -// func (f *DynamicDiskBlockFactory) GetBitmapFactory() *bitmap.Factory { return f.bitmapFactory } diff --git a/vhdcore/block/dynamicDiskBlockReader.go b/vhdcore/block/dynamicDiskBlockReader.go index e4000c8d..206e607b 100644 --- a/vhdcore/block/dynamicDiskBlockReader.go +++ b/vhdcore/block/dynamicDiskBlockReader.go @@ -11,7 +11,6 @@ import ( // DynamicDiskBlockReader type satisfies BlockDataReader interface, // implementation of BlockDataReader::Read by this type can read the 'data' section // of a dynamic disk's block. -// type DynamicDiskBlockReader struct { vhdReader *reader.VhdReader blockAllocationTable *bat.BlockAllocationTable @@ -24,7 +23,6 @@ type DynamicDiskBlockReader struct { // The parameter vhdReader is the reader to read the disk // The parameter blockAllocationTable represents the disk's BAT // The parameter blockSizeInBytes is the size of the dynamic disk block -// func NewDynamicDiskBlockReader(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllocationTable, blockSizeInBytes uint32) *DynamicDiskBlockReader { return &DynamicDiskBlockReader{ @@ -37,7 +35,6 @@ func NewDynamicDiskBlockReader(vhdReader *reader.VhdReader, blockAllocationTable // Read reads the data in a block of a dynamic disk // The parameter block represents the block whose 'data' section to read -// func (r *DynamicDiskBlockReader) Read(block *Block) ([]byte, error) { blockIndex := block.BlockIndex if !r.blockAllocationTable.HasData(blockIndex) { diff --git a/vhdcore/block/factory.go b/vhdcore/block/factory.go index 091804a5..7156ee7d 100644 --- a/vhdcore/block/factory.go +++ b/vhdcore/block/factory.go @@ -6,7 +6,6 @@ import ( // Factory interface that all block factories specific to disk type (fixed, // dynamic, differencing) needs to satisfy. -// type Factory interface { GetBlockCount() int64 GetBlockSize() int64 diff --git a/vhdcore/block/factoryParams.go b/vhdcore/block/factoryParams.go index 953da150..3757136b 100644 --- a/vhdcore/block/factoryParams.go +++ b/vhdcore/block/factoryParams.go @@ -9,7 +9,6 @@ import ( // FactoryParams represents type of the parameter for different disk block // factories. 
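For a dynamic disk the Read path shown above has two cases: a block with no BAT entry is logically all zeros (that is how the VHD format defines an unallocated block), and an allocated block is read from its data address. A sketch of that behaviour with an invented reader and offsets:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // readDynamicBlock sketches the two cases of the dynamic-disk Read path.
    func readDynamicBlock(disk io.ReaderAt, hasData bool, dataAddr int64, blockSize int) ([]byte, error) {
        buf := make([]byte, blockSize)
        if !hasData {
            return buf, nil // never expanded: zero-filled
        }
        if _, err := disk.ReadAt(buf, dataAddr); err != nil {
            return nil, err
        }
        return buf, nil
    }

    func main() {
        disk := bytes.NewReader(bytes.Repeat([]byte{0xAB}, 4096))
        empty, _ := readDynamicBlock(disk, false, 0, 4)
        data, _ := readDynamicBlock(disk, true, 1024, 4)
        fmt.Println(empty, data) // [0 0 0 0] [171 171 171 171]
    }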
-// type FactoryParams struct { VhdFooter *footer.Footer VhdHeader *header.Header diff --git a/vhdcore/block/fixedDiskBlockFactory.go b/vhdcore/block/fixedDiskBlockFactory.go index 6b7e6475..81516285 100644 --- a/vhdcore/block/fixedDiskBlockFactory.go +++ b/vhdcore/block/fixedDiskBlockFactory.go @@ -14,7 +14,6 @@ import ( // To get the block size of the block in fixed disk // To get a Sector instance representing sector of fixed disk's block // To get the logical footer range of the fixed disk -// type FixedDiskBlockFactory struct { params *FactoryParams sectorFactory *SectorFactory @@ -28,7 +27,6 @@ type FixedDiskBlockFactory struct { // NewFixedDiskBlockFactoryWithDefaultBlockSize creates a FixedDiskBlockFactory instance which can // be used to create a Block object representing fixed disk block of default size 512 KB. // parameter params contains header, footer of the fixed disk and reader to read the disk. -// func NewFixedDiskBlockFactoryWithDefaultBlockSize(params *FactoryParams) *FixedDiskBlockFactory { return NewFixedDiskBlockFactory(params, vhdcore.VhdDefaultBlockSize) } @@ -37,7 +35,6 @@ func NewFixedDiskBlockFactoryWithDefaultBlockSize(params *FactoryParams) *FixedD // Block objects representing fixed disk block of a specific size, parameter params contains header, // footer of the fixed disk and reader to read the disk, parameter blockSize represents the size // of blocks in the fixed disk -// func NewFixedDiskBlockFactory(params *FactoryParams, blockSize int64) *FixedDiskBlockFactory { blockFactory := &FixedDiskBlockFactory{params: params} @@ -65,20 +62,17 @@ func NewFixedDiskBlockFactory(params *FactoryParams, blockSize int64) *FixedDisk } // GetBlockCount returns the number of blocks in the fixed disk. -// func (f *FixedDiskBlockFactory) GetBlockCount() int64 { return f.blockCount } // GetBlockSize returns the size of the block in bytes of the fixed disk. -// func (f *FixedDiskBlockFactory) GetBlockSize() int64 { return f.blockSize } // GetFooterRange returns the logical range of the footer of the fixed disk, logical range of footer // is the absolute start and end byte offset of the footer. -// func (f *FixedDiskBlockFactory) GetFooterRange() *common.IndexRange { footerStartIndex := f.params.VhdReader.Size - vhdcore.VhdFooterSize return common.NewIndexRangeFromLength(footerStartIndex, vhdcore.VhdFooterSize) @@ -86,7 +80,6 @@ func (f *FixedDiskBlockFactory) GetFooterRange() *common.IndexRange { // Create returns an instance of Block which represents a fixed disk block, the parameter blockIndex // identifies the block. -// func (f *FixedDiskBlockFactory) Create(blockIndex uint32) (*Block, error) { if f.cachedFixedBlock == nil || f.cachedFixedBlock.BlockIndex != blockIndex { var logicalRange *common.IndexRange @@ -112,7 +105,6 @@ func (f *FixedDiskBlockFactory) Create(blockIndex uint32) (*Block, error) { // GetSector returns an instance of Sector in a fixed disk, parameter block describes the block containing the // sector, the parameter sectorIndex identifies the sector in the block. This function return error if the sector // cannot be created due to any read error or if the requested sector index is invalid. -// func (f *FixedDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) { blockIndex := block.BlockIndex if block.IsEmpty { @@ -124,7 +116,6 @@ func (f *FixedDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Se // getExtraBlockLogicalRange returns the IndexRange representing the additional block if any. 
Additional block // is the last block whose size < FixedDiskBlockFactory.BlockSize -// func (f *FixedDiskBlockFactory) getExtraBlockLogicalRange() *common.IndexRange { if f.extraBlockIndex == nil { log.Panicf("Unexpected state, extraBlockIndex not set") diff --git a/vhdcore/block/fixedDiskBlockReader.go b/vhdcore/block/fixedDiskBlockReader.go index 1b190ba8..fae1f610 100644 --- a/vhdcore/block/fixedDiskBlockReader.go +++ b/vhdcore/block/fixedDiskBlockReader.go @@ -10,7 +10,6 @@ import ( // FixedDiskBlockReader type satisfies BlockDataReader interface, // implementation of BlockDataReader::Read by this type can read the data from a block // of a fixed disk. -// type FixedDiskBlockReader struct { vhdReader *reader.VhdReader blockSizeInBytes uint32 @@ -20,7 +19,6 @@ type FixedDiskBlockReader struct { // a fixed disk block. // The parameter vhdReader is the reader to read the disk // The parameter blockSizeInBytes is the size of the fixed disk block -// func NewFixedDiskBlockReader(vhdReader *reader.VhdReader, blockSizeInBytes uint32) *FixedDiskBlockReader { return &FixedDiskBlockReader{ vhdReader: vhdReader, @@ -30,7 +28,6 @@ func NewFixedDiskBlockReader(vhdReader *reader.VhdReader, blockSizeInBytes uint3 // Read reads the data in a block of a fixed disk // The parameter block represents the block to read -// func (r *FixedDiskBlockReader) Read(block *Block) ([]byte, error) { blockIndex := block.BlockIndex blockByteOffset := int64(blockIndex) * int64(r.blockSizeInBytes) diff --git a/vhdcore/block/sector.go b/vhdcore/block/sector.go index c9a7b1d3..d5c62273 100644 --- a/vhdcore/block/sector.go +++ b/vhdcore/block/sector.go @@ -1,7 +1,6 @@ package block // Sector represents a sector in the 'data section' of a block. -// type Sector struct { BlockIndex uint32 SectorIndex int64 diff --git a/vhdcore/block/sectorFactory.go b/vhdcore/block/sectorFactory.go index 9d503bb0..de50cac6 100644 --- a/vhdcore/block/sectorFactory.go +++ b/vhdcore/block/sectorFactory.go @@ -8,7 +8,6 @@ import ( ) // SectorFactory type is used to create Sector instance by reading 512 byte sector from block's 'data section'. -// type SectorFactory struct { vhdReader *reader.VhdReader blockHasData func(uint32) bool @@ -21,7 +20,6 @@ type SectorFactory struct { // vhdReader is the reader to be used to read the sector, blockHasData is a function which can be used to // check a block is empty by providing block identifier, getBlockAddress is a function which can be used // to fetch the absolute byte offset of a block by providing block identifier. -// func NewSectorFactory(vhdReader *reader.VhdReader, blockHasData func(uint32) bool, getBlockAddress func(uint32) int64) *SectorFactory { return &SectorFactory{ vhdReader: vhdReader, @@ -33,7 +31,6 @@ func NewSectorFactory(vhdReader *reader.VhdReader, blockHasData func(uint32) boo // Create creates an instance of Sector by reading a 512 byte sector from the 'data section' of a block. // block describes the block containing the sector, sectorIndex identifies the sector to read. // This function return error if requested sector is invalid or in case of any read error. -// func (f *SectorFactory) Create(block *Block, sectorIndex uint32) (*Sector, error) { if int64(sectorIndex) > block.GetSectorCount() { return nil, fmt.Errorf("Total sectors: %d, Requested Sectors: %d", block.GetSectorCount(), sectorIndex) @@ -60,7 +57,6 @@ func (f *SectorFactory) Create(block *Block, sectorIndex uint32) (*Sector, error // CreateEmptySector creates an instance of Sector representing empty sector. 
The Data property of this sector // will be a slice of 512 bytes filled with zeros. -// func (f *SectorFactory) CreateEmptySector(blockIndex, sectorIndex uint32) *Sector { if f.emptySectorBuf == nil { f.emptySectorBuf = make([]byte, vhdcore.VhdSectorLength) diff --git a/vhdcore/block/sectorReadError.go b/vhdcore/block/sectorReadError.go index 79b6e469..5b76754f 100644 --- a/vhdcore/block/sectorReadError.go +++ b/vhdcore/block/sectorReadError.go @@ -3,7 +3,6 @@ package block import "fmt" // SectorReadError is the error type representing block's sector read error. -// type SectorReadError struct { BlockIndex uint32 SectorIndex uint32 @@ -11,7 +10,6 @@ type SectorReadError struct { } // Error returns the string representation of the SectorReadError instance. -// func (e *SectorReadError) Error() string { return fmt.Sprintf("Read sector '%d' of block '%d' failed: %s", e.SectorIndex, e.BlockIndex, e.err) } @@ -20,7 +18,6 @@ func (e *SectorReadError) Error() string { // The parameter blockIndex represents index of the block // The parameter sectorIndex represents index of the sector within the block // The parameter err is the underlying read error. -// func NewSectorReadError(blockIndex, sectorIndex uint32, err error) error { return &SectorReadError{ BlockIndex: blockIndex, diff --git a/vhdcore/common/indexRange.go b/vhdcore/common/indexRange.go index 8fe2957a..f93ad3a5 100644 --- a/vhdcore/common/indexRange.go +++ b/vhdcore/common/indexRange.go @@ -7,7 +7,6 @@ import ( // IndexRange represents sequence of integral numbers in a specified range, where range starts // at Start and ends at End, inclusive -// type IndexRange struct { Start int64 End int64 @@ -15,19 +14,16 @@ type IndexRange struct { // NewIndexRange creates a new range with start as value of the first integer in the sequence // and end as value of last integer in the sequence. -// func NewIndexRange(start, end int64) *IndexRange { return &IndexRange{Start: start, End: end} } // NewIndexRangeFromLength creates a new range starting from start and ends at start + length - 1. -// func NewIndexRangeFromLength(start, length int64) *IndexRange { return NewIndexRange(start, start+length-1) } // TotalRangeLength returns the total length of a given slice of ranges. -// func TotalRangeLength(ranges []*IndexRange) int64 { var length = int64(0) for _, r := range ranges { @@ -38,7 +34,6 @@ func TotalRangeLength(ranges []*IndexRange) int64 { // SubtractRanges produces a set of ranges, each subset of ranges in this set is produced by // subtracting subtrahends from each range in minuends. -// func SubtractRanges(minuends, subtrahends []*IndexRange) []*IndexRange { var result = make([]*IndexRange, 0) for _, minuend := range minuends { @@ -53,11 +48,10 @@ func SubtractRanges(minuends, subtrahends []*IndexRange) []*IndexRange { // Each each range in the given ranges X will be partitioned by the given partition-size to produce // a range set A. If the last range in A is not of partition-size and if it is adjacent to the // next range in the X then we calculate the bytes required to reach partition-size and -// 1. if next range has more bytes than required, then we borrow the required bytes from next -// range and advances the next range start -// 2. if next range has less or equal to the required bytes, then we borrow available and skip -// next range -// +// 1. if next range has more bytes than required, then we borrow the required bytes from next +// range and advances the next range start +// 2. 
if next range has less or equal to the required bytes, then we borrow available and skip +// next range func ChunkRangesBySize(ranges []*IndexRange, chunkSizeInBytes int64) []*IndexRange { var chunks = make([]*IndexRange, 0) length := len(ranges) @@ -105,14 +99,12 @@ func ChunkRangesBySize(ranges []*IndexRange, chunkSizeInBytes int64) []*IndexRan } // Length returns number of sequential integers in the range. -// func (ir *IndexRange) Length() int64 { return ir.End - ir.Start + 1 } // Equals returns true if this and given range represents the same sequence, two sequences // are same if both have the same start and end. -// func (ir *IndexRange) Equals(other *IndexRange) bool { return other != nil && ir.Start == other.Start && ir.End == other.End } @@ -120,16 +112,16 @@ func (ir *IndexRange) Equals(other *IndexRange) bool { // CompareTo indicates whether the this range precedes, follows, or occurs in the same // position in the sort order as the other // A return value -// Less than zero: This range precedes the other in the sort order, range A precedes -// range B if A start before B or both has the same start and A ends -// before B. -// Zero: This range occurs in the same position as other in sort order, two -// ranges are in the same sort position if both has the same start -// and end -// Greater than zero: This range follows the other in the sort order, a range A follows -// range B, if A start after B or both has the same start and A ends -// after B // +// Less than zero: This range precedes the other in the sort order, range A precedes +// range B if A start before B or both has the same start and A ends +// before B. +// Zero: This range occurs in the same position as other in sort order, two +// ranges are in the same sort position if both has the same start +// and end +// Greater than zero: This range follows the other in the sort order, a range A follows +// range B, if A start after B or both has the same start and A ends +// after B func (ir *IndexRange) CompareTo(other *IndexRange) int64 { r := ir.Start - other.Start if r != 0 { @@ -141,7 +133,6 @@ func (ir *IndexRange) CompareTo(other *IndexRange) int64 { // Intersects checks this and other range intersects, two ranges A and B intersects if either // of them starts or ends within the range of other, inclusive. -// func (ir *IndexRange) Intersects(other *IndexRange) bool { start := ir.Start if start < other.Start { @@ -158,7 +149,6 @@ func (ir *IndexRange) Intersects(other *IndexRange) bool { // Intersection computes the range representing the intersection of two ranges, a return // value nil indicates the ranges does not intersects. -// func (ir *IndexRange) Intersection(other *IndexRange) *IndexRange { start := ir.Start if start < other.Start { @@ -180,7 +170,6 @@ func (ir *IndexRange) Intersection(other *IndexRange) *IndexRange { // Includes checks this range includes the other range, a range A includes range B if B starts // and ends within A, inclusive. In other words a range A includes range B if their intersection // produces B -// func (ir *IndexRange) Includes(other *IndexRange) bool { if other.Start < ir.Start { return false @@ -195,7 +184,6 @@ func (ir *IndexRange) Includes(other *IndexRange) bool { // Gap compute the range representing the gap between this and the other range, a return value // nil indicates there is no gap because either the ranges intersects or they are adjacent. 
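IndexRange is the workhorse type here, so its semantics deserve a concrete example. A sketch of Length and Intersection over inclusive [Start, End] ranges, mirroring the descriptions above (the real method returns a *IndexRange or nil; this sketch uses an ok flag instead):

    package main

    import "fmt"

    // indexRange mirrors common.IndexRange: an inclusive [Start, End] range.
    type indexRange struct{ Start, End int64 }

    func (r indexRange) length() int64 { return r.End - r.Start + 1 }

    // intersection is false (no overlap) or the overlapping inclusive range.
    func (r indexRange) intersection(o indexRange) (indexRange, bool) {
        start, end := r.Start, r.End
        if o.Start > start {
            start = o.Start
        }
        if o.End < end {
            end = o.End
        }
        if start > end {
            return indexRange{}, false
        }
        return indexRange{start, end}, true
    }

    func main() {
        a, b := indexRange{0, 4095}, indexRange{4000, 8191}
        if x, ok := a.intersection(b); ok {
            fmt.Println(x, x.length()) // {4000 4095} 96
        }
    }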
-// func (ir *IndexRange) Gap(other *IndexRange) *IndexRange { if ir.Intersects(other) { return nil @@ -219,7 +207,6 @@ func (ir *IndexRange) Gap(other *IndexRange) *IndexRange { // Adjacent checks this range starts immediately starts after the other range or vice-versa, // a return value nil indicates the ranges intersects or there is a gap between the ranges. -// func (ir *IndexRange) Adjacent(other *IndexRange) bool { return !ir.Intersects(other) && ir.Gap(other) == nil } @@ -228,18 +215,17 @@ func (ir *IndexRange) Adjacent(other *IndexRange) bool { // differences to result slice. // // Given two ranges A and B, A - B produces -// 1. No result -// a. If they are equal or -// b. B includes A i.e 'A n B' = A -// OR -// 2. A, if they don't intersects -// OR -// 3. [(A n B).End + 1, A.End], if A and 'A n B' has same start -// OR -// 4. [A.Start, (A n B).Start - 1], if A and 'A n B' has same end -// OR -// 5. { [A.Start, (A n B).Start - 1], [(A n B).End + 1, A.End] }, otherwise -// +// 1. No result +// a. If they are equal or +// b. B includes A i.e 'A n B' = A +// OR +// 2. A, if they don't intersects +// OR +// 3. [(A n B).End + 1, A.End], if A and 'A n B' has same start +// OR +// 4. [A.Start, (A n B).Start - 1], if A and 'A n B' has same end +// OR +// 5. { [A.Start, (A n B).Start - 1], [(A n B).End + 1, A.End] }, otherwise func (ir *IndexRange) Subtract(other *IndexRange, result []*IndexRange) []*IndexRange { if ir.Equals(other) { return result @@ -273,7 +259,6 @@ func (ir *IndexRange) Subtract(other *IndexRange, result []*IndexRange) []*Index // SubtractRanges subtracts a set of ranges from this range and appends the ranges representing // the differences to result slice. The result slice will be sorted and de-duped if sortandDedup // is true. -// func (ir *IndexRange) SubtractRanges(ranges []*IndexRange, sortandDedup bool, result []*IndexRange) []*IndexRange { intersectAny := false for _, o := range ranges { @@ -296,7 +281,6 @@ func (ir *IndexRange) SubtractRanges(ranges []*IndexRange, sortandDedup bool, re // Merge produces a range by merging this and other range if they are adjacent. Trying to merge // non-adjacent ranges are panic. -// func (ir *IndexRange) Merge(other *IndexRange) *IndexRange { if !ir.Adjacent(other) { // TODO: error @@ -312,7 +296,6 @@ func (ir *IndexRange) Merge(other *IndexRange) *IndexRange { // PartitionBy produces a slice of adjacent ranges of same size, first range in the slice starts // where this range starts and last range ends where this range ends. The length of last range will // be less than size if length of this range is not multiple of size. -// func (ir *IndexRange) PartitionBy(size int64) []*IndexRange { length := ir.Length() if length <= size { @@ -335,14 +318,12 @@ func (ir *IndexRange) PartitionBy(size int64) []*IndexRange { } // String returns the string representation of this range, this satisfies stringer interface. -// func (ir *IndexRange) String() string { return fmt.Sprintf("{%d, %d}", ir.Start, ir.End) } // sortAndDedup sorts the given range slice in place, remove the duplicates from the sorted slice // and returns the updated slice. -// func sortAndDedup(indexRanges []*IndexRange) []*IndexRange { if len(indexRanges) == 0 { return indexRanges @@ -360,24 +341,20 @@ func sortAndDedup(indexRanges []*IndexRange) []*IndexRange { // indexRangeSorter is a type that satisfies sort.Interface interface for supporting sorting of // a IndexRange collection. 
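PartitionBy's contract (adjacent same-size subranges, with a shorter final one when the length is not a multiple of the size) is easy to show concretely. A sketch with an invented 10-byte range and 4-byte partitions:

    package main

    import "fmt"

    type indexRange struct{ Start, End int64 }

    // partitionBy mirrors the PartitionBy description above.
    func partitionBy(r indexRange, size int64) []indexRange {
        var out []indexRange
        for start := r.Start; start <= r.End; start += size {
            end := start + size - 1
            if end > r.End {
                end = r.End
            }
            out = append(out, indexRange{start, end})
        }
        return out
    }

    func main() {
        fmt.Println(partitionBy(indexRange{0, 9}, 4)) // [{0 3} {4 7} {8 9}]
    }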
-// type indexRangeSorter []*IndexRange // Len is the number of elements in the range collection. -// func (s indexRangeSorter) Len() int { return len(s) } // Less reports whether range at i-th position precedes the range at j-th position in sort order. // range A precedes range B if A start before B or both has the same start and A ends before B. -// func (s indexRangeSorter) Less(i, j int) bool { return s[i].CompareTo(s[j]) < 0 } // Swap swaps the elements with indexes i and j. -// func (s indexRangeSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vhdcore/common/utils.go b/vhdcore/common/utils.go index 09553958..f97dce55 100644 --- a/vhdcore/common/utils.go +++ b/vhdcore/common/utils.go @@ -8,14 +8,12 @@ import ( // Utf16BytesToStringLE decode the given UTF16 encoded byte sequence and returns // Go UTF8 encoded string, the byte order of the given sequence is little-endian. -// func Utf16BytesToStringLE(b []byte) string { return Utf16BytesToString(b, binary.LittleEndian) } // Utf16BytesToStringBE decode the given UTF16 encoded byte sequence and returns // Go UTF8 encoded string, the byte order of the given sequence is big-endian. -// func Utf16BytesToStringBE(b []byte) string { return Utf16BytesToString(b, binary.BigEndian) } @@ -23,7 +21,6 @@ func Utf16BytesToStringBE(b []byte) string { // Utf16BytesToString decode the given UTF16 encoded byte sequence and returns // Go UTF8 encoded string, the byte order of the sequence is determined by the // given binary.ByteOrder parameter. -// func Utf16BytesToString(b []byte, o binary.ByteOrder) string { var u []uint16 l := len(b) @@ -42,7 +39,6 @@ func Utf16BytesToString(b []byte, o binary.ByteOrder) string { } // CreateByteSliceCopy creates and returns a copy of the given slice. -// func CreateByteSliceCopy(b []byte) []byte { r := make([]byte, len(b)) copy(r, b) diff --git a/vhdcore/common/uuid.go b/vhdcore/common/uuid.go index d47bb9eb..97658ac0 100644 --- a/vhdcore/common/uuid.go +++ b/vhdcore/common/uuid.go @@ -6,13 +6,11 @@ import ( ) // UUID represents a Universally Unique Identifier. -// type UUID struct { uuid [16]byte } // NewUUID creates a new UUID, it uses the given 128-bit (16 byte) value as the uuid. -// func NewUUID(b []byte) (*UUID, error) { if len(b) != 16 { return nil, errors.New("NewUUID: buffer requires to be 16 bytes") @@ -25,7 +23,6 @@ func NewUUID(b []byte) (*UUID, error) { // String returns the string representation of the UUID which is 16 hex digits separated by hyphens // int form xxxx-xx-xx-xx-xxxxxx -// func (u *UUID) String() string { a := uint32(u.uuid[3])<<24 | uint32(u.uuid[2])<<16 | uint32(u.uuid[1])<<8 | uint32(u.uuid[0]) // a := b.order.Uint32(b.buffer[:4]) @@ -45,7 +42,6 @@ func (u *UUID) String() string { } // ToByteSlice returns the UUID as byte slice. -// func (u *UUID) ToByteSlice() []byte { b := make([]byte, 16) copy(b, u.uuid[:]) diff --git a/vhdcore/constants.go b/vhdcore/constants.go index 24b42f48..e01db71f 100644 --- a/vhdcore/constants.go +++ b/vhdcore/constants.go @@ -1,29 +1,22 @@ package vhdcore // VhdDefaultBlockSize is the default block size of the VHD. -// const VhdDefaultBlockSize int64 = 512 * 1024 // VhdNoDataLong is the value in the BAT indicating a block is empty. -// const VhdNoDataLong int64 = ^int64(0) // VhdNoDataInt is the value in the BAT indicating a block is empty. -// const VhdNoDataInt uint32 = 0xFFFFFFFF // VhdPageSize is the size of the VHD page size. -// const VhdPageSize int64 = 512 // VhdFooterSize is the size of the VHD footer in bytes. 
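Utf16BytesToString's job is standard UTF-16 decoding with a caller-chosen byte order. A sketch using encoding/binary and unicode/utf16; the two-byte example string is invented:

    package main

    import (
        "encoding/binary"
        "fmt"
        "unicode/utf16"
    )

    // utf16BytesToString pairs the bytes with the given byte order, then converts
    // the UTF-16 code units to a Go string, as described above.
    func utf16BytesToString(b []byte, order binary.ByteOrder) string {
        u := make([]uint16, 0, len(b)/2)
        for i := 0; i+1 < len(b); i += 2 {
            u = append(u, order.Uint16(b[i:i+2]))
        }
        return string(utf16.Decode(u))
    }

    func main() {
        // "Hi" in UTF-16 big-endian, the encoding the header uses for the parent path.
        fmt.Println(utf16BytesToString([]byte{0x00, 'H', 0x00, 'i'}, binary.BigEndian)) // Hi
    }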
-// const VhdFooterSize int64 = 512 // VhdSectorLength is the sector length which is always 512 bytes, as per VHD specification. -// const VhdSectorLength int64 = 512 // VhdFooterChecksumOffset is the bye offset of checksum field in the VHD footer. -// const VhdFooterChecksumOffset int = 64 diff --git a/vhdcore/diskstream/diskstream.go b/vhdcore/diskstream/diskstream.go index 902510f7..88831178 100644 --- a/vhdcore/diskstream/diskstream.go +++ b/vhdcore/diskstream/diskstream.go @@ -14,7 +14,6 @@ import ( // DiskStream provides a logical stream over a VHD file. // The type exposes the VHD as a fixed VHD, regardless of actual underlying VHD type (dynamic, differencing // or fixed type) -// type DiskStream struct { offset int64 size int64 @@ -27,7 +26,6 @@ type DiskStream struct { } // StreamExtent describes a block range of a disk which contains data. -// type StreamExtent struct { Range *common.IndexRange OwnerVhdUniqueID *common.UUID @@ -35,7 +33,6 @@ type StreamExtent struct { // CreateNewDiskStream creates a new DiskStream. // Parameter vhdPath is the path to VHD -// func CreateNewDiskStream(vhdPath string) (*DiskStream, error) { var err error stream := &DiskStream{offset: 0, isClosed: false} @@ -56,13 +53,11 @@ func CreateNewDiskStream(vhdPath string) (*DiskStream, error) { // GetDiskType returns the type of the disk, expected values are DiskTypeFixed, DiskTypeDynamic // or DiskTypeDifferencing -// func (s *DiskStream) GetDiskType() footer.DiskType { return s.vhdFile.GetDiskType() } // GetSize returns the length of the stream in bytes. -// func (s *DiskStream) GetSize() int64 { return s.size } @@ -78,7 +73,6 @@ func (s *DiskStream) GetSize() int64 { // end of footer section after reading some but not all the bytes then Read won't return any error. // // Read satisfies io.Reader interface -// func (s *DiskStream) Read(p []byte) (n int, err error) { if s.offset >= s.size { return 0, io.EOF @@ -110,7 +104,6 @@ func (s *DiskStream) Read(p []byte) (n int, err error) { // means relative to the end. It returns the new offset and an error, if any. // // Seek satisfies io.Seeker interface -// func (s *DiskStream) Seek(offset int64, whence int) (int64, error) { switch whence { default: @@ -134,7 +127,6 @@ func (s *DiskStream) Seek(offset int64, whence int) (int64, error) { // Close closes the VHD file, rendering it unusable for I/O. It returns an error, if any. // // Close satisfies io.Closer interface -// func (s *DiskStream) Close() error { if !s.isClosed { s.vhdFactory.Dispose(nil) @@ -150,7 +142,6 @@ func (s *DiskStream) Close() error { // so returned extents slice will not contain such range. // For fixed disk - this method returns extents describing ranges of all blocks, to rule out fixed disk block // ranges containing zero bytes use DetectEmptyRanges function in upload package. -// func (s *DiskStream) GetExtents() ([]*StreamExtent, error) { extents := make([]*StreamExtent, 1) blocksCount := s.vhdBlockFactory.GetBlockCount() @@ -181,7 +172,6 @@ func (s *DiskStream) GetExtents() ([]*StreamExtent, error) { // so returned extents slice will not contain such range. // For fixed disk - this method returns extents describing ranges of all blocks, to rule out fixed disk block // ranges containing zero bytes use DetectEmptyRanges function in upload package. 
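Since DiskStream is the main entry point for consumers, a short usage sketch may help. It uses only calls shown in this patch (CreateNewDiskStream, GetDiskType, GetSize, GetExtents, Close); the VHD path is hypothetical:

    package main

    import (
        "fmt"
        "log"

        "github.com/flatcar/azure-vhd-utils/vhdcore/diskstream"
    )

    func main() {
        // Open any VHD (fixed, dynamic or differencing) as one fixed-size logical stream.
        stream, err := diskstream.CreateNewDiskStream("/path/to/disk.vhd") // hypothetical path
        if err != nil {
            log.Fatal(err)
        }
        defer stream.Close()

        fmt.Println("disk type:", stream.GetDiskType())
        fmt.Println("logical size:", stream.GetSize())

        // Extents are the block ranges that actually carry data.
        extents, err := stream.GetExtents()
        if err != nil {
            log.Fatal(err)
        }
        for _, extent := range extents {
            fmt.Println("extent:", extent.Range)
        }
    }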
-// func (s *DiskStream) EnumerateExtents(f func(*StreamExtent, error) bool) { blocksCount := s.vhdBlockFactory.GetBlockCount() i := int64(0) @@ -214,7 +204,6 @@ func (s *DiskStream) EnumerateExtents(f func(*StreamExtent, error) bool) { // readFromBlocks identifies the blocks constituting the range rangeToRead, and read data from these // blocks into p. It returns the number of bytes read, which will be the minimum of sum of lengths // of all constituting range and len(p), provided there is no error. -// func (s *DiskStream) readFromBlocks(rangeToRead *common.IndexRange, p []byte) (n int, err error) { rangeToReadFromBlocks := s.vhdDataRange.Intersection(rangeToRead) if rangeToReadFromBlocks == nil { @@ -248,7 +237,6 @@ func (s *DiskStream) readFromBlocks(rangeToRead *common.IndexRange, p []byte) (n // readFromFooter reads the range rangeToRead from footer into p. It returns the number of bytes read, which // will be minimum of the given range length and len(p), provided there is no error. -// func (s *DiskStream) readFromFooter(rangeToRead *common.IndexRange, p []byte) (n int, err error) { rangeToReadFromFooter := s.vhdFooterRange.Intersection(rangeToRead) if rangeToReadFromFooter == nil { @@ -277,7 +265,6 @@ func (s *DiskStream) readFromFooter(rangeToRead *common.IndexRange, p []byte) (n } // byteToBlock returns the block index corresponding to the given byte position. -// func (s *DiskStream) byteToBlock(position int64) int64 { sectorsPerBlock := s.vhdBlockFactory.GetBlockSize() / vhdcore.VhdSectorLength return position / vhdcore.VhdSectorLength / sectorsPerBlock diff --git a/vhdcore/footer/diskGeometry.go b/vhdcore/footer/diskGeometry.go index 13355bc3..161b9552 100644 --- a/vhdcore/footer/diskGeometry.go +++ b/vhdcore/footer/diskGeometry.go @@ -7,7 +7,6 @@ import ( ) // DiskGeometry represents the cylinder, heads and sectors (CHS) per track. -// type DiskGeometry struct { // Offset = 0, Size = 2 // Stored in big-endian format @@ -21,7 +20,6 @@ type DiskGeometry struct { // CreateNewDiskGeometry creates a new DiskGeometry from the given virtual // size. CHS field values are calculated based on the total data sectors // present in the disk image. -// func CreateNewDiskGeometry(virtualSize int64) *DiskGeometry { // Total data sectors present in the disk image var totalSectors = virtualSize / vhdcore.VhdSectorLength @@ -77,7 +75,6 @@ func CreateNewDiskGeometry(virtualSize int64) *DiskGeometry { } // CreateCopy creates a copy of this instance -// func (d *DiskGeometry) CreateCopy() *DiskGeometry { return &DiskGeometry{ Cylinder: d.Cylinder, @@ -88,7 +85,6 @@ func (d *DiskGeometry) CreateCopy() *DiskGeometry { // Equals returns true if this and other points to the same instance // or if CHS fields of pointed instances are same -// func (d *DiskGeometry) Equals(other *DiskGeometry) bool { if other == nil { return false @@ -98,7 +94,6 @@ func (d *DiskGeometry) Equals(other *DiskGeometry) bool { } // String returns the string representation of this range, this satisfies stringer interface. -// func (d *DiskGeometry) String() string { return fmt.Sprintf("Cylinder:%d Heads:%d Sectors:%d", d.Cylinder, d.Heads, d.Sectors) } diff --git a/vhdcore/footer/diskType.go b/vhdcore/footer/diskType.go index 99ba271f..d9318b04 100644 --- a/vhdcore/footer/diskType.go +++ b/vhdcore/footer/diskType.go @@ -2,7 +2,6 @@ package footer // DiskType type represents the type of the disk, Value is stored in the footer // in big-endian format. 
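CreateNewDiskGeometry's comment says the CHS values are derived from the total sector count; the patch does not show the full calculation, but the VHD specification gives a standard algorithm, sketched here. That the factory follows these exact steps is an assumption:

    package main

    import "fmt"

    // chsFromVirtualSize is the cylinders/heads/sectors-per-track calculation given
    // in the VHD specification; CreateNewDiskGeometry presumably follows the same steps.
    func chsFromVirtualSize(virtualSize int64) (cylinders uint16, heads, sectorsPerTrack byte) {
        totalSectors := virtualSize / 512
        if totalSectors > 65535*16*255 {
            totalSectors = 65535 * 16 * 255
        }
        var spt, hds, cth int64
        if totalSectors >= 65535*16*63 {
            spt, hds = 255, 16
            cth = totalSectors / spt
        } else {
            spt = 17
            cth = totalSectors / spt
            hds = (cth + 1023) / 1024
            if hds < 4 {
                hds = 4
            }
            if cth >= hds*1024 || hds > 16 {
                spt, hds = 31, 16
                cth = totalSectors / spt
            }
            if cth >= hds*1024 {
                spt, hds = 63, 16
                cth = totalSectors / spt
            }
        }
        return uint16(cth / hds), byte(hds), byte(spt)
    }

    func main() {
        fmt.Println(chsFromVirtualSize(30 * 1024 * 1024 * 1024)) // 62415 16 63
    }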
-// type DiskType uint32 const ( @@ -23,7 +22,6 @@ const ( // String returns the string representation of the DiskType. If the int type value // does not match with the predefined disk types then this function return the // string "UnknownDiskType" -// func (d DiskType) String() string { switch d { case DiskTypeFixed: diff --git a/vhdcore/footer/factory.go b/vhdcore/footer/factory.go index 645c0057..9c2d6e52 100644 --- a/vhdcore/footer/factory.go +++ b/vhdcore/footer/factory.go @@ -10,7 +10,6 @@ import ( ) // Factory type is used to create Footer instance by reading vhd footer section. -// type Factory struct { vhdReader *reader.VhdReader footerOffset int64 @@ -18,14 +17,12 @@ type Factory struct { // NewFactory creates a new instance of Factory, which can be used to create a Footer // instance by reading the footer section using VhdReader. -// func NewFactory(vhdReader *reader.VhdReader) *Factory { return &Factory{vhdReader: vhdReader, footerOffset: vhdReader.Size - vhdcore.VhdFooterSize} } // Create creates a Footer instance by reading the footer section of the disk. // This function return error if any error occurs while reading or parsing the footer fields. -// func (f *Factory) Create() (*Footer, error) { footer := &Footer{} var err error @@ -124,7 +121,6 @@ func (f *Factory) Create() (*Footer, error) { // This function returns error if the cookie is invalid, if no or fewer bytes could be // read. Cookie is stored as eight-character ASCII string starting at offset 0 relative // to the beginning of footer. -// func (f *Factory) readVhdCookie() (*vhdcore.Cookie, error) { cookieData := make([]byte, 8) if _, err := f.vhdReader.ReadBytes(f.footerOffset+0, cookieData); err != nil { @@ -142,7 +138,6 @@ func (f *Factory) readVhdCookie() (*vhdcore.Cookie, error) { // fewer bytes could be read. // Feature is stored as 4 bytes value starting at offset 8 relative to the beginning of // footer. -// func (f *Factory) readFeatures() (VhdFeature, error) { value, err := f.vhdReader.ReadUInt32(f.footerOffset + 8) if err != nil { @@ -155,7 +150,6 @@ func (f *Factory) readFeatures() (VhdFeature, error) { // This function is return error if no or fewer bytes could be read. // VhdFileFormatVersion is stored as 4 bytes value starting at offset 12 relative to the // beginning of footer. -// func (f *Factory) readFileFormatVersion() (VhdFileFormatVersion, error) { value, err := f.vhdReader.ReadUInt32(f.footerOffset + 12) if err != nil { @@ -168,7 +162,6 @@ func (f *Factory) readFileFormatVersion() (VhdFileFormatVersion, error) { // This function return error if no or fewer bytes could be read. // Header offset is stored as 8 bytes value starting at offset 16 relative to the beginning // of footer. This value is stored in big-endian format. -// func (f *Factory) readHeaderOffset() (int64, error) { value, err := f.vhdReader.ReadInt64(f.footerOffset + 16) if err != nil { @@ -182,7 +175,6 @@ func (f *Factory) readHeaderOffset() (int64, error) { // This function return error if no or fewer bytes could be read. // TimeStamp is stored as 4 bytes value starting at offset 24 relative to the beginning // of footer. This value is stored in big-endian format. -// func (f *Factory) readTimeStamp() (*time.Time, error) { value, err := f.vhdReader.ReadDateTime(f.footerOffset + 24) if err != nil { @@ -196,7 +188,6 @@ func (f *Factory) readTimeStamp() (*time.Time, error) { // character set. This function return error if no or fewer bytes could be read. 
// Identifier is stored as 4 bytes value starting at offset 28 relative to the beginning // of footer. -// func (f *Factory) readCreatorApplication() (string, error) { creatorApp := make([]byte, 4) _, err := f.vhdReader.ReadBytes(f.footerOffset+28, creatorApp) @@ -211,7 +202,6 @@ func (f *Factory) readCreatorApplication() (string, error) { // bytes could be read. // Version is stored as 4 bytes value starting at offset 32 relative to the beginning // of footer. -// func (f *Factory) readCreatorVersion() (VhdCreatorVersion, error) { value, err := f.vhdReader.ReadUInt32(f.footerOffset + 32) if err != nil { @@ -225,7 +215,6 @@ func (f *Factory) readCreatorVersion() (VhdCreatorVersion, error) { // bytes could be read. // Version is stored as 4 bytes value starting at offset 36 relative to the beginning // of footer. -// func (f *Factory) readCreatorHostOsType() (HostOsType, error) { value, err := f.vhdReader.ReadUInt32(f.footerOffset + 36) if err != nil { @@ -241,7 +230,6 @@ func (f *Factory) readCreatorHostOsType() (HostOsType, error) { // beginning of footer. This size does not include the size consumed by vhd metadata such as // header, footer BAT, block's bitmap // This value is stored in big-endian format. -// func (f *Factory) readPhysicalSize() (int64, error) { value, err := f.vhdReader.ReadInt64(f.footerOffset + 40) if err != nil { @@ -258,7 +246,6 @@ func (f *Factory) readPhysicalSize() (int64, error) { // beginning of footer. This size does not include the size consumed by vhd metadata such as // header, footer BAT, block's bitmap // This value is stored in big-endian format. -// func (f *Factory) readVirtualSize() (int64, error) { value, err := f.vhdReader.ReadInt64(f.footerOffset + 48) if err != nil { @@ -271,7 +258,6 @@ func (f *Factory) readVirtualSize() (int64, error) { // track value for the hard disk. This function return error if no or fewer bytes could // be read. The value is stored starting starting at offset 56 relative to the beginning of // footer. This value is stored in big-endian format. -// func (f *Factory) readDiskGeometry() (*DiskGeometry, error) { diskGeometry := &DiskGeometry{} cylinder, err := f.vhdReader.ReadUInt16(f.footerOffset + 56 + 0) @@ -296,7 +282,6 @@ func (f *Factory) readDiskGeometry() (*DiskGeometry, error) { // This function return error if no or fewer bytes could be read. // The value is stored as 4 byte value starting at offset 60 relative to the beginning // of footer. This value is stored in big-endian format. -// func (f *Factory) readDiskType() (DiskType, error) { value, err := f.vhdReader.ReadUInt32(f.footerOffset + 60) if err != nil { @@ -309,7 +294,6 @@ func (f *Factory) readDiskType() (DiskType, error) { // This function return error if no or fewer bytes could be read. // The value is stored as 4 byte value starting at offset 64 relative to the beginning // of footer. This value is stored in big-endian format. -// func (f *Factory) readCheckSum() (uint32, error) { value, err := f.vhdReader.ReadUInt32(f.footerOffset + 64) if err != nil { @@ -323,7 +307,6 @@ func (f *Factory) readCheckSum() (uint32, error) { // This function return error if no or fewer bytes could be read. // The value is stored as 16 byte value starting at offset 68 relative to the beginning // of footer. -// func (f *Factory) readUniqueID() (*common.UUID, error) { value, err := f.vhdReader.ReadUUID(f.footerOffset + 68) if err != nil { @@ -336,7 +319,6 @@ func (f *Factory) readUniqueID() (*common.UUID, error) { // This function return error if the byte could be read. 
// The value is stored as 1 byte value starting at offset 84 relative to the beginning // of footer. -// func (f *Factory) readSavedState() (bool, error) { value, err := f.vhdReader.ReadBoolean(f.footerOffset + 84) if err != nil { @@ -349,7 +331,6 @@ func (f *Factory) readSavedState() (bool, error) { // This function return error if the byte could be read. // It is 427 bytes in size starting at offset 85 relative to the beginning // of footer. -// func (f *Factory) readReserved() ([]byte, error) { reserved := make([]byte, 427) _, err := f.vhdReader.ReadBytes(f.footerOffset+85, reserved) @@ -361,7 +342,6 @@ func (f *Factory) readReserved() ([]byte, error) { // readWholeFooter reads the entire footer as a raw bytes. This function return // error if the byte could be read. -// func (f *Factory) readWholeFooter() ([]byte, error) { rawData := make([]byte, 512) _, err := f.vhdReader.ReadBytes(f.footerOffset+0, rawData) diff --git a/vhdcore/footer/footer.go b/vhdcore/footer/footer.go index 9cf77c93..9d8d1ee1 100644 --- a/vhdcore/footer/footer.go +++ b/vhdcore/footer/footer.go @@ -11,7 +11,6 @@ import ( // Footer represents the footer of the vhd, the size of the footer is 512 bytes. // The last 512 bytes of the disk is footer. In case of dynamic and differential // vhds, the footer is replicated at the beginning of the disk as well. -// type Footer struct { // Offset = 0, Size = 8 Cookie *vhdcore.Cookie @@ -53,7 +52,6 @@ type Footer struct { } // CreateCopy creates and returns a deep copy of this instance. -// func (v *Footer) CreateCopy() *Footer { return &Footer{ Cookie: v.Cookie.CreateCopy(), @@ -78,7 +76,6 @@ func (v *Footer) CreateCopy() *Footer { // Equal returns true if this and other points to the same instance or if contents // of the fields of these two instances are same. -// func (v *Footer) Equal(other *Footer) bool { if other == nil { return false diff --git a/vhdcore/footer/hostOsType.go b/vhdcore/footer/hostOsType.go index 8f3e81cb..287cf0c7 100644 --- a/vhdcore/footer/hostOsType.go +++ b/vhdcore/footer/hostOsType.go @@ -4,7 +4,6 @@ import "fmt" // HostOsType represents the host operating system a disk image is created on. // Value is stored in the footer in big-endian format. -// type HostOsType uint32 const ( @@ -19,7 +18,6 @@ const ( // String returns the string representation of the HostOsType. If the int type // value does not match with the predefined OS types then this function convert // the int to string and return -// func (h HostOsType) String() string { switch h { case HostOsTypeWindows: diff --git a/vhdcore/footer/parseError.go b/vhdcore/footer/parseError.go index 37c91eb3..a3448c01 100644 --- a/vhdcore/footer/parseError.go +++ b/vhdcore/footer/parseError.go @@ -1,20 +1,17 @@ package footer // ParseError is the error type representing disk footer parse error. -// type ParseError struct { FooterField string err error } // Error returns the string representation of the ParseError instance. -// func (e *ParseError) Error() string { return "Parse footer field" + " '" + e.FooterField + "' failed: " + e.err.Error() } // GetInnerErr returns the inner error, this method satisfies InnerErr interface -// func (e *ParseError) GetInnerErr() error { return e.err } @@ -22,7 +19,6 @@ func (e *ParseError) GetInnerErr() error { // NewParseError returns a new ParseError instance. // The parameter footerField represents the field in the footer that failed to parse // The parameter err is the underlying error for parse failure. 
-// func NewParseError(footerField string, err error) error { return &ParseError{ FooterField: footerField, diff --git a/vhdcore/footer/vhdCreatorVersion.go b/vhdcore/footer/vhdCreatorVersion.go index c3baae3d..40db44cd 100644 --- a/vhdcore/footer/vhdCreatorVersion.go +++ b/vhdcore/footer/vhdCreatorVersion.go @@ -5,7 +5,6 @@ import "fmt" // VhdCreatorVersion represents the major/minor version of the application that // created the hard disk image. The version is stored in the vhd footer in // big-endian format. -// type VhdCreatorVersion uint32 const ( @@ -22,7 +21,6 @@ const ( // String returns the string representation of the VhdCreatorVersion. If the int // VhdCreatorVersion value does not match with the predefined CreatorVersions then // this function convert the int to string and return. -// func (v VhdCreatorVersion) String() string { switch v { case VhdCreatorVersionVS2004: diff --git a/vhdcore/footer/vhdFeature.go b/vhdcore/footer/vhdFeature.go index 6f331f95..d602113b 100644 --- a/vhdcore/footer/vhdFeature.go +++ b/vhdcore/footer/vhdFeature.go @@ -4,7 +4,6 @@ import "fmt" // VhdFeature represents a bit field used to indicate specific feature support. // Value is stored in the footer in big-endian format. -// type VhdFeature uint32 const ( @@ -24,7 +23,6 @@ const ( // String returns the string representation of the VhdFeature. If the int VhdFeature // value does not match with the predefined VhdFeatures then this function convert // int to string and return -// func (v VhdFeature) String() string { switch v { case VhdFeatureNoFeaturesEnabled: diff --git a/vhdcore/footer/vhdFileFormatVersion.go b/vhdcore/footer/vhdFileFormatVersion.go index ca539f0d..9cd929b9 100644 --- a/vhdcore/footer/vhdFileFormatVersion.go +++ b/vhdcore/footer/vhdFileFormatVersion.go @@ -4,20 +4,16 @@ package footer // the vhd. The version is stored in the vhd footer in big-endian format. // This is a 4 byte value - most-significant two bytes are for the major version. // The least-significant two bytes are the minor version -// type VhdFileFormatVersion uint32 // VhdFileFormatVersionDefault represents the currently supported vhd specification version. -// const VhdFileFormatVersionDefault VhdFileFormatVersion = 0x00010000 // VhdFileFormatVersionNone represents invalid version -// const VhdFileFormatVersionNone VhdFileFormatVersion = 0 // IsSupported returns true if this instance represents a supported vhd specification // version. -// func (v VhdFileFormatVersion) IsSupported() bool { return v == VhdFileFormatVersionDefault } diff --git a/vhdcore/footer/vhdFooterSerializer.go b/vhdcore/footer/vhdFooterSerializer.go index 58e90cc1..9d08c677 100644 --- a/vhdcore/footer/vhdFooterSerializer.go +++ b/vhdcore/footer/vhdFooterSerializer.go @@ -6,7 +6,6 @@ import ( ) // SerializeFooter returns the given VhdFooter instance as byte slice of length 512 bytes. -// func SerializeFooter(footer *Footer) []byte { buffer := make([]byte, vhdcore.VhdFooterSize) writer := writer.NewVhdWriterFromByteSlice(buffer) diff --git a/vhdcore/header/factory.go b/vhdcore/header/factory.go index baf3594a..d1186758 100644 --- a/vhdcore/header/factory.go +++ b/vhdcore/header/factory.go @@ -12,7 +12,6 @@ import ( ) // Factory type is used to create VhdHeader instance by reading vhd header section. 
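SerializeFooter has to produce a checksum the footer readers above will accept. The VHD specification defines it as the one's complement of the byte-wise sum of the footer with the checksum field (at VhdFooterChecksumOffset, 64) treated as zero; the sketch below applies that rule. The "conectix" cookie and the exact rule come from the specification, and it is an assumption that SerializeFooter does the same:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // footerChecksum: one's complement of the byte-wise sum of the footer, with the
    // 4-byte checksum field at offset 64 treated as zero.
    func footerChecksum(footer []byte) uint32 {
        var sum uint32
        for i, b := range footer {
            if i >= 64 && i < 68 {
                continue // skip the checksum field itself
            }
            sum += uint32(b)
        }
        return ^sum
    }

    func main() {
        footer := make([]byte, 512)
        copy(footer, "conectix") // footer cookie, per the specification
        binary.BigEndian.PutUint32(footer[64:68], footerChecksum(footer))
        fmt.Printf("0x%08X\n", binary.BigEndian.Uint32(footer[64:68])) // 0xFFFFFCA2
    }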
-// type Factory struct { vhdReader *reader.VhdReader headerOffset int64 @@ -20,14 +19,12 @@ type Factory struct { // NewFactory creates a new instance of Factory, which can be used to create // a VhdHeader instance by reading the header section using vhdReader. -// func NewFactory(vhdReader *reader.VhdReader, headerOffset int64) *Factory { return &Factory{vhdReader: vhdReader, headerOffset: headerOffset} } // Create creates a Header instance by reading the header section of a expandable disk. // This function return error if any error occurs while reading or parsing the header fields. -// func (f *Factory) Create() (*Header, error) { header := &Header{} var err error @@ -107,7 +104,6 @@ func (f *Factory) Create() (*Header, error) { // This function return error if the cookie is invalid, if no or fewer bytes could be read. // Cookie is stored as eight-character ASCII string starting at offset 0 relative to the beginning // of header. -// func (f *Factory) readHeaderCookie() (*vhdcore.Cookie, error) { cookieData := make([]byte, 8) if _, err := f.vhdReader.ReadBytes(f.headerOffset+0, cookieData); err != nil { @@ -126,7 +122,6 @@ func (f *Factory) readHeaderCookie() (*vhdcore.Cookie, error) { // bytes could be read. // This value is stored as 8 bytes value starting at offset 8 relative to the beginning of header. // This value is stored in big-endian format. -// func (f *Factory) readDataOffset() (int64, error) { value, err := f.vhdReader.ReadInt64(f.headerOffset + 8) if err != nil { @@ -139,7 +134,6 @@ func (f *Factory) readDataOffset() (int64, error) { // disk. This function return error if no or fewer bytes could be read. // BATOffset is stored as 8 bytes value starting at offset 16 relative to the beginning of header. // This value is stored in big-endian format. -// func (f *Factory) readBATOffset() (int64, error) { value, err := f.vhdReader.ReadInt64(f.headerOffset + 16) if err != nil { @@ -151,7 +145,6 @@ func (f *Factory) readBATOffset() (int64, error) { // readHeaderVersion reads the value of the field the holds the major/minor version of the disk header. // This function return error if no or fewer bytes could be read. HeaderVersion is stored as 4 bytes // value starting at offset 24 relative to the beginning of header. -// func (f *Factory) readHeaderVersion() (VhdHeaderVersion, error) { value, err := f.vhdReader.ReadUInt32(f.headerOffset + 24) if err != nil { @@ -169,7 +162,6 @@ func (f *Factory) readHeaderVersion() (VhdHeaderVersion, error) { // error if no or fewer bytes could be read. // MaxTableEntries is stored as 4 bytes value starting at offset 28 relative to the beginning of // header. This value is stored in big-endian format. -// func (f *Factory) readMaxBATEntries() (uint32, error) { value, err := f.vhdReader.ReadUInt32(f.headerOffset + 28) if err != nil { @@ -182,7 +174,6 @@ func (f *Factory) readMaxBATEntries() (uint32, error) { // bitmap section'. This function return error if no or fewer bytes could be read. // BlockSize is stored as 4 bytes value starting at offset 32 relative to the beginning of header. // This value is stored in big-endian format. -// func (f *Factory) readBlockSize() (uint32, error) { value, err := f.vhdReader.ReadUInt32(f.headerOffset + 32) if err != nil { @@ -195,7 +186,6 @@ func (f *Factory) readBlockSize() (uint32, error) { // This function return error if no or fewer bytes could be read. // The value is stored as 4 byte value starting at offset 36 relative to the beginning of header. // This value is stored in big-endian format. 
-// func (f *Factory) readCheckSum() (uint32, error) { value, err := f.vhdReader.ReadUInt32(f.headerOffset + 36) if err != nil { @@ -208,7 +198,6 @@ func (f *Factory) readCheckSum() (uint32, error) { // field is used only for differencing disk. This is a 128-bit universally unique identifier (UUID). // This function return error if no or fewer bytes could be read. // The value is stored as 16 byte value starting at offset 40 relative to the beginning of header. -// func (f *Factory) readParentUniqueID() (*common.UUID, error) { value, err := f.vhdReader.ReadUUID(f.headerOffset + 40) if err != nil { @@ -222,7 +211,6 @@ func (f *Factory) readParentUniqueID() (*common.UUID, error) { // instance of time.Time. This function return error if no or fewer bytes could be read. // TimeStamp is stored as 4 bytes value starting at offset 56 relative to the beginning of header. // This value is stored in big-endian format. -// func (f *Factory) readParentTimeStamp() (*time.Time, error) { value, err := f.vhdReader.ReadDateTime(f.headerOffset + 56) if err != nil { @@ -234,7 +222,6 @@ func (f *Factory) readParentTimeStamp() (*time.Time, error) { // readReserved reads the reserved field which is not used and all set to zero. This function return // error if no or fewer bytes could be read. Reserved is stored as 4 bytes value starting at offset // 60 relative to the beginning of header. This value is stored in big-endian format. -// func (f *Factory) readReserved() (uint32, error) { value, err := f.vhdReader.ReadUInt32(f.headerOffset + 60) if err != nil { @@ -246,7 +233,6 @@ func (f *Factory) readReserved() (uint32, error) { // readParentPath reads the field storing parent hard disk file name. This function return error if // no or fewer bytes could be read. ParentPath is stored in UTF-16 as big-endian format, its length is // 512 bytes, starting at offset 64 relative to the beginning of header. -// func (f *Factory) readParentPath() (string, error) { parentPath := make([]byte, 512) _, err := f.vhdReader.ReadBytes(f.headerOffset+64, parentPath) @@ -259,7 +245,6 @@ func (f *Factory) readParentPath() (string, error) { // readParentLocators reads the collection of parent locator entries. This function return error if // no or fewer bytes could be read. There are 8 entries, each 24 bytes, starting at offset 576 relative // to the beginning of header. -// func (f *Factory) readParentLocators() (parentlocator.ParentLocators, error) { var err error count := 8 @@ -279,7 +264,6 @@ func (f *Factory) readParentLocators() (parentlocator.ParentLocators, error) { // readWholeHeader reads the entire header as a raw bytes. This function return error if the byte // could be read. -// func (f *Factory) readWholeHeader() ([]byte, error) { rawData := make([]byte, 1024) _, err := f.vhdReader.ReadBytes(f.headerOffset+0, rawData) diff --git a/vhdcore/header/header.go b/vhdcore/header/header.go index a982ad07..0ecd339c 100644 --- a/vhdcore/header/header.go +++ b/vhdcore/header/header.go @@ -14,7 +14,6 @@ import ( // replicated at the beginning of the disk as well, the header structure follows // this replicated footer, the field 'HeaderOffset' in the footer contains absolute // offset to the header structure. 
-// type Header struct { // Offset = 0, Size = 8 Cookie *vhdcore.Cookie diff --git a/vhdcore/header/parentlocator/factory.go b/vhdcore/header/parentlocator/factory.go index 4539cb50..770b66c6 100644 --- a/vhdcore/header/parentlocator/factory.go +++ b/vhdcore/header/parentlocator/factory.go @@ -7,7 +7,6 @@ import ( // Factory type is used to create ParentLocator instance by reading one entry // in vhd header's parent-hard-disk-locator-info collection section. -// type Factory struct { vhdReader *reader.VhdReader locatorOffset int64 @@ -16,7 +15,6 @@ type Factory struct { // NewFactory creates a new instance of Factory, which can be used to create ParentLocator instance // by reading one entry from the vhd header's parent-hard-disk-locator-info collection, // locatorOffset is the offset of the entry to read, vhdReader is the reader to be used to read the entry. -// func NewFactory(vhdReader *reader.VhdReader, locatorOffset int64) *Factory { return &Factory{vhdReader: vhdReader, locatorOffset: locatorOffset} } @@ -24,7 +22,6 @@ func NewFactory(vhdReader *reader.VhdReader, locatorOffset int64) *Factory { // Create creates a ParentLocator instance by reading one entry in vhd header's parent-hard-disk-locator-info // collection section of the disk. This function return error if any error occurs while reading or parsing // the parent locators table fields. -// func (f *Factory) Create() (*ParentLocator, error) { locator := &ParentLocator{} var err error @@ -73,7 +70,6 @@ func (f *Factory) Create() (*ParentLocator, error) { // This function return error if no or fewer bytes could be read. The value is stored as 4 byte // value starting at offset 0 relative to the beginning of this parent-hard-disk-locator. This value // is stored in big-endian format. -// func (f *Factory) readPlatformCode() (PlatformCode, error) { value, err := f.vhdReader.ReadInt32(f.locatorOffset + 0) if err != nil { @@ -86,7 +82,6 @@ func (f *Factory) readPlatformCode() (PlatformCode, error) { // the parent hard disk file locator. This function return error if no or fewer bytes could be read. // The value is stored as 4 byte value starting at offset 4 relative to the beginning parent-hard-disk-locator-info. // This value is stored in big-endian format. -// func (f *Factory) readPlatformDataSpace() (int32, error) { value, err := f.vhdReader.ReadInt32(f.locatorOffset + 4) if err != nil { @@ -99,7 +94,6 @@ func (f *Factory) readPlatformDataSpace() (int32, error) { // locator in bytes. This function return error if no or fewer bytes could be read. The value is stored // as 4 byte value starting at offset 8 relative to the beginning parent-hard-disk-locator-info. This value // is stored in big-endian format. -// func (f *Factory) readPlatformDataLength() (int32, error) { value, err := f.vhdReader.ReadInt32(f.locatorOffset + 8) if err != nil { @@ -112,7 +106,6 @@ func (f *Factory) readPlatformDataLength() (int32, error) { // This function return error if no or fewer bytes could be read. The value is stored as 4 byte // value starting at offset 12 relative to the beginning parent-hard-disk-locator-info. // This value is stored in big-endian format. -// func (f *Factory) readReserved() (int32, error) { value, err := f.vhdReader.ReadInt32(f.locatorOffset + 12) if err != nil { @@ -125,7 +118,6 @@ func (f *Factory) readReserved() (int32, error) { // specific file locator data is stored. Call to this function is panic if no or fewer bytes could be read. 
// The value is stored as 4 byte value starting at offset 16 relative to the beginning parent-hard-disk-locator-info. // This value is stored in big-endian format. -// func (f *Factory) readPlatformDataOffset() (int64, error) { value, err := f.vhdReader.ReadInt64(f.locatorOffset + 16) if err != nil { diff --git a/vhdcore/header/parentlocator/parentLocator.go b/vhdcore/header/parentlocator/parentLocator.go index 734b90ac..cd710b7c 100644 --- a/vhdcore/header/parentlocator/parentLocator.go +++ b/vhdcore/header/parentlocator/parentLocator.go @@ -9,7 +9,6 @@ import ( // ParentLocator represents an entry in Parent locator table. Each entry represents // details (parent-hard-disk-locator-info) of file locator which is used to locate // the parent disk file of differencing hard disk. -// type ParentLocator struct { // Offset = 0, Size = 4 // This field stores the code representing the platform-specific format used for @@ -43,7 +42,6 @@ type ParentLocator struct { // SetPlatformSpecificFileLocator retrieves the file locator value and store that in the property // PlatformSpecificFileLocator -// func (l *ParentLocator) SetPlatformSpecificFileLocator(fileLocator []byte) { // 1. For the platform codes - W2Ru and W2Ku, fileLocator contents is UTF-16 encoded. // 2. For the platform code - MacX, fileLocator contents is UTF-8 encoded. diff --git a/vhdcore/header/parentlocator/parentLocators.go b/vhdcore/header/parentlocator/parentLocators.go index ebec05d4..6b776b8c 100644 --- a/vhdcore/header/parentlocator/parentLocators.go +++ b/vhdcore/header/parentlocator/parentLocators.go @@ -4,24 +4,20 @@ package parentlocator // collection). The collection entries store an absolute byte offset in the file where the parent // locator for a differencing hard disk is stored. This field is used only for differencing disks // and should be set to zero for dynamic disks. -// type ParentLocators []*ParentLocator // GetAbsoluteParentPath returns the absolute path to the parent differencing hard disk -// func (p ParentLocators) GetAbsoluteParentPath() string { return p.getParentPath(PlatformCodeW2Ku) } // GetRelativeParentPath returns the relative path to the parent differencing hard disk -// func (p ParentLocators) GetRelativeParentPath() string { return p.getParentPath(PlatformCodeW2Ru) } // getParentPath returns path to the parent differencing hard disk corresponding to the // given platform code -// func (p ParentLocators) getParentPath(code PlatformCode) string { for _, l := range p { if l.PlatformCode == code { diff --git a/vhdcore/header/parentlocator/parseError.go b/vhdcore/header/parentlocator/parseError.go index 4d871687..bf81c07b 100644 --- a/vhdcore/header/parentlocator/parseError.go +++ b/vhdcore/header/parentlocator/parseError.go @@ -1,20 +1,17 @@ package parentlocator // ParseError is the error type representing disk header parent locator parse error. -// type ParseError struct { LocatorField string err error } // Error returns the string representation of the ParseError instance. -// func (e *ParseError) Error() string { return "Parse parent locator field" + " '" + e.LocatorField + "' failed: " + e.err.Error() } // GetInnerErr returns the inner error, this method satisfies InnerErr interface -// func (e *ParseError) GetInnerErr() error { return e.err } @@ -22,7 +19,6 @@ func (e *ParseError) GetInnerErr() error { // NewParseError returns a new ParseError instance. 
// The parameter headerField represents the field in the header parent locator that failed to parse // The parameter err is the underlying error for parse failure. -// func NewParseError(locatorField string, err error) error { return &ParseError{ LocatorField: locatorField, diff --git a/vhdcore/header/parentlocator/platformCode.go b/vhdcore/header/parentlocator/platformCode.go index 217d0d12..bd13b5d3 100644 --- a/vhdcore/header/parentlocator/platformCode.go +++ b/vhdcore/header/parentlocator/platformCode.go @@ -4,7 +4,6 @@ import "fmt" // The PlatformCode describes which platform-specific format is used for the file locator // This is the type of PlatformCode field in ParentLocator type. -// type PlatformCode int32 const ( @@ -36,7 +35,6 @@ const ( // String returns the string representation of the PlatformCode. If the int platform code // value does not match with the predefined PlatformCodes then this function convert the // int to string and return -// func (p PlatformCode) String() string { switch p { case PlatformCodeNone: diff --git a/vhdcore/header/parseError.go b/vhdcore/header/parseError.go index c8a33b34..8eff9060 100644 --- a/vhdcore/header/parseError.go +++ b/vhdcore/header/parseError.go @@ -1,20 +1,17 @@ package header // ParseError is the error type representing disk header parse error. -// type ParseError struct { HeaderField string err error } // Error returns the string representation of the ParseError instance. -// func (e *ParseError) Error() string { return "Parse header field" + " '" + e.HeaderField + "' failed: " + e.err.Error() } // GetInnerErr returns the inner error, this method satisfies InnerErr interface -// func (e *ParseError) GetInnerErr() error { return e.err } @@ -22,7 +19,6 @@ func (e *ParseError) GetInnerErr() error { // NewParseError returns a new ParseError instance. // The parameter headerField represents the field in the header that failed to parse // The parameter err is the underlying error for parse failure. -// func NewParseError(headerField string, err error) error { return &ParseError{ HeaderField: headerField, diff --git a/vhdcore/header/vhdHeaderVersion.go b/vhdcore/header/vhdHeaderVersion.go index 13c598e5..dd10c83c 100644 --- a/vhdcore/header/vhdHeaderVersion.go +++ b/vhdcore/header/vhdHeaderVersion.go @@ -3,20 +3,16 @@ package header // VhdHeaderVersion represents the major/minor version of the specification // used in creating the vhd. The version is stored in the vhd header in // big-endian format. -// type VhdHeaderVersion uint32 // VhdHeaderSupportedVersion indicates the current VHD specification version -// const VhdHeaderSupportedVersion VhdHeaderVersion = 0x00010000 // VhdHeaderVersionNone indicates an invalid VHD specification version -// const VhdHeaderVersionNone = 0 // IsSupported returns true if this instance represents a supported VHD specification // version. -// func (v VhdHeaderVersion) IsSupported() bool { return v == VhdHeaderSupportedVersion } diff --git a/vhdcore/innererror/innerError.go b/vhdcore/innererror/innerError.go index 22e34de3..509d03b2 100644 --- a/vhdcore/innererror/innerError.go +++ b/vhdcore/innererror/innerError.go @@ -1,7 +1,6 @@ package innererror // InnerErr is an interface satisfied by the error types which has inner error. 
-// type InnerErr interface { GetInnerErr() error } diff --git a/vhdcore/reader/binaryReader.go b/vhdcore/reader/binaryReader.go index a1b3b96f..86ca540d 100644 --- a/vhdcore/reader/binaryReader.go +++ b/vhdcore/reader/binaryReader.go @@ -8,11 +8,9 @@ import ( ) // bufferSizeInBytes is the size of the buffer used by BinaryReader -// const bufferSizeInBytes = 16 // ReadAtReader interface that composes io.ReaderAt and io.Reader interfaces. -// type ReadAtReader interface { io.ReaderAt io.Reader @@ -20,7 +18,6 @@ type ReadAtReader interface { // BinaryReader is the reader which can be used to read values of primitive types from a reader // The reader supports reading data stored both in little-endian or big-endian format. -// type BinaryReader struct { buffer []byte order binary.ByteOrder @@ -31,7 +28,6 @@ type BinaryReader struct { // NewBinaryReader creates a new instance of BinaryReader, from is the underlying data source // to read from, order is the byte order used to encode the data in the source, size is the // length of the data source in bytes. -// func NewBinaryReader(from ReadAtReader, order binary.ByteOrder, size int64) *BinaryReader { return &BinaryReader{ buffer: make([]byte, bufferSizeInBytes), @@ -45,13 +41,11 @@ func NewBinaryReader(from ReadAtReader, order binary.ByteOrder, size int64) *Bin // copied and an error if fewer bytes were read. The error is EOF only if no bytes were // read. If an EOF happens after reading some but not all the bytes, ReadBytes returns // ErrUnexpectedEOF. On return, n == len(buf) if and only if err == nil. -// func (b *BinaryReader) ReadBytes(offset int64, buf []byte) (int, error) { return b.from.ReadAt(buf, offset) } // ReadByte reads a byte from underlying source starting at byte offset off and returns it. -// func (b *BinaryReader) ReadByte(offset int64) (byte, error) { if _, err := b.readToBuffer(1, offset); err != nil { return 0, err @@ -62,7 +56,6 @@ func (b *BinaryReader) ReadByte(offset int64) (byte, error) { // ReadBoolean reads a byte from underlying source starting at byte offset off and // returns it as a bool. -// func (b *BinaryReader) ReadBoolean(offset int64) (bool, error) { if _, err := b.readToBuffer(1, offset); err != nil { return false, err @@ -72,7 +65,6 @@ func (b *BinaryReader) ReadBoolean(offset int64) (bool, error) { // ReadUInt16 reads an encoded unsigned 2 byte integer from underlying source starting // at byte offset off and return it as a uint16. -// func (b *BinaryReader) ReadUInt16(offset int64) (uint16, error) { if _, err := b.readToBuffer(2, offset); err != nil { return 0, err @@ -82,7 +74,6 @@ func (b *BinaryReader) ReadUInt16(offset int64) (uint16, error) { // ReadInt16 reads an encoded signed 2 byte integer from underlying source starting // at byte offset off returns it as a int16. -// func (b *BinaryReader) ReadInt16(off int64) (int16, error) { if _, err := b.readToBuffer(2, off); err != nil { return 0, err @@ -92,7 +83,6 @@ func (b *BinaryReader) ReadInt16(off int64) (int16, error) { // ReadUInt32 reads an encoded unsigned 4 byte integer from underlying source starting // at byte offset off returns it as a uint32. -// func (b *BinaryReader) ReadUInt32(off int64) (uint32, error) { if _, err := b.readToBuffer(4, off); err != nil { return 0, err @@ -102,7 +92,6 @@ func (b *BinaryReader) ReadUInt32(off int64) (uint32, error) { // ReadInt32 reads an encoded signed 4 byte integer from underlying source starting // at byte offset off and returns it as a int32. 
-// func (b *BinaryReader) ReadInt32(off int64) (int32, error) { if _, err := b.readToBuffer(4, off); err != nil { return 0, err @@ -112,7 +101,6 @@ func (b *BinaryReader) ReadInt32(off int64) (int32, error) { // ReadUInt64 reads an encoded unsigned 8 byte integer from underlying source starting // at byte offset off and returns it as a uint64. -// func (b *BinaryReader) ReadUInt64(off int64) (uint64, error) { if _, err := b.readToBuffer(8, off); err != nil { return 0, err @@ -122,7 +110,6 @@ func (b *BinaryReader) ReadUInt64(off int64) (uint64, error) { // ReadInt64 reads an encoded signed 4 byte integer from underlying source starting // at byte offset off and and returns it as a int64. -// func (b *BinaryReader) ReadInt64(off int64) (int64, error) { if _, err := b.readToBuffer(8, off); err != nil { return 0, err @@ -132,7 +119,6 @@ func (b *BinaryReader) ReadInt64(off int64) (int64, error) { // ReadUUID reads 16 byte character sequence from underlying source starting // at byte offset off and returns it as a UUID. -// func (b *BinaryReader) ReadUUID(off int64) (*common.UUID, error) { if _, err := b.readToBuffer(16, off); err != nil { return nil, err @@ -146,7 +132,6 @@ func (b *BinaryReader) ReadUUID(off int64) (*common.UUID, error) { // and the error, if any. // ReadAt always returns a non-nil error when n < len(numBytes). At end of file, that // error is io.EOF. -// func (b *BinaryReader) readToBuffer(numBytes int, off int64) (int, error) { if numBytes > bufferSizeInBytes { return 0, fmt.Errorf("Expected (0-%d) however found: %d", bufferSizeInBytes, numBytes) diff --git a/vhdcore/reader/vhdReader.go b/vhdcore/reader/vhdReader.go index 41c70f2b..a34136b0 100644 --- a/vhdcore/reader/vhdReader.go +++ b/vhdcore/reader/vhdReader.go @@ -11,14 +11,12 @@ import ( // VhdReader is the reader used by various components responsible for reading different // segments of VHD such as header, footer, BAT, block, bitmap and sector. -// type VhdReader struct { *BinaryReader } // NewVhdReader creates new instance of the VhdReader, that reads from the underlying // source, size is the size of the source in bytes. -// func NewVhdReader(source ReadAtReader, size int64) *VhdReader { var order binary.ByteOrder if isLittleEndian() { @@ -31,7 +29,6 @@ func NewVhdReader(source ReadAtReader, size int64) *VhdReader { // NewVhdReaderFromByteSlice creates a new instance of VhdReader, that uses the given // byte slice as the underlying source to read from. -// func NewVhdReaderFromByteSlice(b []byte) *VhdReader { source := bytes.NewReader(b) return NewVhdReader(source, int64(len(b))) @@ -39,7 +36,6 @@ func NewVhdReaderFromByteSlice(b []byte) *VhdReader { // ReadDateTime reads an encoded vhd timestamp from underlying source starting at byte // offset off and return it as a time.Time. -// func (r *VhdReader) ReadDateTime(off int64) (*time.Time, error) { d, err := r.ReadUInt32(off) if err != nil { @@ -51,7 +47,6 @@ func (r *VhdReader) ReadDateTime(off int64) (*time.Time, error) { // isLittleEndian returns true if the host machine is little endian, false for // big endian -// func isLittleEndian() bool { var i int32 = 0x01020304 u := unsafe.Pointer(&i) diff --git a/vhdcore/validator/validator.go b/vhdcore/validator/validator.go index 7ebbf829..8be84b23 100644 --- a/vhdcore/validator/validator.go +++ b/vhdcore/validator/validator.go @@ -8,11 +8,9 @@ import ( ) // oneTB is one TeraByte -// const oneTB int64 = 1024 * 1024 * 1024 * 1024 // ValidateVhd returns error if the vhdPath refer to invalid vhd. 
-// func ValidateVhd(vhdPath string) error { vFactory := &vhdfile.FileFactory{} _, err := vFactory.Create(vhdPath) @@ -24,7 +22,6 @@ func ValidateVhd(vhdPath string) error { // ValidateVhdSize returns error if size of the vhd referenced by vhdPath is more than // the maximum allowed size (1TB) -// func ValidateVhdSize(vhdPath string) error { stream, _ := diskstream.CreateNewDiskStream(vhdPath) if stream.GetSize() > oneTB { diff --git a/vhdcore/vhdCookie.go b/vhdcore/vhdCookie.go index 570ba1ef..42a83a7b 100644 --- a/vhdcore/vhdCookie.go +++ b/vhdcore/vhdCookie.go @@ -7,11 +7,9 @@ import "bytes" // Microsoft Virtual Server, Virtual PC, and predecessor products. The cookie is // stored as an eight-character ASCII string with the “c” in the first byte, // the “o” in the second byte, and so on. -// const VhdFooterCookie = "conectix" // VhdHeaderCookie is the header cookie which is always cxsparse -// const VhdHeaderCookie = "cxsparse" // Cookie represents the Vhd header or Vhd footer cookie. @@ -24,25 +22,21 @@ type Cookie struct { // CreateNewVhdCookie creates a new VhdCookie, the new instance's Data will be simply // reference to the byte slice data (i.e. this function will not create a copy) -// func CreateNewVhdCookie(isHeader bool, data []byte) *Cookie { return &Cookie{isHeader: isHeader, Data: data} } // CreateFooterCookie creates a VhdCookie representing vhd footer cookie -// func CreateFooterCookie() *Cookie { return CreateNewVhdCookie(false, []byte(VhdFooterCookie)) } // CreateHeaderCookie creates a VhdCookie representing vhd header cookie -// func CreateHeaderCookie() *Cookie { return CreateNewVhdCookie(true, []byte(VhdHeaderCookie)) } // IsValid checks whether this this instance's internal cookie string is valid. -// func (c *Cookie) IsValid() bool { if c.isHeader { return bytes.Equal(c.Data, []byte(VhdHeaderCookie)) @@ -52,7 +46,6 @@ func (c *Cookie) IsValid() bool { } // CreateCopy creates a copy of this instance -// func (c *Cookie) CreateCopy() *Cookie { cp := &Cookie{isHeader: c.isHeader} cp.Data = make([]byte, len(c.Data)) @@ -62,7 +55,6 @@ func (c *Cookie) CreateCopy() *Cookie { // Equal returns true if this and other points to the same instance or contents of field // values of two are same. -// func (c *Cookie) Equal(other *Cookie) bool { if other == nil { return false @@ -76,7 +68,6 @@ func (c *Cookie) Equal(other *Cookie) bool { } // String returns the string representation of this range, this satisfies stringer interface. -// func (c *Cookie) String() string { return string(c.Data) } diff --git a/vhdcore/vhdTimeStamp.go b/vhdcore/vhdTimeStamp.go index 7878d557..b5e4b76a 100644 --- a/vhdcore/vhdTimeStamp.go +++ b/vhdcore/vhdTimeStamp.go @@ -8,14 +8,12 @@ import ( // TimeStamp represents the the creation time of a hard disk image. This is the number // of seconds since vhd base time which is January 1, 2000 12:00:00 AM in UTC/GMT. // The disk creation time is stored in the vhd footer in big-endian format. -// type TimeStamp struct { TotalSeconds uint32 } // NewVhdTimeStamp creates new VhdTimeStamp with creation time as dateTime. This function // will panic if the given datetime is before the vhd base time. -// func NewVhdTimeStamp(dateTime *time.Time) *TimeStamp { vhdBaseTime := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) if !dateTime.After(vhdBaseTime) { @@ -29,13 +27,11 @@ func NewVhdTimeStamp(dateTime *time.Time) *TimeStamp { // NewVhdTimeStampFromSeconds creates new VhdTimeStamp, creation time is calculated by adding // given total seconds with the vhd base time. 
-// func NewVhdTimeStampFromSeconds(totalSeconds uint32) *TimeStamp { return &TimeStamp{TotalSeconds: totalSeconds} } // ToDateTime returns the time.Time representation of this instance. -// func (v *TimeStamp) ToDateTime() time.Time { vhdBaseTime := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) return vhdBaseTime.Add(time.Duration(v.TotalSeconds) * time.Second) diff --git a/vhdcore/vhdfile/vhdFile.go b/vhdcore/vhdfile/vhdFile.go index bb8390da..b80f887c 100644 --- a/vhdcore/vhdfile/vhdFile.go +++ b/vhdcore/vhdfile/vhdFile.go @@ -11,7 +11,6 @@ import ( ) // VhdFile represents a VHD. -// type VhdFile struct { // Footer represents the disk's footer. Footer *footer.Footer @@ -31,14 +30,12 @@ type VhdFile struct { // GetDiskType returns the type of the disk. Possible values are DiskTypeFixed, DiskTypeDynamic // and DiskTypeDifferencing. -// func (f *VhdFile) GetDiskType() footer.DiskType { return f.Footer.DiskType } // GetBlockFactory returns a BlockFactory instance that can be used to create Block instances // that represents blocks in the disk. -// func (f *VhdFile) GetBlockFactory() (block.Factory, error) { params := &block.FactoryParams{ VhdHeader: f.Header, @@ -82,7 +79,6 @@ func (f *VhdFile) GetBlockFactory() (block.Factory, error) { // GetIdentityChain returns VHD identity chain, for differencing disk this will be a slice with // unique ids of this and all it's ancestor disks. For fixed and dynamic disk, this will be a // slice with one entry representing disk's unique id. -// func (f *VhdFile) GetIdentityChain() []string { ids := []string{f.Footer.UniqueID.String()} for p := f.Parent; p != nil; p = p.Parent { diff --git a/vhdcore/vhdfile/vhdFileFactory.go b/vhdcore/vhdfile/vhdFileFactory.go index ed54c885..1e8b79dd 100644 --- a/vhdcore/vhdfile/vhdFileFactory.go +++ b/vhdcore/vhdfile/vhdFileFactory.go @@ -11,7 +11,6 @@ import ( ) // FileFactory is a type to create VhdFile representing VHD in the local machine -// type FileFactory struct { vhdDir string // Path to the directory holding VHD file fd *os.File // File descriptor of the VHD file @@ -20,7 +19,6 @@ type FileFactory struct { } // Create creates a new VhdFile representing a VHD in the local machine located at vhdPath -// func (f *FileFactory) Create(vhdPath string) (*VhdFile, error) { var err error if f.fd, err = os.Open(vhdPath); err != nil { @@ -41,7 +39,6 @@ func (f *FileFactory) Create(vhdPath string) (*VhdFile, error) { // CreateFromReaderAtReader creates a new VhdFile from a reader.ReadAtReader, which is a reader associated // with a VHD in the local machine. 
The parameter size is the size of the VHD in bytes -// func (f *FileFactory) CreateFromReaderAtReader(r reader.ReadAtReader, size int64) (*VhdFile, error) { vhdReader := reader.NewVhdReader(r, size) vhdFooter, err := (footer.NewFactory(vhdReader)).Create() @@ -95,7 +92,6 @@ func (f *FileFactory) CreateFromReaderAtReader(r reader.ReadAtReader, size int64 // Dispose disposes this instance of VhdFileFactory and VhdFileFactory instances of parent and child // VHDs -// func (f *FileFactory) Dispose(err error) { if f.fd != nil { f.fd.Close() @@ -112,7 +108,6 @@ func (f *FileFactory) Dispose(err error) { } // Dispose disposes this instance of VhdFileFactory and VhdFileFactory instances of all ancestor VHDs -// func (f *FileFactory) disposeUp(err error) { if f.fd != nil { f.fd.Close() @@ -125,7 +120,6 @@ func (f *FileFactory) disposeUp(err error) { } // Dispose disposes this instance of VhdFileFactory and VhdFileFactory instances of all descendant VHDs -// func (f *FileFactory) disposeDown(err error) { if f.fd != nil { f.fd.Close() diff --git a/vhdcore/writer/binaryWriter.go b/vhdcore/writer/binaryWriter.go index 7faf3122..adfef1bf 100644 --- a/vhdcore/writer/binaryWriter.go +++ b/vhdcore/writer/binaryWriter.go @@ -6,12 +6,10 @@ import ( ) // bufferSizeInBytes is the size of the buffer used by BinaryWriter -// const bufferSizeInBytes = 16 // BinaryWriter is the writer which can be used to write values of primitive types to a writer // The writer supports writing data both in little-endian or big-endian format. -// type BinaryWriter struct { buffer []byte order binary.ByteOrder @@ -22,7 +20,6 @@ type BinaryWriter struct { // NewBinaryWriter creates a new instance of BinaryWriter, to is the underlying data source // to write to, order is the byte order used to encode the data in the source, size is the // length of the data source in bytes. -// func NewBinaryWriter(to io.WriterAt, order binary.ByteOrder, size int64) *BinaryWriter { return &BinaryWriter{ buffer: make([]byte, bufferSizeInBytes), @@ -33,20 +30,17 @@ func NewBinaryWriter(to io.WriterAt, order binary.ByteOrder, size int64) *Binary } // WriteBytes writes a byte slice to the underlying writer at offset off. -// func (w *BinaryWriter) WriteBytes(off int64, value []byte) { w.to.WriteAt(value, off) } // WriteByte write a byte value to the underlying writer at offset off. -// func (w *BinaryWriter) WriteByte(off int64, value byte) { w.buffer[0] = value w.to.WriteAt(w.buffer[:1], off) } // WriteBoolean write a boolean value to the underlying writer at offset off. -// func (w *BinaryWriter) WriteBoolean(off int64, value bool) { if value { w.buffer[0] = 1 @@ -58,49 +52,42 @@ func (w *BinaryWriter) WriteBoolean(off int64, value bool) { } // WriteInt16 encodes an int16 and write it in the underlying writer at offset off. -// func (w *BinaryWriter) WriteInt16(off int64, value int16) { w.order.PutUint16(w.buffer, uint16(value)) w.to.WriteAt(w.buffer[:2], off) } // WriteUInt16 encodes an uint16 and write it in the underlying writer at offset off. -// func (w *BinaryWriter) WriteUInt16(off int64, value uint16) { w.order.PutUint16(w.buffer, value) w.to.WriteAt(w.buffer[:2], off) } // WriteInt32 encodes an int32 and write it in the underlying writer at offset off. -// func (w *BinaryWriter) WriteInt32(off int64, value int32) { w.order.PutUint32(w.buffer, uint32(value)) w.to.WriteAt(w.buffer[:4], off) } // WriteUInt32 encodes an uint32 and write it in the underlying writer at offset off. 
-// func (w *BinaryWriter) WriteUInt32(off int64, value uint32) { w.order.PutUint32(w.buffer, value) w.to.WriteAt(w.buffer[:4], off) } // WriteInt64 encodes an int64 and write it in the underlying writer at offset off. -// func (w *BinaryWriter) WriteInt64(off int64, value int64) { w.order.PutUint64(w.buffer, uint64(value)) w.to.WriteAt(w.buffer[:8], off) } // WriteUInt64 encodes an uint64 and write it in the underlying writer at offset off. -// func (w *BinaryWriter) WriteUInt64(off int64, value uint64) { w.order.PutUint64(w.buffer, value) w.to.WriteAt(w.buffer[:8], off) } // WriteString writes a string to the underlying writer at offset off. -// func (w *BinaryWriter) WriteString(off int64, value string) { w.to.WriteAt([]byte(value), off) } diff --git a/vhdcore/writer/vhdWriter.go b/vhdcore/writer/vhdWriter.go index fb33b62b..07f4114f 100644 --- a/vhdcore/writer/vhdWriter.go +++ b/vhdcore/writer/vhdWriter.go @@ -12,14 +12,12 @@ import ( // VhdWriter is the writer used by various components responsible for writing header and // footer of the VHD. -// type VhdWriter struct { *BinaryWriter } // NewVhdWriter creates new instance of the VhdWriter, that writes to the underlying target, // size is the size of the target in bytes. -// func NewVhdWriter(target io.WriterAt, size int64) *VhdWriter { var order binary.ByteOrder if isLittleEndian() { @@ -32,26 +30,22 @@ func NewVhdWriter(target io.WriterAt, size int64) *VhdWriter { // NewVhdWriterFromByteSlice creates a new instance of VhdWriter, that uses the given byte // slice as the underlying target to write to. -// func NewVhdWriterFromByteSlice(b []byte) *VhdWriter { return NewVhdWriter(ByteSliceWriteAt(b), int64(len(b))) } // WriteTimeStamp writes vhd timestamp represented by the given time to underlying source // starting at byte offset off. -// func (r *VhdWriter) WriteTimeStamp(off int64, time *time.Time) { vhdTimeStamp := vhdcore.NewVhdTimeStamp(time) r.WriteUInt32(off, vhdTimeStamp.TotalSeconds) } // ByteSliceWriteAt is a type that satisfies io.WriteAt interface for byte slice. -// type ByteSliceWriteAt []byte // WriteAt copies len(b) bytes to the byte slice starting at byte offset off. It returns the number // of bytes copied and an error, if any. WriteAt returns a non-nil error when n != len(b). -// func (s ByteSliceWriteAt) WriteAt(b []byte, off int64) (n int, err error) { if off < 0 || off > int64(len(s)) { err = fmt.Errorf("Index %d is out of the boundary %d", off, len(s)-1) @@ -67,7 +61,6 @@ func (s ByteSliceWriteAt) WriteAt(b []byte, off int64) (n int, err error) { } // isLittleEndian returns true if the host machine is little endian, false for big endian. -// func isLittleEndian() bool { var i int32 = 0x01020304 u := unsafe.Pointer(&i) From 1c9da1d27516c1f3a91a022e03d3286b9b9eb8df Mon Sep 17 00:00:00 2001 From: Krzesimir Nowak Date: Wed, 22 May 2024 16:00:38 +0200 Subject: [PATCH 3/4] *: Merge lint and fmt targets, add module tidying to it, fix issues Most of the changes here come from renaming read and write functions from WriteStuff and ReadStuff to WriteStuffAt and ReadStuffAt, respectively, so that go vet won't complain about certain functions with standard names having unexpected, non-standard prototype. 
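For illustration only (not part of the patch), a minimal sketch of the kind of
method signature that go vet's stdmethods check complains about, and the
At-suffixed form adopted here instead; the ByteSource type below is
hypothetical and exists only to show the rename:

    package sketch

    import "io"

    // ByteSource is a hypothetical in-memory reader used only to
    // illustrate the rename.
    type ByteSource struct{ data []byte }

    // ReadByteAt returns the byte at offset off. The pre-rename form,
    // ReadByte(off int64) (byte, error), is flagged by go vet's
    // stdmethods check because ReadByte is expected to match
    // io.ByteReader's ReadByte() (byte, error). Keeping the offset
    // parameter but adding the At suffix avoids the warning and echoes
    // the io.ReaderAt naming convention.
    func (b *ByteSource) ReadByteAt(off int64) (byte, error) {
        if off < 0 || off >= int64(len(b.data)) {
            return 0, io.EOF
        }
        return b.data[off], nil
    }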
--- Makefile | 10 +++--- go.mod | 7 +++- upload/concurrent/pool.go | 2 +- vhdcore/bat/blockAllocationTableFactory.go | 2 +- vhdcore/block/bitmap/factory.go | 2 +- vhdcore/block/dynamicDiskBlockReader.go | 2 +- vhdcore/block/fixedDiskBlockReader.go | 2 +- vhdcore/block/sectorFactory.go | 2 +- vhdcore/footer/factory.go | 38 +++++++++++----------- vhdcore/footer/vhdFooterSerializer.go | 36 ++++++++++---------- vhdcore/header/factory.go | 24 +++++++------- vhdcore/header/parentlocator/factory.go | 12 +++---- vhdcore/reader/binaryReader.go | 20 ++++++------ vhdcore/reader/vhdReader.go | 4 +-- vhdcore/writer/binaryWriter.go | 21 ++++++------ vhdcore/writer/vhdWriter.go | 4 +-- 16 files changed, 96 insertions(+), 92 deletions(-) diff --git a/Makefile b/Makefile index 4ff5e893..14365fb2 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,17 @@ rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) .PHONY: all -all: test azure-vhd-utils lint fmt +all: test azure-vhd-utils lint .PHONY: lint lint: azure-vhd-utils - golint ./... - -.PHONY: fmt -fmt: azure-vhd-utils go fmt ./... + go vet ./... + go mod tidy azure-vhd-utils: $(call rwildcard, ., *.go) go build .PHONY: test test: - go test ./... \ No newline at end of file + go test ./... diff --git a/go.mod b/go.mod index 2e83b0a9..d58f870e 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,13 @@ module github.com/flatcar/azure-vhd-utils +go 1.20 + require ( github.com/Azure/azure-sdk-for-go v5.0.0-beta.0.20161118192335-3b1282355199+incompatible + gopkg.in/urfave/cli.v1 v1.19.1 +) + +require ( github.com/kr/pretty v0.1.0 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/urfave/cli.v1 v1.19.1 ) diff --git a/upload/concurrent/pool.go b/upload/concurrent/pool.go index 8be3ba9c..60bf9a2e 100644 --- a/upload/concurrent/pool.go +++ b/upload/concurrent/pool.go @@ -27,7 +27,7 @@ func (p *Pool) Less(i, j int) bool { } // Swap swaps the Worker instances at the given indices i and j -func (p Pool) Swap(i, j int) { +func (p *Pool) Swap(i, j int) { p.Workers[i], p.Workers[j] = p.Workers[j], p.Workers[i] p.Workers[i].Index = i p.Workers[j].Index = j diff --git a/vhdcore/bat/blockAllocationTableFactory.go b/vhdcore/bat/blockAllocationTableFactory.go index dfa45bba..3c66bbae 100644 --- a/vhdcore/bat/blockAllocationTableFactory.go +++ b/vhdcore/bat/blockAllocationTableFactory.go @@ -31,7 +31,7 @@ func (f *BlockAllocationTableFactory) Create() (*BlockAllocationTable, error) { batEntryOffset := f.vhdHeader.TableOffset bat := make([]uint32, batEntriesCount) for i := uint32(0); i < batEntriesCount; i++ { - bat[i], err = f.vhdReader.ReadUInt32(batEntryOffset) + bat[i], err = f.vhdReader.ReadUInt32At(batEntryOffset) if err != nil { return nil, NewBlockAllocationTableParseError(i, err) } diff --git a/vhdcore/block/bitmap/factory.go b/vhdcore/block/bitmap/factory.go index 789f7a57..712a6d80 100644 --- a/vhdcore/block/bitmap/factory.go +++ b/vhdcore/block/bitmap/factory.go @@ -26,7 +26,7 @@ func (f *Factory) Create(blockIndex uint32) (*BitMap, error) { bitmapAbsoluteByteOffset := f.blockAllocationTable.GetBitmapAddress(blockIndex) bitmapSizeInBytes := f.blockAllocationTable.GetBitmapSizeInBytes() bitmapBytes := make([]byte, bitmapSizeInBytes) - if _, err := f.vhdReader.ReadBytes(bitmapAbsoluteByteOffset, bitmapBytes); err != nil { + if _, err := f.vhdReader.ReadBytesAt(bitmapAbsoluteByteOffset, bitmapBytes); err != nil { return nil, NewParseError(blockIndex, err) } return NewBitMapFromByteSlice(bitmapBytes), nil 
diff --git a/vhdcore/block/dynamicDiskBlockReader.go b/vhdcore/block/dynamicDiskBlockReader.go index 206e607b..f945ba62 100644 --- a/vhdcore/block/dynamicDiskBlockReader.go +++ b/vhdcore/block/dynamicDiskBlockReader.go @@ -46,7 +46,7 @@ func (r *DynamicDiskBlockReader) Read(block *Block) ([]byte, error) { blockDataByteOffset := r.blockAllocationTable.GetBlockDataAddress(blockIndex) blockDataBuffer := make([]byte, r.blockSizeInBytes) - n, err := r.vhdReader.ReadBytes(blockDataByteOffset, blockDataBuffer) + n, err := r.vhdReader.ReadBytesAt(blockDataByteOffset, blockDataBuffer) if err == io.ErrUnexpectedEOF { return blockDataBuffer[:n], nil } diff --git a/vhdcore/block/fixedDiskBlockReader.go b/vhdcore/block/fixedDiskBlockReader.go index fae1f610..4d8f78f2 100644 --- a/vhdcore/block/fixedDiskBlockReader.go +++ b/vhdcore/block/fixedDiskBlockReader.go @@ -32,7 +32,7 @@ func (r *FixedDiskBlockReader) Read(block *Block) ([]byte, error) { blockIndex := block.BlockIndex blockByteOffset := int64(blockIndex) * int64(r.blockSizeInBytes) blockDataBuffer := make([]byte, block.LogicalRange.Length()) - n, err := r.vhdReader.ReadBytes(blockByteOffset, blockDataBuffer) + n, err := r.vhdReader.ReadBytesAt(blockByteOffset, blockDataBuffer) if err == io.ErrUnexpectedEOF { return blockDataBuffer[:n], nil } diff --git a/vhdcore/block/sectorFactory.go b/vhdcore/block/sectorFactory.go index de50cac6..ae859048 100644 --- a/vhdcore/block/sectorFactory.go +++ b/vhdcore/block/sectorFactory.go @@ -44,7 +44,7 @@ func (f *SectorFactory) Create(block *Block, sectorIndex uint32) (*Sector, error blockDataSectionByteOffset := f.getBlockAddress(blockIndex) sectorByteOffset := blockDataSectionByteOffset + vhdcore.VhdSectorLength*int64(sectorIndex) sectorBuf := make([]byte, vhdcore.VhdSectorLength) - if _, err := f.vhdReader.ReadBytes(sectorByteOffset, sectorBuf); err != nil { + if _, err := f.vhdReader.ReadBytesAt(sectorByteOffset, sectorBuf); err != nil { return nil, NewSectorReadError(blockIndex, sectorIndex, err) } diff --git a/vhdcore/footer/factory.go b/vhdcore/footer/factory.go index 9c2d6e52..932136aa 100644 --- a/vhdcore/footer/factory.go +++ b/vhdcore/footer/factory.go @@ -123,7 +123,7 @@ func (f *Factory) Create() (*Footer, error) { // to the beginning of footer. func (f *Factory) readVhdCookie() (*vhdcore.Cookie, error) { cookieData := make([]byte, 8) - if _, err := f.vhdReader.ReadBytes(f.footerOffset+0, cookieData); err != nil { + if _, err := f.vhdReader.ReadBytesAt(f.footerOffset+0, cookieData); err != nil { return nil, NewParseError("Cookie", err) } @@ -139,7 +139,7 @@ func (f *Factory) readVhdCookie() (*vhdcore.Cookie, error) { // Feature is stored as 4 bytes value starting at offset 8 relative to the beginning of // footer. func (f *Factory) readFeatures() (VhdFeature, error) { - value, err := f.vhdReader.ReadUInt32(f.footerOffset + 8) + value, err := f.vhdReader.ReadUInt32At(f.footerOffset + 8) if err != nil { return VhdFeatureNoFeaturesEnabled, NewParseError("Features", err) } @@ -151,7 +151,7 @@ func (f *Factory) readFeatures() (VhdFeature, error) { // VhdFileFormatVersion is stored as 4 bytes value starting at offset 12 relative to the // beginning of footer. 
func (f *Factory) readFileFormatVersion() (VhdFileFormatVersion, error) { - value, err := f.vhdReader.ReadUInt32(f.footerOffset + 12) + value, err := f.vhdReader.ReadUInt32At(f.footerOffset + 12) if err != nil { return VhdFileFormatVersionNone, NewParseError("FileFormatVersion", err) } @@ -163,7 +163,7 @@ func (f *Factory) readFileFormatVersion() (VhdFileFormatVersion, error) { // Header offset is stored as 8 bytes value starting at offset 16 relative to the beginning // of footer. This value is stored in big-endian format. func (f *Factory) readHeaderOffset() (int64, error) { - value, err := f.vhdReader.ReadInt64(f.footerOffset + 16) + value, err := f.vhdReader.ReadInt64At(f.footerOffset + 16) if err != nil { return -1, NewParseError("HeaderOffset", err) } @@ -176,7 +176,7 @@ func (f *Factory) readHeaderOffset() (int64, error) { // TimeStamp is stored as 4 bytes value starting at offset 24 relative to the beginning // of footer. This value is stored in big-endian format. func (f *Factory) readTimeStamp() (*time.Time, error) { - value, err := f.vhdReader.ReadDateTime(f.footerOffset + 24) + value, err := f.vhdReader.ReadDateTimeAt(f.footerOffset + 24) if err != nil { return nil, NewParseError("TimeStamp", err) } @@ -190,7 +190,7 @@ func (f *Factory) readTimeStamp() (*time.Time, error) { // of footer. func (f *Factory) readCreatorApplication() (string, error) { creatorApp := make([]byte, 4) - _, err := f.vhdReader.ReadBytes(f.footerOffset+28, creatorApp) + _, err := f.vhdReader.ReadBytesAt(f.footerOffset+28, creatorApp) if err != nil { return "", NewParseError("CreatorApplication", err) } @@ -203,7 +203,7 @@ func (f *Factory) readCreatorApplication() (string, error) { // Version is stored as 4 bytes value starting at offset 32 relative to the beginning // of footer. func (f *Factory) readCreatorVersion() (VhdCreatorVersion, error) { - value, err := f.vhdReader.ReadUInt32(f.footerOffset + 32) + value, err := f.vhdReader.ReadUInt32At(f.footerOffset + 32) if err != nil { return VhdCreatorVersionNone, NewParseError("CreatorVersion", err) } @@ -216,7 +216,7 @@ func (f *Factory) readCreatorVersion() (VhdCreatorVersion, error) { // Version is stored as 4 bytes value starting at offset 36 relative to the beginning // of footer. func (f *Factory) readCreatorHostOsType() (HostOsType, error) { - value, err := f.vhdReader.ReadUInt32(f.footerOffset + 36) + value, err := f.vhdReader.ReadUInt32At(f.footerOffset + 36) if err != nil { return HostOsTypeNone, NewParseError("CreatorHostOsType", err) } @@ -231,7 +231,7 @@ func (f *Factory) readCreatorHostOsType() (HostOsType, error) { // header, footer BAT, block's bitmap // This value is stored in big-endian format. func (f *Factory) readPhysicalSize() (int64, error) { - value, err := f.vhdReader.ReadInt64(f.footerOffset + 40) + value, err := f.vhdReader.ReadInt64At(f.footerOffset + 40) if err != nil { return -1, NewParseError("PhysicalSize", err) } @@ -247,7 +247,7 @@ func (f *Factory) readPhysicalSize() (int64, error) { // header, footer BAT, block's bitmap // This value is stored in big-endian format. func (f *Factory) readVirtualSize() (int64, error) { - value, err := f.vhdReader.ReadInt64(f.footerOffset + 48) + value, err := f.vhdReader.ReadInt64At(f.footerOffset + 48) if err != nil { return -1, NewParseError("VirtualSize", err) } @@ -260,17 +260,17 @@ func (f *Factory) readVirtualSize() (int64, error) { // footer. This value is stored in big-endian format. 
func (f *Factory) readDiskGeometry() (*DiskGeometry, error) { diskGeometry := &DiskGeometry{} - cylinder, err := f.vhdReader.ReadUInt16(f.footerOffset + 56 + 0) + cylinder, err := f.vhdReader.ReadUInt16At(f.footerOffset + 56 + 0) if err != nil { return nil, NewParseError("DiskGeometry::Cylinder", err) } diskGeometry.Cylinder = cylinder - heads, err := f.vhdReader.ReadByte(f.footerOffset + 56 + 2) + heads, err := f.vhdReader.ReadByteAt(f.footerOffset + 56 + 2) if err != nil { return nil, NewParseError("DiskGeometry::Heads", err) } diskGeometry.Heads = heads - sectors, err := f.vhdReader.ReadByte(f.footerOffset + 56 + 3) + sectors, err := f.vhdReader.ReadByteAt(f.footerOffset + 56 + 3) if err != nil { return nil, NewParseError("DiskGeometry::Sectors", err) } @@ -283,7 +283,7 @@ func (f *Factory) readDiskGeometry() (*DiskGeometry, error) { // The value is stored as 4 byte value starting at offset 60 relative to the beginning // of footer. This value is stored in big-endian format. func (f *Factory) readDiskType() (DiskType, error) { - value, err := f.vhdReader.ReadUInt32(f.footerOffset + 60) + value, err := f.vhdReader.ReadUInt32At(f.footerOffset + 60) if err != nil { return DiskTypeNone, NewParseError("DiskType", err) } @@ -295,7 +295,7 @@ func (f *Factory) readDiskType() (DiskType, error) { // The value is stored as 4 byte value starting at offset 64 relative to the beginning // of footer. This value is stored in big-endian format. func (f *Factory) readCheckSum() (uint32, error) { - value, err := f.vhdReader.ReadUInt32(f.footerOffset + 64) + value, err := f.vhdReader.ReadUInt32At(f.footerOffset + 64) if err != nil { return 0, NewParseError("CheckSum", err) } @@ -308,7 +308,7 @@ func (f *Factory) readCheckSum() (uint32, error) { // The value is stored as 16 byte value starting at offset 68 relative to the beginning // of footer. func (f *Factory) readUniqueID() (*common.UUID, error) { - value, err := f.vhdReader.ReadUUID(f.footerOffset + 68) + value, err := f.vhdReader.ReadUUIDAt(f.footerOffset + 68) if err != nil { return nil, NewParseError("UniqueId", err) } @@ -320,7 +320,7 @@ func (f *Factory) readUniqueID() (*common.UUID, error) { // The value is stored as 1 byte value starting at offset 84 relative to the beginning // of footer. func (f *Factory) readSavedState() (bool, error) { - value, err := f.vhdReader.ReadBoolean(f.footerOffset + 84) + value, err := f.vhdReader.ReadBooleanAt(f.footerOffset + 84) if err != nil { return false, NewParseError("SavedState", err) } @@ -333,7 +333,7 @@ func (f *Factory) readSavedState() (bool, error) { // of footer. func (f *Factory) readReserved() ([]byte, error) { reserved := make([]byte, 427) - _, err := f.vhdReader.ReadBytes(f.footerOffset+85, reserved) + _, err := f.vhdReader.ReadBytesAt(f.footerOffset+85, reserved) if err != nil { return nil, NewParseError("Reserved", err) } @@ -344,7 +344,7 @@ func (f *Factory) readReserved() ([]byte, error) { // error if the byte could be read. 
func (f *Factory) readWholeFooter() ([]byte, error) { rawData := make([]byte, 512) - _, err := f.vhdReader.ReadBytes(f.footerOffset+0, rawData) + _, err := f.vhdReader.ReadBytesAt(f.footerOffset+0, rawData) if err != nil { return nil, err } diff --git a/vhdcore/footer/vhdFooterSerializer.go b/vhdcore/footer/vhdFooterSerializer.go index 9d08c677..72b10ce9 100644 --- a/vhdcore/footer/vhdFooterSerializer.go +++ b/vhdcore/footer/vhdFooterSerializer.go @@ -10,27 +10,27 @@ func SerializeFooter(footer *Footer) []byte { buffer := make([]byte, vhdcore.VhdFooterSize) writer := writer.NewVhdWriterFromByteSlice(buffer) - writer.WriteBytes(0, footer.Cookie.Data) - writer.WriteUInt32(8, uint32(footer.Features)) - writer.WriteUInt32(12, uint32(footer.FileFormatVersion)) - writer.WriteInt64(16, footer.HeaderOffset) - writer.WriteTimeStamp(24, footer.TimeStamp) + writer.WriteBytesAt(0, footer.Cookie.Data) + writer.WriteUInt32At(8, uint32(footer.Features)) + writer.WriteUInt32At(12, uint32(footer.FileFormatVersion)) + writer.WriteInt64At(16, footer.HeaderOffset) + writer.WriteTimeStampAt(24, footer.TimeStamp) creatorApp := make([]byte, 4) copy(creatorApp, footer.CreatorApplication) - writer.WriteBytes(28, creatorApp) - writer.WriteUInt32(32, uint32(footer.CreatorVersion)) - writer.WriteUInt32(36, uint32(footer.CreatorHostOsType)) - writer.WriteInt64(40, footer.PhysicalSize) - writer.WriteInt64(48, footer.VirtualSize) + writer.WriteBytesAt(28, creatorApp) + writer.WriteUInt32At(32, uint32(footer.CreatorVersion)) + writer.WriteUInt32At(36, uint32(footer.CreatorHostOsType)) + writer.WriteInt64At(40, footer.PhysicalSize) + writer.WriteInt64At(48, footer.VirtualSize) // + DiskGeometry - writer.WriteUInt16(56, footer.DiskGeometry.Cylinder) - writer.WriteByte(58, footer.DiskGeometry.Heads) - writer.WriteByte(59, footer.DiskGeometry.Sectors) + writer.WriteUInt16At(56, footer.DiskGeometry.Cylinder) + writer.WriteByteAt(58, footer.DiskGeometry.Heads) + writer.WriteByteAt(59, footer.DiskGeometry.Sectors) // - DiskGeometry - writer.WriteUInt32(60, uint32(footer.DiskType)) - writer.WriteBytes(68, footer.UniqueID.ToByteSlice()) - writer.WriteBoolean(84, footer.SavedState) - writer.WriteBytes(85, footer.Reserved) + writer.WriteUInt32At(60, uint32(footer.DiskType)) + writer.WriteBytesAt(68, footer.UniqueID.ToByteSlice()) + writer.WriteBooleanAt(84, footer.SavedState) + writer.WriteBytesAt(85, footer.Reserved) // + Checksum // // Checksum is one’s complement of the sum of all the bytes in the footer without the @@ -42,7 +42,7 @@ func SerializeFooter(footer *Footer) []byte { } } - writer.WriteUInt32(64, ^checkSum) + writer.WriteUInt32At(64, ^checkSum) // - Checksum return buffer diff --git a/vhdcore/header/factory.go b/vhdcore/header/factory.go index d1186758..ab4f9817 100644 --- a/vhdcore/header/factory.go +++ b/vhdcore/header/factory.go @@ -106,7 +106,7 @@ func (f *Factory) Create() (*Header, error) { // of header. func (f *Factory) readHeaderCookie() (*vhdcore.Cookie, error) { cookieData := make([]byte, 8) - if _, err := f.vhdReader.ReadBytes(f.headerOffset+0, cookieData); err != nil { + if _, err := f.vhdReader.ReadBytesAt(f.headerOffset+0, cookieData); err != nil { return nil, NewParseError("Cookie", err) } @@ -123,7 +123,7 @@ func (f *Factory) readHeaderCookie() (*vhdcore.Cookie, error) { // This value is stored as 8 bytes value starting at offset 8 relative to the beginning of header. // This value is stored in big-endian format. 
func (f *Factory) readDataOffset() (int64, error) { - value, err := f.vhdReader.ReadInt64(f.headerOffset + 8) + value, err := f.vhdReader.ReadInt64At(f.headerOffset + 8) if err != nil { return -1, NewParseError("DataOffset", err) } @@ -135,7 +135,7 @@ func (f *Factory) readDataOffset() (int64, error) { // BATOffset is stored as 8 bytes value starting at offset 16 relative to the beginning of header. // This value is stored in big-endian format. func (f *Factory) readBATOffset() (int64, error) { - value, err := f.vhdReader.ReadInt64(f.headerOffset + 16) + value, err := f.vhdReader.ReadInt64At(f.headerOffset + 16) if err != nil { return -1, NewParseError("BATOffset", err) } @@ -146,7 +146,7 @@ func (f *Factory) readBATOffset() (int64, error) { // This function return error if no or fewer bytes could be read. HeaderVersion is stored as 4 bytes // value starting at offset 24 relative to the beginning of header. func (f *Factory) readHeaderVersion() (VhdHeaderVersion, error) { - value, err := f.vhdReader.ReadUInt32(f.headerOffset + 24) + value, err := f.vhdReader.ReadUInt32At(f.headerOffset + 24) if err != nil { return VhdHeaderVersionNone, NewParseError("HeaderVersion", err) } @@ -163,7 +163,7 @@ func (f *Factory) readHeaderVersion() (VhdHeaderVersion, error) { // MaxTableEntries is stored as 4 bytes value starting at offset 28 relative to the beginning of // header. This value is stored in big-endian format. func (f *Factory) readMaxBATEntries() (uint32, error) { - value, err := f.vhdReader.ReadUInt32(f.headerOffset + 28) + value, err := f.vhdReader.ReadUInt32At(f.headerOffset + 28) if err != nil { return 0, NewParseError("MaxBATEntries", err) } @@ -175,7 +175,7 @@ func (f *Factory) readMaxBATEntries() (uint32, error) { // BlockSize is stored as 4 bytes value starting at offset 32 relative to the beginning of header. // This value is stored in big-endian format. func (f *Factory) readBlockSize() (uint32, error) { - value, err := f.vhdReader.ReadUInt32(f.headerOffset + 32) + value, err := f.vhdReader.ReadUInt32At(f.headerOffset + 32) if err != nil { return 0, NewParseError("BlockSize", err) } @@ -187,7 +187,7 @@ func (f *Factory) readBlockSize() (uint32, error) { // The value is stored as 4 byte value starting at offset 36 relative to the beginning of header. // This value is stored in big-endian format. func (f *Factory) readCheckSum() (uint32, error) { - value, err := f.vhdReader.ReadUInt32(f.headerOffset + 36) + value, err := f.vhdReader.ReadUInt32At(f.headerOffset + 36) if err != nil { return 0, NewParseError("CheckSum", err) } @@ -199,7 +199,7 @@ func (f *Factory) readCheckSum() (uint32, error) { // This function return error if no or fewer bytes could be read. // The value is stored as 16 byte value starting at offset 40 relative to the beginning of header. func (f *Factory) readParentUniqueID() (*common.UUID, error) { - value, err := f.vhdReader.ReadUUID(f.headerOffset + 40) + value, err := f.vhdReader.ReadUUIDAt(f.headerOffset + 40) if err != nil { return nil, NewParseError("ParentUniqueId", err) } @@ -212,7 +212,7 @@ func (f *Factory) readParentUniqueID() (*common.UUID, error) { // TimeStamp is stored as 4 bytes value starting at offset 56 relative to the beginning of header. // This value is stored in big-endian format. 
func (f *Factory) readParentTimeStamp() (*time.Time, error) { - value, err := f.vhdReader.ReadDateTime(f.headerOffset + 56) + value, err := f.vhdReader.ReadDateTimeAt(f.headerOffset + 56) if err != nil { return nil, NewParseError("ParentTimeStamp", err) } @@ -223,7 +223,7 @@ func (f *Factory) readParentTimeStamp() (*time.Time, error) { // error if no or fewer bytes could be read. Reserved is stored as 4 bytes value starting at offset // 60 relative to the beginning of header. This value is stored in big-endian format. func (f *Factory) readReserved() (uint32, error) { - value, err := f.vhdReader.ReadUInt32(f.headerOffset + 60) + value, err := f.vhdReader.ReadUInt32At(f.headerOffset + 60) if err != nil { return 0, NewParseError("Reserved", err) } @@ -235,7 +235,7 @@ func (f *Factory) readReserved() (uint32, error) { // 512 bytes, starting at offset 64 relative to the beginning of header. func (f *Factory) readParentPath() (string, error) { parentPath := make([]byte, 512) - _, err := f.vhdReader.ReadBytes(f.headerOffset+64, parentPath) + _, err := f.vhdReader.ReadBytesAt(f.headerOffset+64, parentPath) if err != nil { return "", NewParseError("ParentPath", err) } @@ -266,7 +266,7 @@ func (f *Factory) readParentLocators() (parentlocator.ParentLocators, error) { // could be read. func (f *Factory) readWholeHeader() ([]byte, error) { rawData := make([]byte, 1024) - _, err := f.vhdReader.ReadBytes(f.headerOffset+0, rawData) + _, err := f.vhdReader.ReadBytesAt(f.headerOffset+0, rawData) if err != nil { return nil, err } diff --git a/vhdcore/header/parentlocator/factory.go b/vhdcore/header/parentlocator/factory.go index 770b66c6..be4a5f03 100644 --- a/vhdcore/header/parentlocator/factory.go +++ b/vhdcore/header/parentlocator/factory.go @@ -55,7 +55,7 @@ func (f *Factory) Create() (*ParentLocator, error) { } fileLocator := make([]byte, locator.PlatformDataLength) - _, err = f.vhdReader.ReadBytes(locator.PlatformDataOffset, fileLocator) + _, err = f.vhdReader.ReadBytesAt(locator.PlatformDataOffset, fileLocator) if err != nil { err = NewParseError("ParentLocator", fmt.Errorf("Unable to resolve file locator: %v", err)) return errDone() @@ -71,7 +71,7 @@ func (f *Factory) Create() (*ParentLocator, error) { // value starting at offset 0 relative to the beginning of this parent-hard-disk-locator. This value // is stored in big-endian format. func (f *Factory) readPlatformCode() (PlatformCode, error) { - value, err := f.vhdReader.ReadInt32(f.locatorOffset + 0) + value, err := f.vhdReader.ReadInt32At(f.locatorOffset + 0) if err != nil { return PlatformCodeNone, NewParseError("PlatformCode", err) } @@ -83,7 +83,7 @@ func (f *Factory) readPlatformCode() (PlatformCode, error) { // The value is stored as 4 byte value starting at offset 4 relative to the beginning parent-hard-disk-locator-info. // This value is stored in big-endian format. func (f *Factory) readPlatformDataSpace() (int32, error) { - value, err := f.vhdReader.ReadInt32(f.locatorOffset + 4) + value, err := f.vhdReader.ReadInt32At(f.locatorOffset + 4) if err != nil { return -1, NewParseError("PlatformDataSpace", err) } @@ -95,7 +95,7 @@ func (f *Factory) readPlatformDataSpace() (int32, error) { // as 4 byte value starting at offset 8 relative to the beginning parent-hard-disk-locator-info. This value // is stored in big-endian format. 
 func (f *Factory) readPlatformDataLength() (int32, error) {
-	value, err := f.vhdReader.ReadInt32(f.locatorOffset + 8)
+	value, err := f.vhdReader.ReadInt32At(f.locatorOffset + 8)
 	if err != nil {
 		return -1, NewParseError("PlatformDataLength", err)
 	}
@@ -107,7 +107,7 @@ func (f *Factory) readPlatformDataLength() (int32, error) {
 // value starting at offset 12 relative to the beginning parent-hard-disk-locator-info.
 // This value is stored in big-endian format.
 func (f *Factory) readReserved() (int32, error) {
-	value, err := f.vhdReader.ReadInt32(f.locatorOffset + 12)
+	value, err := f.vhdReader.ReadInt32At(f.locatorOffset + 12)
 	if err != nil {
 		return -1, NewParseError("Reserved", err)
 	}
@@ -119,7 +119,7 @@ func (f *Factory) readReserved() (int32, error) {
 // The value is stored as 4 byte value starting at offset 16 relative to the beginning parent-hard-disk-locator-info.
 // This value is stored in big-endian format.
 func (f *Factory) readPlatformDataOffset() (int64, error) {
-	value, err := f.vhdReader.ReadInt64(f.locatorOffset + 16)
+	value, err := f.vhdReader.ReadInt64At(f.locatorOffset + 16)
 	if err != nil {
 		return -1, NewParseError("PlatformDataOffset", err)
 	}
diff --git a/vhdcore/reader/binaryReader.go b/vhdcore/reader/binaryReader.go
index 86ca540d..67b7a4e9 100644
--- a/vhdcore/reader/binaryReader.go
+++ b/vhdcore/reader/binaryReader.go
@@ -41,12 +41,12 @@ func NewBinaryReader(from ReadAtReader, order binary.ByteOrder, size int64) *Bin
 // copied and an error if fewer bytes were read. The error is EOF only if no bytes were
 // read. If an EOF happens after reading some but not all the bytes, ReadBytes returns
 // ErrUnexpectedEOF. On return, n == len(buf) if and only if err == nil.
-func (b *BinaryReader) ReadBytes(offset int64, buf []byte) (int, error) {
+func (b *BinaryReader) ReadBytesAt(offset int64, buf []byte) (int, error) {
 	return b.from.ReadAt(buf, offset)
 }
 
 // ReadByte reads a byte from underlying source starting at byte offset off and returns it.
-func (b *BinaryReader) ReadByte(offset int64) (byte, error) {
+func (b *BinaryReader) ReadByteAt(offset int64) (byte, error) {
 	if _, err := b.readToBuffer(1, offset); err != nil {
 		return 0, err
 	}
@@ -56,7 +56,7 @@ func (b *BinaryReader) ReadByte(offset int64) (byte, error) {
 
 // ReadBoolean reads a byte from underlying source starting at byte offset off and
 // returns it as a bool.
-func (b *BinaryReader) ReadBoolean(offset int64) (bool, error) {
+func (b *BinaryReader) ReadBooleanAt(offset int64) (bool, error) {
 	if _, err := b.readToBuffer(1, offset); err != nil {
 		return false, err
 	}
@@ -65,7 +65,7 @@ func (b *BinaryReader) ReadBoolean(offset int64) (bool, error) {
 
 // ReadUInt16 reads an encoded unsigned 2 byte integer from underlying source starting
 // at byte offset off and return it as a uint16.
-func (b *BinaryReader) ReadUInt16(offset int64) (uint16, error) {
+func (b *BinaryReader) ReadUInt16At(offset int64) (uint16, error) {
 	if _, err := b.readToBuffer(2, offset); err != nil {
 		return 0, err
 	}
@@ -74,7 +74,7 @@ func (b *BinaryReader) ReadUInt16(offset int64) (uint16, error) {
 
 // ReadInt16 reads an encoded signed 2 byte integer from underlying source starting
 // at byte offset off returns it as a int16.
-func (b *BinaryReader) ReadInt16(off int64) (int16, error) {
+func (b *BinaryReader) ReadInt16At(off int64) (int16, error) {
 	if _, err := b.readToBuffer(2, off); err != nil {
 		return 0, err
 	}
@@ -83,7 +83,7 @@ func (b *BinaryReader) ReadInt16(off int64) (int16, error) {
 
 // ReadUInt32 reads an encoded unsigned 4 byte integer from underlying source starting
 // at byte offset off returns it as a uint32.
-func (b *BinaryReader) ReadUInt32(off int64) (uint32, error) {
+func (b *BinaryReader) ReadUInt32At(off int64) (uint32, error) {
 	if _, err := b.readToBuffer(4, off); err != nil {
 		return 0, err
 	}
@@ -92,7 +92,7 @@ func (b *BinaryReader) ReadUInt32(off int64) (uint32, error) {
 
 // ReadInt32 reads an encoded signed 4 byte integer from underlying source starting
 // at byte offset off and returns it as a int32.
-func (b *BinaryReader) ReadInt32(off int64) (int32, error) {
+func (b *BinaryReader) ReadInt32At(off int64) (int32, error) {
 	if _, err := b.readToBuffer(4, off); err != nil {
 		return 0, err
 	}
@@ -101,7 +101,7 @@ func (b *BinaryReader) ReadInt32(off int64) (int32, error) {
 
 // ReadUInt64 reads an encoded unsigned 8 byte integer from underlying source starting
 // at byte offset off and returns it as a uint64.
-func (b *BinaryReader) ReadUInt64(off int64) (uint64, error) {
+func (b *BinaryReader) ReadUInt64At(off int64) (uint64, error) {
 	if _, err := b.readToBuffer(8, off); err != nil {
 		return 0, err
 	}
@@ -110,7 +110,7 @@ func (b *BinaryReader) ReadUInt64(off int64) (uint64, error) {
 
 // ReadInt64 reads an encoded signed 4 byte integer from underlying source starting
 // at byte offset off and and returns it as a int64.
-func (b *BinaryReader) ReadInt64(off int64) (int64, error) {
+func (b *BinaryReader) ReadInt64At(off int64) (int64, error) {
 	if _, err := b.readToBuffer(8, off); err != nil {
 		return 0, err
 	}
@@ -119,7 +119,7 @@ func (b *BinaryReader) ReadInt64(off int64) (int64, error) {
 
 // ReadUUID reads 16 byte character sequence from underlying source starting
 // at byte offset off and returns it as a UUID.
-func (b *BinaryReader) ReadUUID(off int64) (*common.UUID, error) {
+func (b *BinaryReader) ReadUUIDAt(off int64) (*common.UUID, error) {
 	if _, err := b.readToBuffer(16, off); err != nil {
 		return nil, err
 	}
diff --git a/vhdcore/reader/vhdReader.go b/vhdcore/reader/vhdReader.go
index a34136b0..81580da4 100644
--- a/vhdcore/reader/vhdReader.go
+++ b/vhdcore/reader/vhdReader.go
@@ -36,8 +36,8 @@ func NewVhdReaderFromByteSlice(b []byte) *VhdReader {
 
 // ReadDateTime reads an encoded vhd timestamp from underlying source starting at byte
 // offset off and return it as a time.Time.
-func (r *VhdReader) ReadDateTime(off int64) (*time.Time, error) {
-	d, err := r.ReadUInt32(off)
+func (r *VhdReader) ReadDateTimeAt(off int64) (*time.Time, error) {
+	d, err := r.ReadUInt32At(off)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vhdcore/writer/binaryWriter.go b/vhdcore/writer/binaryWriter.go
index adfef1bf..edeff595 100644
--- a/vhdcore/writer/binaryWriter.go
+++ b/vhdcore/writer/binaryWriter.go
@@ -30,18 +30,19 @@ func NewBinaryWriter(to io.WriterAt, order binary.ByteOrder, size int64) *Binary
 }
 
 // WriteBytes writes a byte slice to the underlying writer at offset off.
-func (w *BinaryWriter) WriteBytes(off int64, value []byte) {
+func (w *BinaryWriter) WriteBytesAt(off int64, value []byte) {
 	w.to.WriteAt(value, off)
 }
 
 // WriteByte write a byte value to the underlying writer at offset off.
-func (w *BinaryWriter) WriteByte(off int64, value byte) {
+func (w *BinaryWriter) WriteByteAt(off int64, value byte) error {
 	w.buffer[0] = value
 	w.to.WriteAt(w.buffer[:1], off)
+	return nil
 }
 
 // WriteBoolean write a boolean value to the underlying writer at offset off.
-func (w *BinaryWriter) WriteBoolean(off int64, value bool) {
+func (w *BinaryWriter) WriteBooleanAt(off int64, value bool) {
 	if value {
 		w.buffer[0] = 1
 	} else {
@@ -52,42 +53,42 @@
 }
 
 // WriteInt16 encodes an int16 and write it in the underlying writer at offset off.
-func (w *BinaryWriter) WriteInt16(off int64, value int16) {
+func (w *BinaryWriter) WriteInt16At(off int64, value int16) {
 	w.order.PutUint16(w.buffer, uint16(value))
 	w.to.WriteAt(w.buffer[:2], off)
 }
 
 // WriteUInt16 encodes an uint16 and write it in the underlying writer at offset off.
-func (w *BinaryWriter) WriteUInt16(off int64, value uint16) {
+func (w *BinaryWriter) WriteUInt16At(off int64, value uint16) {
 	w.order.PutUint16(w.buffer, value)
 	w.to.WriteAt(w.buffer[:2], off)
 }
 
 // WriteInt32 encodes an int32 and write it in the underlying writer at offset off.
-func (w *BinaryWriter) WriteInt32(off int64, value int32) {
+func (w *BinaryWriter) WriteInt32At(off int64, value int32) {
 	w.order.PutUint32(w.buffer, uint32(value))
 	w.to.WriteAt(w.buffer[:4], off)
 }
 
 // WriteUInt32 encodes an uint32 and write it in the underlying writer at offset off.
-func (w *BinaryWriter) WriteUInt32(off int64, value uint32) {
+func (w *BinaryWriter) WriteUInt32At(off int64, value uint32) {
 	w.order.PutUint32(w.buffer, value)
 	w.to.WriteAt(w.buffer[:4], off)
 }
 
 // WriteInt64 encodes an int64 and write it in the underlying writer at offset off.
-func (w *BinaryWriter) WriteInt64(off int64, value int64) {
+func (w *BinaryWriter) WriteInt64At(off int64, value int64) {
 	w.order.PutUint64(w.buffer, uint64(value))
 	w.to.WriteAt(w.buffer[:8], off)
 }
 
 // WriteUInt64 encodes an uint64 and write it in the underlying writer at offset off.
-func (w *BinaryWriter) WriteUInt64(off int64, value uint64) {
+func (w *BinaryWriter) WriteUInt64At(off int64, value uint64) {
 	w.order.PutUint64(w.buffer, value)
 	w.to.WriteAt(w.buffer[:8], off)
 }
 
 // WriteString writes a string to the underlying writer at offset off.
-func (w *BinaryWriter) WriteString(off int64, value string) {
+func (w *BinaryWriter) WriteStringAt(off int64, value string) {
 	w.to.WriteAt([]byte(value), off)
 }
diff --git a/vhdcore/writer/vhdWriter.go b/vhdcore/writer/vhdWriter.go
index 07f4114f..2784357f 100644
--- a/vhdcore/writer/vhdWriter.go
+++ b/vhdcore/writer/vhdWriter.go
@@ -36,9 +36,9 @@ func NewVhdWriterFromByteSlice(b []byte) *VhdWriter {
 
 // WriteTimeStamp writes vhd timestamp represented by the given time to underlying source
 // starting at byte offset off.
-func (r *VhdWriter) WriteTimeStamp(off int64, time *time.Time) {
+func (r *VhdWriter) WriteTimeStampAt(off int64, time *time.Time) {
 	vhdTimeStamp := vhdcore.NewVhdTimeStamp(time)
-	r.WriteUInt32(off, vhdTimeStamp.TotalSeconds)
+	r.WriteUInt32At(off, vhdTimeStamp.TotalSeconds)
 }
 
 // ByteSliceWriteAt is a type that satisfies io.WriteAt interface for byte slice.

From b114b4a65a1779e3bb93be6d2118ef2bceed1dd5 Mon Sep 17 00:00:00 2001
From: Krzesimir Nowak
Date: Wed, 22 May 2024 17:00:54 +0200
Subject: [PATCH 4/4] Makefile: Update

Rebuild the utility when go.mod, go.sum or Makefile gets changed.
Add a clean target.
---
 Makefile | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 14365fb2..2ec749a1 100644
--- a/Makefile
+++ b/Makefile
@@ -9,9 +9,13 @@ lint: azure-vhd-utils
 	go vet ./...
 	go mod tidy
 
-azure-vhd-utils: $(call rwildcard, ., *.go)
+azure-vhd-utils: $(call rwildcard, ., *.go) go.mod go.sum Makefile
 	go build
 
 .PHONY: test
 test:
 	go test ./...
+
+.PHONY: clean
+clean:
+	rm -f azure-vhd-utils
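
As an illustration of the renamed reader API (not part of the patches above; the buffer contents and offset are made up, and it assumes VhdReader promotes the BinaryReader helpers, as the ReadDateTimeAt change suggests), a caller that previously used r.ReadInt64(16) would now call the At-suffixed form:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/flatcar/azure-vhd-utils/vhdcore/reader"
)

func main() {
	// 24 fabricated bytes standing in for the start of a dynamic-disk header,
	// with a big-endian int64 BAT offset stored at byte offset 16.
	buf := make([]byte, 24)
	binary.BigEndian.PutUint64(buf[16:], 1536)

	r := reader.NewVhdReaderFromByteSlice(buf)

	// Before this series: r.ReadInt64(16); after it, the offset-taking
	// variant carries the At suffix.
	batOffset, err := r.ReadInt64At(16)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("BAT offset:", batOffset) // prints 1536
}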