Commit
Merge pull request #73 from Azure/remove-panics
Remove panics
zezha-msft authored Oct 19, 2018
2 parents c615185 + 8efe419 commit 5f82fed
Showing 38 changed files with 562 additions and 744 deletions.
6 changes: 6 additions & 0 deletions BreakingChanges.md
@@ -0,0 +1,6 @@
# Breaking Changes

> See the [Change Log](ChangeLog.md) for a summary of storage library changes.
## Version XX.XX.XX:
- Removed most panics from the library. Several functions now return an error.
6 changes: 6 additions & 0 deletions ChangeLog.md
@@ -0,0 +1,6 @@
# Change Log

> See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks.
## Version XX.XX.XX:
- Removed most panics from the library. Several functions now return an error.
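Both changelog entries describe the same caller-facing shift, so here is a minimal, hypothetical sketch of what it means in practice: argument-validation failures that previously panicked (for example, setting access conditions the service ignores) now come back as ordinary errors. The import path, the `Metadata` literal, and the zero-valued `ContainerAccessConditions` are assumptions; only the error-return behaviour is taken from the diffs below.

```go
package example

import (
	"context"

	// Import path reflects the repository layout around this commit (assumption);
	// later releases moved the package.
	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// setContainerMetadata shows the post-change pattern: access conditions that the
// service ignores for SetMetadata are now rejected with an error return instead
// of a panic inside the library.
func setContainerMetadata(ctx context.Context, containerURL azblob.ContainerURL) error {
	_, err := containerURL.SetMetadata(ctx, azblob.Metadata{"owner": "team-a"}, azblob.ContainerAccessConditions{})
	return err
}
```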
12 changes: 0 additions & 12 deletions azblob/atomicmorph.go
@@ -11,9 +11,6 @@ const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt32(target)
desiredVal, morphResult := morpher(currentVal)
@@ -30,9 +27,6 @@ type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interfac

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint32(target)
desiredVal, morphResult := morpher(currentVal)
@@ -49,9 +43,6 @@ type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{}

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt64(target)
desiredVal, morphResult := morpher(currentVal)
@@ -68,9 +59,6 @@ type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interfac

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint64(target)
desiredVal, morphResult := morpher(currentVal)
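The nil-guard panics are removed from all four atomicMorph helpers, leaving callers responsible for passing non-nil arguments. Since the compare-and-swap step is collapsed out of this diff view, here is a standalone reconstruction of the loop shape; the `CompareAndSwapInt32` call is inferred (assumption), as only the `Load` and the morpher call are visible above.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// morpherInt32 mirrors the shape of the library's atomicMorpherInt32 callback:
// it maps the current value to a desired value plus an arbitrary result.
type morpherInt32 func(startVal int32) (val int32, morphResult interface{})

// morphInt32 reconstructs the loop the atomicMorph helpers appear to use: load the
// current value, ask the morpher for the desired value, and retry until the
// compare-and-swap wins. The CompareAndSwapInt32 step is an inference, not shown in the diff.
func morphInt32(target *int32, morpher morpherInt32) interface{} {
	for {
		currentVal := atomic.LoadInt32(target)
		desiredVal, morphResult := morpher(currentVal)
		if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) {
			return morphResult
		}
	}
}

func main() {
	counter := int32(5)
	result := morphInt32(&counter, func(v int32) (int32, interface{}) { return v * 2, v * 2 })
	fmt.Println(counter, result) // 10 10
}
```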
37 changes: 3 additions & 34 deletions azblob/highlevel.go
@@ -3,7 +3,6 @@ package azblob
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"

@@ -63,12 +62,6 @@ type UploadToBlockBlobOptions struct {
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {

// Validate parameters and set defaults
if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
}

bufferSize := int64(len(b))
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
@@ -97,9 +90,6 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
}

var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
if numBlocks > BlockBlobMaxBlocks {
panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
}

blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
progress := int64(0)
@@ -187,22 +177,10 @@ type DownloadFromBlobOptions struct {
// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
// Validate parameters, and set defaults.
if o.BlockSize < 0 {
panic("BlockSize option must be >= 0")
}
if o.BlockSize == 0 {
o.BlockSize = BlobDefaultDownloadBlockSize
}

if offset < 0 {
panic("offset option must be >= 0")
}

if count < 0 {
panic("count option must be >= 0")
}

if count == CountToEnd { // If size not specified, calculate it
if initialDownloadResponse != nil {
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
@@ -216,10 +194,6 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
}
}

if int64(len(b)) < count {
panic(fmt.Errorf("the buffer's size should be equal to or larger than the request count of bytes: %d", count))
}

// Prepare and do parallel download.
progress := int64(0)
progressLock := &sync.Mutex{}
@@ -268,12 +242,7 @@ func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
file *os.File, o DownloadFromBlobOptions) error {
// 1. Validate parameters.
if file == nil {
panic("file must not be nil")
}

// 2. Calculate the size of the destination file
// 1. Calculate the size of the destination file
var size int64

if count == CountToEnd {
@@ -287,7 +256,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
size = count
}

// 3. Compare and try to resize local file's size if it doesn't match Azure blob's size.
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
stat, err := file.Stat()
if err != nil {
return err
@@ -299,7 +268,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
}

if size > 0 {
// 4. Set mmap and call downloadBlobToBuffer.
// 3. Set mmap and call downloadBlobToBuffer.
m, err := newMMF(file, true, 0, int(size))
if err != nil {
return err
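For the high-level helpers, the option-validation panics (negative or oversized BlockSize, too many blocks, undersized buffers) are gone. A hedged caller sketch follows, assuming invalid options now surface through the returned error (the replacement validation sits in collapsed parts of this diff); the import path and the `Parallelism` field are taken from the SDK of this era and may differ in your version.

```go
package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" // path as of this commit (assumption)
)

// uploadBuffer uploads an in-memory payload as a block blob. After this change,
// a bad BlockSize no longer panics deep inside UploadBufferToBlockBlob; the
// caller only has to check the returned error.
func uploadBuffer(ctx context.Context, blobURL azblob.BlockBlobURL, data []byte) error {
	_, err := azblob.UploadBufferToBlockBlob(ctx, data, blobURL, azblob.UploadToBlockBlobOptions{
		BlockSize:   4 * 1024 * 1024, // 4 MiB blocks; 0 lets the library choose a size
		Parallelism: 4,               // field name assumed from the SDK of this era
	})
	return err
}
```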
12 changes: 4 additions & 8 deletions azblob/sas_service.go
@@ -28,25 +28,21 @@ type BlobSASSignatureValues struct {

// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
// the proper SAS query parameters.
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
if sharedKeyCredential == nil {
panic("sharedKeyCredential can't be nil")
}

func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
resource := "c"
if v.BlobName == "" {
// Make sure the permission characters are in the correct order
perms := &ContainerSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else {
resource = "b"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
}
@@ -89,7 +85,7 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
// Calculated SAS signature
signature: signature,
}
return p
return p, nil
}

// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
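NewSASQueryParameters is the clearest signature change in this commit: it now returns `(SASQueryParameters, error)` and reports bad permission strings instead of panicking. A caller-side sketch; the `BlobSASSignatureValues` fields other than Permissions and BlobName, and the `Encode()` call, reflect the SDK of this era and should be treated as assumptions.

```go
package example

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" // path as of this commit (assumption)
)

// buildBlobSAS signs SAS query parameters for a single blob and returns the
// encoded query string. Permission-parsing failures now come back as an error
// rather than a panic inside the library.
func buildBlobSAS(credential *azblob.SharedKeyCredential, container, blob string) (string, error) {
	sasValues := azblob.BlobSASSignatureValues{
		// Field names besides Permissions/BlobName are assumed from the SDK of this era.
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		ContainerName: container,
		BlobName:      blob,
		Permissions:   azblob.BlobSASPermissions{Read: true, Write: true}.String(),
	}
	queryParams, err := sasValues.NewSASQueryParameters(credential)
	if err != nil {
		return "", err // e.g. a malformed permission string
	}
	return queryParams.Encode(), nil
}
```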
12 changes: 5 additions & 7 deletions azblob/url_append_blob.go
@@ -59,7 +59,11 @@ func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
return ab.abClient.AppendBlock(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return nil, err
}
return ab.abClient.AppendBlock(ctx, body, count, nil,
transactionalMD5, ac.LeaseAccessConditions.pointers(),
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
@@ -90,12 +94,6 @@ type AppendPositionAccessConditions struct {

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
if ac.IfAppendPositionEqual < -1 {
panic("IfAppendPositionEqual can't be less than -1")
}
if ac.IfMaxSizeLessThanOrEqual < -1 {
panic("IfMaxSizeLessThanOrEqual can't be less than -1")
}
var zero int64 // defaults to 0
switch ac.IfAppendPositionEqual {
case -1:
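AppendBlock now propagates the stream-validation error instead of panicking, and the `< -1` guards on the append-position conditions are simply dropped. A small caller sketch under the same import-path assumption as above:

```go
package example

import (
	"bytes"
	"context"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" // path as of this commit (assumption)
)

// appendChunk appends one chunk to an append blob. A body that fails seekable-stream
// validation (for example, one not positioned at offset 0) is now an error from
// AppendBlock, not a panic inside the helper.
func appendChunk(ctx context.Context, appendBlobURL azblob.AppendBlobURL, chunk []byte) error {
	_, err := appendBlobURL.AppendBlock(ctx, bytes.NewReader(chunk), azblob.AppendBlobAccessConditions{}, nil)
	return err
}
```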
10 changes: 2 additions & 8 deletions azblob/url_blob.go
@@ -14,9 +14,6 @@ type BlobURL struct {

// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
return BlobURL{blobClient: blobClient}
}
@@ -34,9 +31,6 @@ func (b BlobURL) String() string {

// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
if p == nil {
panic("p can't be nil")
}
return NewBlobURL(b.blobClient.URL(), p)
}

@@ -108,8 +102,8 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, nil)
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
}

// GetBlobProperties returns the blob's properties.
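Besides dropping the nil-pipeline panics, this file also widens SetTier to take lease access conditions. A sketch of the updated call, passing the zero value for the no-lease case; `AccessTierCool` is assumed to be one of the generated `AccessTierType` constants.

```go
package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" // path as of this commit (assumption)
)

// coolDownBlob moves a blob to the Cool tier using the new SetTier signature,
// which now takes LeaseAccessConditions as its third argument.
func coolDownBlob(ctx context.Context, blobURL azblob.BlobURL) error {
	_, err := blobURL.SetTier(ctx, azblob.AccessTierCool, azblob.LeaseAccessConditions{})
	return err
}
```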
15 changes: 10 additions & 5 deletions azblob/url_block_blob.go
@@ -30,9 +30,6 @@ type BlockBlobURL struct {

// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
bbClient := newBlockBlobClient(url, p)
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
@@ -60,7 +57,11 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) {
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return bb.bbClient.Upload(ctx, body, validateSeekableStreamAt0AndGetCount(body), nil,
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return nil, err
}
return bb.bbClient.Upload(ctx, body, count, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(),
&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
@@ -71,7 +72,11 @@ func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTT
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) {
return bb.bbClient.StageBlock(ctx, base64BlockID, validateSeekableStreamAt0AndGetCount(body), body, transactionalMD5, nil, ac.pointers(), nil)
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return nil, err
}
return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil)
}

// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
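Upload and StageBlock now validate the seekable stream first and return its error. A caller sketch for staging a single block; the base64 block-ID scheme is illustrative, not a library requirement beyond IDs being valid base64 strings of equal length.

```go
package example

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" // path as of this commit (assumption)
)

// stageOneBlock stages a block of data under a deterministic block ID and returns
// that ID for a later CommitBlockList call. Stream-validation failures are now
// surfaced as errors from StageBlock rather than panics.
func stageOneBlock(ctx context.Context, blockBlobURL azblob.BlockBlobURL, index uint32, data []byte) (string, error) {
	blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%08d", index)))
	_, err := blockBlobURL.StageBlock(ctx, blockID, bytes.NewReader(data), azblob.LeaseAccessConditions{}, nil)
	return blockID, err
}
```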
15 changes: 5 additions & 10 deletions azblob/url_container.go
@@ -3,6 +3,7 @@ package azblob
import (
"bytes"
"context"
"errors"
"fmt"
"net/url"

@@ -16,9 +17,6 @@ type ContainerURL struct {

// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
if p == nil {
panic("p can't be nil")
}
client := newContainerClient(url, p)
return ContainerURL{client: client}
}
@@ -89,7 +87,7 @@ func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAcces
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
}

ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
@@ -109,7 +107,7 @@ func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessCondition
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
}
ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
@@ -181,7 +179,7 @@ func (p *AccessPolicyPermission) Parse(s string) error {
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
}
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
@@ -241,7 +239,7 @@ func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
if o.Details.Snapshots {
panic("snapshots are not supported in this listing operation")
return nil, errors.New("snapshots are not supported in this listing operation")
}
prefix, include, maxResults := o.pointers()
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
@@ -264,9 +262,6 @@ func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlob
}
include = o.Details.slice()
if o.MaxResults != 0 {
if o.MaxResults < 0 {
panic("MaxResults must be >= 0")
}
maxResults = &o.MaxResults
}
return
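The container-level validations now return `errors.New(...)` values as shown above. A listing sketch that checks the error on every page; the response field names (`Segment.BlobItems`, `NextMarker`) follow the generated models of this era and are assumptions worth verifying against your version.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob" // path as of this commit (assumption)
)

// listByPrefix walks a container hierarchically, one page at a time. Requesting
// snapshot details in a hierarchical listing is now an error return instead of a
// panic, so the plain options value below stays within what the service supports.
func listByPrefix(ctx context.Context, containerURL azblob.ContainerURL, prefix string) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		page, err := containerURL.ListBlobsHierarchySegment(ctx, marker, "/", azblob.ListBlobsSegmentOptions{Prefix: prefix})
		if err != nil {
			return err
		}
		for _, blobItem := range page.Segment.BlobItems {
			fmt.Println(blobItem.Name)
		}
		marker = page.NextMarker
	}
	return nil
}
```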
(Diffs for the remaining changed files are not loaded in this view.)
