Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove panics #73

Merged
merged 4 commits into from
Oct 19, 2018
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions BreakingChanges.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Breaking Changes

> See the [Change Log](ChangeLog.md) for a summary of storage library changes.

## Version XX.XX.XX:
- Removed most panics from the library. Several functions now return an error.
6 changes: 6 additions & 0 deletions ChangeLog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Change Log

> See [BreakingChanges](BreakingChanges.md) for a detailed list of API breaks.

## Version XX.XX.XX:
- Removed most panics from the library. Several functions now return an error.
12 changes: 0 additions & 12 deletions azblob/atomicmorph.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,6 @@ const targetAndMorpherMustNotBeNil = "target and morpher must not be nil"

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt32(target)
desiredVal, morphResult := morpher(currentVal)
Expand All @@ -30,9 +27,6 @@ type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interfac

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint32(target)
desiredVal, morphResult := morpher(currentVal)
Expand All @@ -49,9 +43,6 @@ type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{}

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadInt64(target)
desiredVal, morphResult := morpher(currentVal)
Expand All @@ -68,9 +59,6 @@ type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interfac

// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} {
if target == nil || morpher == nil {
panic(targetAndMorpherMustNotBeNil)
}
for {
currentVal := atomic.LoadUint64(target)
desiredVal, morphResult := morpher(currentVal)
Expand Down
37 changes: 3 additions & 34 deletions azblob/highlevel.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ package azblob
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"

Expand Down Expand Up @@ -63,12 +62,6 @@ type UploadToBlockBlobOptions struct {
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {

// Validate parameters and set defaults
if o.BlockSize < 0 || o.BlockSize > BlockBlobMaxUploadBlobBytes {
panic(fmt.Sprintf("BlockSize option must be > 0 and <= %d", BlockBlobMaxUploadBlobBytes))
}

bufferSize := int64(len(b))
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
Expand Down Expand Up @@ -97,9 +90,6 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte,
}

var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1)
if numBlocks > BlockBlobMaxBlocks {
panic(fmt.Sprintf("The buffer's size is too big or the BlockSize is too small; the number of blocks must be <= %d", BlockBlobMaxBlocks))
}

blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
progress := int64(0)
Expand Down Expand Up @@ -187,22 +177,10 @@ type DownloadFromBlobOptions struct {
// downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
// Validate parameters, and set defaults.
if o.BlockSize < 0 {
panic("BlockSize option must be >= 0")
}
if o.BlockSize == 0 {
o.BlockSize = BlobDefaultDownloadBlockSize
}

if offset < 0 {
panic("offset option must be >= 0")
}

if count < 0 {
panic("count option must be >= 0")
}

if count == CountToEnd { // If size not specified, calculate it
if initialDownloadResponse != nil {
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
Expand All @@ -216,10 +194,6 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
}
}

if int64(len(b)) < count {
panic(fmt.Errorf("the buffer's size should be equal to or larger than the request count of bytes: %d", count))
}

// Prepare and do parallel download.
progress := int64(0)
progressLock := &sync.Mutex{}
Expand Down Expand Up @@ -268,12 +242,7 @@ func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co
// Offset and count are optional, pass 0 for both to download the entire blob.
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
file *os.File, o DownloadFromBlobOptions) error {
// 1. Validate parameters.
if file == nil {
panic("file must not be nil")
}

// 2. Calculate the size of the destination file
// 1. Calculate the size of the destination file
var size int64

if count == CountToEnd {
Expand All @@ -287,7 +256,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
size = count
}

// 3. Compare and try to resize local file's size if it doesn't match Azure blob's size.
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
stat, err := file.Stat()
if err != nil {
return err
Expand All @@ -299,7 +268,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun
}

if size > 0 {
// 4. Set mmap and call downloadBlobToBuffer.
// 3. Set mmap and call downloadBlobToBuffer.
m, err := newMMF(file, true, 0, int(size))
if err != nil {
return err
Expand Down
12 changes: 4 additions & 8 deletions azblob/sas_service.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,25 +28,21 @@ type BlobSASSignatureValues struct {

// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
// the proper SAS query parameters.
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters {
if sharedKeyCredential == nil {
panic("sharedKeyCredential can't be nil")
}

func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
resource := "c"
if v.BlobName == "" {
// Make sure the permission characters are in the correct order
perms := &ContainerSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else {
resource = "b"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
panic(err)
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
}
Expand Down Expand Up @@ -89,7 +85,7 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
// Calculated SAS signature
signature: signature,
}
return p
return p, nil
}

// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
Expand Down
6 changes: 0 additions & 6 deletions azblob/url_append_blob.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,12 +90,6 @@ type AppendPositionAccessConditions struct {

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
if ac.IfAppendPositionEqual < -1 {
panic("IfAppendPositionEqual can't be less than -1")
}
if ac.IfMaxSizeLessThanOrEqual < -1 {
panic("IfMaxSizeLessThanOrEqual can't be less than -1")
}
var zero int64 // defaults to 0
switch ac.IfAppendPositionEqual {
case -1:
Expand Down
10 changes: 2 additions & 8 deletions azblob/url_blob.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,6 @@ type BlobURL struct {

// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
return BlobURL{blobClient: blobClient}
}
Expand All @@ -34,9 +31,6 @@ func (b BlobURL) String() string {

// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
if p == nil {
panic("p can't be nil")
}
return NewBlobURL(b.blobClient.URL(), p)
}

Expand Down Expand Up @@ -108,8 +102,8 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType) (*BlobSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, nil)
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
}

// GetBlobProperties returns the blob's properties.
Expand Down
3 changes: 0 additions & 3 deletions azblob/url_block_blob.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,6 @@ type BlockBlobURL struct {

// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
bbClient := newBlockBlobClient(url, p)
return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
Expand Down
14 changes: 4 additions & 10 deletions azblob/url_container.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,6 @@ type ContainerURL struct {

// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
if p == nil {
panic("p can't be nil")
}
client := newContainerClient(url, p)
return ContainerURL{client: client}
}
Expand Down Expand Up @@ -89,7 +86,7 @@ func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAcces
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
sanityCheckFailed("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
zezha-msft marked this conversation as resolved.
Show resolved Hide resolved
}

ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
Expand All @@ -109,7 +106,7 @@ func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessCondition
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
sanityCheckFailed("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This function already has an error on the return signature. Why not return an error here rather than a panic?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Great question! We perform a sanity check here because these parameters are not supported on the service end, and there could be potential data loss if the user relied on these parameters (and we let them).

Even if we returned this as an error instead, how could the user reasonably react to it? Would they call the API again without the offending access condition?

In my understanding, errors should be actionable. In this case, it is certainly not; so it might be better to panic and let the user catch this programmer error in their testing environment.

Copy link
Member

@devigned devigned Oct 12, 2018

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree the error should be actionable just the same way that you would get an error from parsing something. Of course, you could retry parsing the same input, but you will still get the same error. The error is actually actionable. Most of all, it does not require special handling to ensure the application does not crash by the consumer of your API.

I find this usage of panic particularly subjective.

If you would like to indicate some level of severity with the error, then I suggest making a more informative error type. That way, you could provide some indication on the error type similar to how net.Error https://golang.org/pkg/net/#Error indicates some category of errors are temporary.

Copy link
Member

@marstr marstr Oct 12, 2018

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We keep going around and around on this. By the widely accepted convention documented in the golang wiki, panic/recover is just a way to shortcut control flow inside of your package and shouldn't be exposed through package boundaries. While calling it "sanityCheckFailed" at least spares folks the number of hits they see when they grep panic on this library, the spirit of it doesn't change at all. Simply put, the community does not recognize any subtlety to when to return an error and when to panic: there's just a dangerous one and a safe one. Whether or not we agree with this assessment is not really relevant.

In all cases where the function already ready returns an error, at the moment is is absolutely the correct thing to use that error.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @devigned, I'm a bit confused by what you mean here:

Of course, you could retry parsing the same input, but you will still get the same error. The error is actually actionable. Most of all, it does not require special handling to ensure the application does not crash by the consumer of your API.

To clarify, the receiver SetMetadata manipulates metadata on the container, but the Storage service ignores certain container access conditions(IfUnmodifiedSince, IfMatch, and IfNoneMatch). If the user relied on these access conditions, there could be data corruption and it's very hard to recover their data when they discover this problem. In other words, we are not doing some generic parsing here; we are trying to help the user not corrupt their data. Furthermore, the same problem exists with 2 other APIs, including Delete on ContainerURL, which ignores IfMatch and IfNoneMatch. We certainly do not want the user to mistakenly delete their entire container.

If we take a step back, there are indeed other ways of helping the user with this problem. The two possibilities that I see are:

  1. Return an error instead of panicking.
  2. Make these 3 receivers on ContainerURL take in individual parameters instead of ContainerAccessConditions.

Would you prefer option 1 or 2? And why?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @marstr, thanks for the review. We indeed agreed to stop debating on whether panics are good/bad; instead we should be looking at our usage of panics on a case by case basis, and see if there's a way of removing it without impacting the quality/safety of the SDK. I certainly hope that you could see the progress we've made so far(there are only 13 calls to sanityCheckFailed). And with your help, I also hope to merge/publish this PR as soon as possible, to alleviate customer pain.

Here are the 13 calls to sanityCheckFailed.
image

  1. 4 of these calls are on ContainerURL. They panic when the user relies on access conditions or parameters that are silently ignored by the Storage service. We are trying to protect the customers from data corruption/loss with the panics. We obviously do not expect the customers to write recover code for these APIs, we instead expect their code to fail immediately in the test environment when they attempt to use them.
  2. 1 such call is on SharedKeyCredential's buildCanonicalizedResource. When failing to parse the query parameters, we panic instead of throwing an error because there must be a serious problem with the SDK itself if we are unable to parse the query parameters that own our code added. In other words, we can no longer proceed if we cannot trust the protocol layer.
  3. Another call is on tokenCredential's constructor. We panic if the user attempts to use HTTP instead of HTTPS. This is because the user is at the risk of exposing their bearer token with HTTP. Instead of letting the call go out, we want the user to catch this in their test environment.
  4. The next 2 calls are on the unmap API of the memory-mapped-file types(Unix and Windows). We've already talked about this usage, since if we cannot unmap a memory-mapped-file, we cannot proceed as there could be memory corruption.
  5. The next call is in the retry policy. We panic when we are unable to rewind the body for a retry. This is a serious problem since the Body is a ReadSeeker; if we cannot rewind it for a retry, there could be data corruption if we continued. We can indeed return an error here instead of panicking, but what is the user supposed to do with this error? The last 3 panics on the list are also about seeking failures on the body.
  6. The other call in the retry policy is when the response is nil while there is no error. This one is a consistency check, and basically should never happen. If we didn't check for it though, it will panic on the next line.

The above is a brief explanation for each remaining usage of panic. Each case has some nuances and took us a good amount of time to consider their validity. Please feel free to reach out if you want to discuss them.

In short, the statement that panics means unsafe code is simply not true. At least the usage number 4 proves it. If we cannot unmap a memory-mapped-file, why shouldn't we crash with a clear message? The other two options in this case are:

  1. Remove the check for error
    This makes no sense, memory could be corrupted.
  2. Return the error to the user
    What are they supposed to do with it? Do you really expect them to write code to detect this?

Therefore, panics are in fact a necessity in some cases.

Copy link
Member

@devigned devigned Oct 15, 2018

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@zezha-msft, let me try to clarify. You previously stated:

Even if we returned this as an error instead, how could the user reasonably react to it? Would they call the API again without the offending access condition?

I was giving an example of where an error is returned, which provides information to the user about what went wrong (actionable), but should not be retried. For example, when parsing / unmarshalling JSON into a struct the error returned is actionable, but you will receive the same result if you call unmarshal a second time. By no means should the code below panic (though it does under the covers, I believe, but is handled before it hits the package boundary).

err = json.Unmarshal(content, &structure)
if err != nil{
    fmt.Print("Error:", err)
}

In the case of SetMetadata and similar methods, if you return an error to the consumer, you'd have the same API protecting effect as the panic, but without the undesirable possibility of crashing the consumers application. Since you'd return the error after you check if etags exist, you have ensured the user will not be able to call the API (c.client.SetAccessPolicy) and possibly cause data loss.

So the short answer is that I would select option 1. Return the error. Provide adequate information in the error for the consumer to understand the reason for the error and how they would change their code to avoid the error in the future.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @devigned, thanks for the explanation! I'm ok with removing the panics in this case.

Could you please also take a look at the other cases that I've mentioned above? And see if you have any thoughts on them.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you, @zezha-msft.

  • ContainerURL
    • Return an error. I would use the same logic applied to SetMetadata and alike.
  • buildCanonicalizedResource and SharedKeyCredential
    • Return an error. Since these are part of the generated code and thus should be verifiable through tests produced by the generator, I would not panic, but ensure the generated code or the generator has sufficient tests to ensure this error will not occur. Furthermore, the pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) allows for an error return, so returning an error from f.buildStringToSign will not cause much pain for upward error propagation.
  • tokenCredential
    • Return an error. I would use the same logic applied to SetMetadata and alike.
  • unmap
    • Panic. I'm of two minds on this one. I don't like the panic, but something is wrong at the OS level. I'm ok with bailing out on this one. @marstr, @bketelsen or @erikstmartin what do you think.
  • ReadSeeker
    • Return an error explaining that the consumer must supply a ReadSeeker. I'd use the same logic applied to SetMetadata
  • response == nil
    • Return an error explaining what happened and to file an issue in this repo. The function you are in already has an error return, func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error). If you'd like to have a stack for these error, consider using or building something like https://godoc.org/github.com/pkg/errors#WithStack.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  • unmap
    • Panic. I'm of two minds on this one. I don't like the panic, but something is wrong at the OS level. I'm ok with bailing out on this one. @marstr, @bketelsen or @erikstmartin what do you think.

I agree, this seems like an acceptable place to panic.

}
ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
Expand Down Expand Up @@ -181,7 +178,7 @@ func (p *AccessPolicyPermission) Parse(s string) error {
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
panic("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
sanityCheckFailed("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
zezha-msft marked this conversation as resolved.
Show resolved Hide resolved
}
ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
Expand Down Expand Up @@ -241,7 +238,7 @@ func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
if o.Details.Snapshots {
panic("snapshots are not supported in this listing operation")
sanityCheckFailed("snapshots are not supported in this listing operation")
zezha-msft marked this conversation as resolved.
Show resolved Hide resolved
}
prefix, include, maxResults := o.pointers()
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.val, maxResults, include, nil, nil)
Expand All @@ -264,9 +261,6 @@ func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlob
}
include = o.Details.slice()
if o.MaxResults != 0 {
if o.MaxResults < 0 {
panic("MaxResults must be >= 0")
}
maxResults = &o.MaxResults
}
return
Expand Down
39 changes: 1 addition & 38 deletions azblob/url_page_blob.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@ type PageBlobURL struct {

// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
if p == nil {
panic("p can't be nil")
}
blobClient := newBlobClient(url, p)
pbClient := newPageBlobClient(url, p)
return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
Expand All @@ -50,9 +47,6 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
// Create creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
if sequenceNumber < 0 {
panic("sequenceNumber must be greater than or equal to 0")
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Create(ctx, 0, size, nil,
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
Expand Down Expand Up @@ -111,9 +105,6 @@ func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) {
if size%PageBlobPageBytes != 0 {
panic("Size must be a multiple of PageBlobPageBytes (512)")
}
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
Expand All @@ -122,9 +113,6 @@ func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessCondi
// SetSequenceNumber sets the page blob's sequence number.
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
if sequenceNumber < 0 {
panic("sequenceNumber must be greater than or equal to 0")
}
sn := &sequenceNumber
if action == SequenceNumberActionIncrement {
sn = nil
Expand All @@ -145,26 +133,11 @@ func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL,
qp := source.Query()
qp.Set("snapshot", snapshot)
source.RawQuery = qp.Encode()
return pb.pbClient.CopyIncremental(ctx, source.String(), nil, nil,
return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
}

func (pr PageRange) pointers() *string {
if pr.Start < 0 {
panic("PageRange's Start value must be greater than or equal to 0")
}
if pr.End <= 0 {
panic("PageRange's End value must be greater than 0")
}
if pr.Start%PageBlobPageBytes != 0 {
panic("PageRange's Start value must be a multiple of 512")
}
if pr.End%PageBlobPageBytes != (PageBlobPageBytes - 1) {
panic("PageRange's End value must be 1 less than a multiple of 512")
}
if pr.End <= pr.Start {
panic("PageRange's End value must be after the start")
}
endOffset := strconv.FormatInt(int64(pr.End), 10)
asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
return &asString
Expand Down Expand Up @@ -202,16 +175,6 @@ type SequenceNumberAccessConditions struct {

// pointers is for internal infrastructure. It returns the fields as pointers.
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
if ac.IfSequenceNumberLessThan < -1 {
panic("Ifsequencenumberlessthan can't be less than -1")
}
if ac.IfSequenceNumberLessThanOrEqual < -1 {
panic("IfSequenceNumberLessThanOrEqual can't be less than -1")
}
if ac.IfSequenceNumberEqual < -1 {
panic("IfSequenceNumberEqual can't be less than -1")
}

var zero int64 // Defaults to 0
switch ac.IfSequenceNumberLessThan {
case -1:
Expand Down
Loading