diff --git a/fileuploader/large_file_upload_task.go b/fileuploader/large_file_upload_task.go index 80a5631..7468f24 100644 --- a/fileuploader/large_file_upload_task.go +++ b/fileuploader/large_file_upload_task.go @@ -10,10 +10,13 @@ import ( "strconv" "strings" "sync" + "time" ) type LargeFileUploadTask[T interface{}] interface { - UploadAsync(progress ProgressCallBack) UploadResult[T] + Upload(progress ProgressCallBack) UploadResult[T] + Resume(progress ProgressCallBack) (UploadResult[T], error) + Cancel() error } // ByteStream is an interface that represents a stream of bytes @@ -44,7 +47,7 @@ func NewLargeFileUploadTask[T interface{}](adapter abstractions.RequestAdapter, } // UploadAsync uploads the byteStream in slices and returns the result of the upload -func (l *largeFileUploadTask[T]) UploadAsync(progress ProgressCallBack) UploadResult[T] { +func (l *largeFileUploadTask[T]) Upload(progress ProgressCallBack) UploadResult[T] { result := NewUploadResult[T]() var wg sync.WaitGroup slices := l.createUploadSlices() @@ -68,7 +71,12 @@ func (l *largeFileUploadTask[T]) Resume(progress ProgressCallBack) (UploadResult if len(l.uploadSession.GetNextExpectedRanges()) == 0 { return nil, errors.New("UploadSession does not have next expected ranges") } - return l.UploadAsync(progress), nil + + if l.uploadSession.GetExpirationDateTime().Before(time.Now()) { + return nil, errors.New("UploadSession has expired") + } + + return l.Upload(progress), nil } // Cancel cancels the upload @@ -139,8 +147,3 @@ func (l *largeFileUploadTask[T]) fileSize() int64 { fileInfo, _ := l.byteStream.Stat() return fileInfo.Size() } - -func (l *largeFileUploadTask[T]) nextSliceLength(rangeBegin int64, rangeEnd int64) int64 { - sizeBasedOnRange := rangeEnd - rangeBegin + 1 - return minOf(sizeBasedOnRange, l.maxSlice) -} diff --git a/fileuploader/large_file_upload_test.go b/fileuploader/large_file_upload_test.go index b35cf0c..3f7361b 100644 --- a/fileuploader/large_file_upload_test.go +++ 
b/fileuploader/large_file_upload_test.go @@ -15,19 +15,7 @@ import ( "time" ) -func TestLargeFileUploadTask(t *testing.T) { - testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "application/json") - jsonResponse := `{ - "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#microsoft.graph.uploadSession", - "uploadUrl": "https://uploadUrl", - "expirationDateTime": "2021-08-10T00:00:00Z" - }` - w.WriteHeader(200) - fmt.Fprint(w, jsonResponse) - })) - defer testServer.Close() - +func prepareUploader(testServer *httptest.Server) LargeFileUploadTask[internal.UploadResponseble] { absser.DefaultParseNodeFactoryInstance.ContentTypeAssociatedFactories["application/json"] = jsonserialization.NewJsonParseNodeFactory() reqAdapter, _ := msgraphgocore.NewGraphRequestAdapterBase(&authentication.AnonymousAuthenticationProvider{}, msgraphgocore.GraphClientOptions{ @@ -55,20 +43,79 @@ func TestLargeFileUploadTask(t *testing.T) { "5XX": internal.CreateSampleErrorFromDiscriminatorValue, } - uploader := NewLargeFileUploadTask[internal.UploadResponseble](reqAdapter, uploadSession, byteStream, int64(maxSliceSize), internal.CreateUploadResponseFromDiscriminatorValue, errorMapping) + return NewLargeFileUploadTask[internal.UploadResponseble](reqAdapter, uploadSession, byteStream, int64(maxSliceSize), internal.CreateUploadResponseFromDiscriminatorValue, errorMapping) +} + +func TestLargeFileUploadTask(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + jsonResponse := `{ + "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#microsoft.graph.uploadSession", + "uploadUrl": "https://uploadUrl", + "expirationDateTime": "2021-08-10T00:00:00Z" + }` + w.WriteHeader(200) + fmt.Fprint(w, jsonResponse) + })) + defer testServer.Close() + + uploader := prepareUploader(testServer) // verify 
that the object was created correctly // verify the number of sub upload tasks + progressCall := 0 + progress := func(progress int64, total int64) { + progressCall++ + } + result := uploader.Upload(progress) + + // verify that status is correct + assert.True(t, result.GetUploadSucceeded()) + assert.Equal(t, 12, progressCall) // progress callback should be called for every sub upload task +} + +func TestResumeLargeFileUploadTask(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + jsonResponse := `{ + "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#microsoft.graph.uploadSession", + "uploadUrl": "https://uploadUrl", + "expirationDateTime": "2021-08-10T00:00:00Z" + }` + w.WriteHeader(200) + fmt.Fprint(w, jsonResponse) + })) + defer testServer.Close() + + uploader := prepareUploader(testServer) progressCall := 0 progress := func(progress int64, total int64) { progressCall++ } - result := uploader.UploadAsync(progress) + result, err := uploader.Resume(progress) + assert.NoError(t, err) // verify that status is correct assert.True(t, result.GetUploadSucceeded()) assert.Equal(t, 12, progressCall) // progress callback should be called for every sub upload task + +} + +func TestCancelLargeFileUploadTask(t *testing.T) { + + var receivedReq *http.Request + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(204) + receivedReq = req + })) + defer testServer.Close() + + uploader := prepareUploader(testServer) + err := uploader.Cancel() + assert.NoError(t, err) + assert.Equal(t, "DELETE", receivedReq.Method) } type mockUploadSession struct {