diff --git a/azbfs/zt_retry_reader_test.go b/azbfs/zt_retry_reader_test.go index 0467b2e4f..c00810288 100644 --- a/azbfs/zt_retry_reader_test.go +++ b/azbfs/zt_retry_reader_test.go @@ -6,12 +6,12 @@ import ( "errors" "fmt" "github.com/Azure/azure-storage-azcopy/v10/azbfs" + "github.com/stretchr/testify/assert" "io" "net" "net/http" + "testing" "time" - - chk "gopkg.in/check.v1" ) // Testings for RetryReader @@ -86,7 +86,8 @@ func (r *perByteReader) Close() error { // Test normal retry succeed, note initial response not provided. // Tests both with and without notification of failures -func (r *aztestsSuite) TestRetryReaderReadWithRetry(c *chk.C) { +func TestRetryReaderReadWithRetry(t *testing.T) { + a := assert.New(t) // Test twice, the second time using the optional "logging"/notification callback for failed tries // We must test both with and without the callback, since be testing without // we are testing that it is, indeed, optional to provide the callback @@ -124,7 +125,7 @@ func (r *aztestsSuite) TestRetryReaderReadWithRetry(c *chk.C) { httpGetterInfo := azbfs.HTTPGetterInfo{Offset: 0, Count: int64(byteCount)} initResponse, err := getter(context.Background(), httpGetterInfo) - c.Assert(err, chk.IsNil) + a.Nil(err) rrOptions := azbfs.RetryReaderOptions{MaxRetryRequests: 1} if logThisRun { @@ -135,28 +136,29 @@ func (r *aztestsSuite) TestRetryReaderReadWithRetry(c *chk.C) { // should fail and succeed through retry can := make([]byte, 1) n, err := retryReader.Read(can) - c.Assert(n, chk.Equals, 1) - c.Assert(err, chk.IsNil) + a.Equal(1, n) + a.Nil(err) // check "logging", if it was enabled if logThisRun { // We only expect one failed try in this test // And the notification method is not called for successes - c.Assert(failureMethodNumCalls, chk.Equals, 1) // this is the number of calls we counted - c.Assert(failureWillRetryCount, chk.Equals, 1) // the sole failure was retried - c.Assert(failureLastReportedFailureCount, chk.Equals, 1) // this is the number of failures reported by the notification method - c.Assert(failureLastReportedError, chk.NotNil) + a.Equal(1, failureMethodNumCalls) // this is the number of calls we counted + a.Equal(1, failureWillRetryCount) // the sole failure was retried + a.Equal(1, failureLastReportedFailureCount) // this is the number of failures reported by the notification method + a.NotNil(failureLastReportedError) } // should return EOF n, err = retryReader.Read(can) - c.Assert(n, chk.Equals, 0) - c.Assert(err, chk.Equals, io.EOF) + a.Zero(n) + a.Equal(io.EOF, err) } } // Test normal retry succeed, note initial response not provided. 
// Tests both with and without notification of failures -func (r *aztestsSuite) TestRetryReaderReadWithRetryIoUnexpectedEOF(c *chk.C) { +func TestRetryReaderReadWithRetryIoUnexpectedEOF(t *testing.T) { + a := assert.New(t) // Test twice, the second time using the optional "logging"/notification callback for failed tries // We must test both with and without the callback, since be testing without // we are testing that it is, indeed, optional to provide the callback @@ -194,7 +196,7 @@ func (r *aztestsSuite) TestRetryReaderReadWithRetryIoUnexpectedEOF(c *chk.C) { httpGetterInfo := azbfs.HTTPGetterInfo{Offset: 0, Count: int64(byteCount)} initResponse, err := getter(context.Background(), httpGetterInfo) - c.Assert(err, chk.IsNil) + a.Nil(err) rrOptions := azbfs.RetryReaderOptions{MaxRetryRequests: 1} if logThisRun { @@ -205,27 +207,28 @@ func (r *aztestsSuite) TestRetryReaderReadWithRetryIoUnexpectedEOF(c *chk.C) { // should fail and succeed through retry can := make([]byte, 1) n, err := retryReader.Read(can) - c.Assert(n, chk.Equals, 1) - c.Assert(err, chk.IsNil) + a.Equal(1, n) + a.Nil(err) // check "logging", if it was enabled if logThisRun { // We only expect one failed try in this test // And the notification method is not called for successes - c.Assert(failureMethodNumCalls, chk.Equals, 1) // this is the number of calls we counted - c.Assert(failureWillRetryCount, chk.Equals, 1) // the sole failure was retried - c.Assert(failureLastReportedFailureCount, chk.Equals, 1) // this is the number of failures reported by the notification method - c.Assert(failureLastReportedError, chk.NotNil) + a.Equal(1, failureMethodNumCalls) // this is the number of calls we counted + a.Equal(1, failureWillRetryCount) // the sole failure was retried + a.Equal(1, failureLastReportedFailureCount) // this is the number of failures reported by the notification method + a.NotNil(failureLastReportedError) } // should return EOF n, err = retryReader.Read(can) - c.Assert(n, chk.Equals, 0) - c.Assert(err, chk.Equals, io.EOF) + a.Zero(n) + a.Equal(io.EOF, err) } } // Test normal retry fail as retry Count not enough. -func (r *aztestsSuite) TestRetryReaderReadNegativeNormalFail(c *chk.C) { +func TestRetryReaderReadNegativeNormalFail(t *testing.T) { + a := assert.New(t) // Extra setup for testing notification of failures (i.e. 
of unsuccessful tries) failureMethodNumCalls := 0 failureWillRetryCount := 0 @@ -267,20 +270,21 @@ func (r *aztestsSuite) TestRetryReaderReadNegativeNormalFail(c *chk.C) { // should fail can := make([]byte, 1) n, err := retryReader.Read(can) - c.Assert(n, chk.Equals, 0) - c.Assert(err, chk.Equals, body.injectedError) + a.Zero(n) + a.Equal(body.injectedError, err) // Check that we received the right notification callbacks // We only expect two failed tries in this test, but only one // of the would have had willRetry = true - c.Assert(failureMethodNumCalls, chk.Equals, 2) // this is the number of calls we counted - c.Assert(failureWillRetryCount, chk.Equals, 1) // only the first failure was retried - c.Assert(failureLastReportedFailureCount, chk.Equals, 2) // this is the number of failures reported by the notification method - c.Assert(failureLastReportedError, chk.NotNil) + a.Equal(2, failureMethodNumCalls) // this is the number of calls we counted + a.Equal(1, failureWillRetryCount) // only the first failure was retried + a.Equal(2, failureLastReportedFailureCount) // this is the number of failures reported by the notification method + a.NotNil(failureLastReportedError) } // Test boundary case when Count equals to 0 and fail. -func (r *aztestsSuite) TestRetryReaderReadCount0(c *chk.C) { +func TestRetryReaderReadCount0(t *testing.T) { + a := assert.New(t) byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true @@ -304,16 +308,17 @@ func (r *aztestsSuite) TestRetryReaderReadCount0(c *chk.C) { // should consume the only byte can := make([]byte, 1) n, err := retryReader.Read(can) - c.Assert(n, chk.Equals, 1) - c.Assert(err, chk.IsNil) + a.Equal(1, n) + a.Nil(err) // should not read when Count=0, and should return EOF n, err = retryReader.Read(can) - c.Assert(n, chk.Equals, 0) - c.Assert(err, chk.Equals, io.EOF) + a.Zero(n) + a.Equal(io.EOF, err) } -func (r *aztestsSuite) TestRetryReaderReadNegativeNonRetriableError(c *chk.C) { +func TestRetryReaderReadNegativeNonRetriableError(t *testing.T) { + a := assert.New(t) byteCount := 1 body := newPerByteReader(byteCount) body.doInjectError = true @@ -336,7 +341,7 @@ func (r *aztestsSuite) TestRetryReaderReadNegativeNonRetriableError(c *chk.C) { dest := make([]byte, 1) _, err := retryReader.Read(dest) - c.Assert(err, chk.Equals, body.injectedError) + a.Equal(body.injectedError, err) } // Test the case where we programmatically force a retry to happen, via closing the body early from another goroutine @@ -344,8 +349,8 @@ func (r *aztestsSuite) TestRetryReaderReadNegativeNonRetriableError(c *chk.C) { // purposes of unit testing, here we are testing the cancellation mechanism that is exposed to // consumers of the API, to allow programmatic forcing of retries (e.g. if the consumer deems // the read to be taking too long, they may force a retry in the hope of better performance next time). 
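// Note (sketch, not part of this diff): every converted test in this change follows the same testify pattern —
// build one *assert.Assertions per test with assert.New(t), and pass the expected value first. A minimal,
// self-contained, hypothetical example of that shape, using only the stdlib plus the testify import added above:

package azbfs_test

import (
	"bytes"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestAssertionStyleSketch is hypothetical; it mirrors the shape the migrated tests take:
// a.Nil(err) replaces c.Assert(err, chk.IsNil), and a.Equal(expected, actual) replaces
// c.Assert(actual, chk.Equals/DeepEquals, expected) with the argument order flipped.
func TestAssertionStyleSketch(t *testing.T) {
	a := assert.New(t)

	buf := make([]byte, 1)
	n, err := io.ReadFull(bytes.NewReader([]byte{0x01}), buf)

	a.Nil(err)                 // was: c.Assert(err, chk.IsNil)
	a.Equal(1, n)              // was: c.Assert(n, chk.Equals, 1)
	a.Equal([]byte{0x01}, buf) // was: c.Assert(buf, chk.DeepEquals, ...)
}

// Argument order matters: testify reports the first argument as the expected value in failure
// messages, which is why every a.Equal call in this diff puts the literal first.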
-func (r *aztestsSuite) TestRetryReaderReadWithForcedRetry(c *chk.C) { - +func TestRetryReaderReadWithForcedRetry(t *testing.T) { + a := assert.New(t) for _, enableRetryOnEarlyClose := range []bool{false, true} { // use the notification callback, so we know that the retry really did happen @@ -371,7 +376,7 @@ func (r *aztestsSuite) TestRetryReaderReadWithForcedRetry(c *chk.C) { httpGetterInfo := azbfs.HTTPGetterInfo{Offset: 0, Count: int64(byteCount)} initResponse, err := getter(context.Background(), httpGetterInfo) - c.Assert(err, chk.IsNil) + a.Nil(err) rrOptions := azbfs.RetryReaderOptions{MaxRetryRequests: 2, TreatEarlyCloseAsError: !enableRetryOnEarlyClose} rrOptions.NotifyFailedRead = failureMethod @@ -387,12 +392,12 @@ func (r *aztestsSuite) TestRetryReaderReadWithForcedRetry(c *chk.C) { output := make([]byte, byteCount) n, err := io.ReadFull(retryReader, output) if enableRetryOnEarlyClose { - c.Assert(n, chk.Equals, byteCount) - c.Assert(err, chk.IsNil) - c.Assert(output, chk.DeepEquals, randBytes) - c.Assert(failureMethodNumCalls, chk.Equals, 1) // assert that the cancellation did indeed happen + a.Equal(byteCount, n) + a.Nil(err) + a.Equal(randBytes, output) + a.Equal(1, failureMethodNumCalls) // assert that the cancellation did indeed happen } else { - c.Assert(err, chk.NotNil) + a.NotNil(err) } } } diff --git a/azbfs/zt_test.go b/azbfs/zt_test.go index 53e8c6ace..0851a3a08 100644 --- a/azbfs/zt_test.go +++ b/azbfs/zt_test.go @@ -3,23 +3,17 @@ package azbfs_test import ( "context" "fmt" + "github.com/stretchr/testify/assert" + "net/http" "net/url" "os" "runtime" "strings" - "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - chk "gopkg.in/check.v1" ) -func Test(t *testing.T) { chk.TestingT(t) } - -type aztestsSuite struct{} - -var _ = chk.Suite(&aztestsSuite{}) - const ( fileSystemPrefix = "go" directoryPrefix = "gotestdirectory" @@ -59,7 +53,7 @@ func generateName(prefix string) string { runtime.Callers(0, pc) f := runtime.FuncForPC(pc[0]) name := f.Name() - for i := 0; !strings.Contains(name, "Suite"); i++ { // The tests are all scoped to the suite, so this ensures getting the actual test name + for i := 0; !strings.Contains(name, "Test"); i++ { // The test functions are all prefixed with Test, so this ensures getting the actual test name f = runtime.FuncForPC(pc[i]) name = f.Name() } @@ -83,67 +77,83 @@ func generateFileName() string { return generateName(filePrefix) } -func getFileSystemURL(c *chk.C, fsu azbfs.ServiceURL) (fs azbfs.FileSystemURL, name string) { +func getFileSystemURL(a *assert.Assertions, fsu azbfs.ServiceURL) (fs azbfs.FileSystemURL, name string) { name = generateFileSystemName() fs = fsu.NewFileSystemURL(name) return fs, name } -func getDirectoryURLFromFileSystem(c *chk.C, fs azbfs.FileSystemURL) (directory azbfs.DirectoryURL, name string) { +func getDirectoryURLFromFileSystem(a *assert.Assertions, fs azbfs.FileSystemURL) (directory azbfs.DirectoryURL, name string) { name = generateDirectoryName() directory = fs.NewDirectoryURL(name) return directory, name } -func getDirectoryURLFromDirectory(c *chk.C, parentDirectory azbfs.DirectoryURL) (directory azbfs.DirectoryURL, name string) { +func getDirectoryURLFromDirectory(a *assert.Assertions, parentDirectory azbfs.DirectoryURL) (directory azbfs.DirectoryURL, name string) { name = generateDirectoryName() directory = parentDirectory.NewDirectoryURL(name) return directory, name } // This is a convenience method, No public API to create file URL from fileSystem now.
This method uses fileSystem's root directory. -func getFileURLFromFileSystem(c *chk.C, fs azbfs.FileSystemURL) (file azbfs.FileURL, name string) { +func getFileURLFromFileSystem(a *assert.Assertions, fs azbfs.FileSystemURL) (file azbfs.FileURL, name string) { name = generateFileName() file = fs.NewRootDirectoryURL().NewFileURL(name) return file, name } -func getFileURLFromDirectory(c *chk.C, directory azbfs.DirectoryURL) (file azbfs.FileURL, name string) { +func getFileURLFromDirectory(a *assert.Assertions, directory azbfs.DirectoryURL) (file azbfs.FileURL, name string) { name = generateFileName() file = directory.NewFileURL(name) return file, name } -func createNewFileSystem(c *chk.C, fsu azbfs.ServiceURL) (fs azbfs.FileSystemURL, name string) { - fs, name = getFileSystemURL(c, fsu) +func createNewFileSystem(a *assert.Assertions, fsu azbfs.ServiceURL) (fs azbfs.FileSystemURL, name string) { + fs, name = getFileSystemURL(a, fsu) - cResp, err := fs.Create(ctx) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := fs.Create(ctx) + a.Nil(err) return fs, name } -func createNewDirectoryFromFileSystem(c *chk.C, fileSystem azbfs.FileSystemURL) (dir azbfs.DirectoryURL, name string) { - dir, name = getDirectoryURLFromFileSystem(c, fileSystem) +func createNewDirectoryFromFileSystem(a *assert.Assertions, fileSystem azbfs.FileSystemURL) (dir azbfs.DirectoryURL, name string) { + dir, name = getDirectoryURLFromFileSystem(a, fileSystem) - cResp, err := dir.Create(ctx, true) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := dir.Create(ctx, true) + a.Nil(err) return dir, name } // This is a convenience method, No public API to create file URL from fileSystem now. This method uses fileSystem's root directory. 
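// Note (sketch, not part of this diff): with the helpers in this file now threading *assert.Assertions
// instead of *chk.C, a new test composes them as in the hypothetical example below. It assumes the
// getBfsServiceURL, createNewFileSystem, createNewFileFromFileSystem, deleteFileSystem, and deleteFile
// helpers shown in this diff, plus the package's existing "context" import and the usual "testing" and
// testify imports in the calling test file.

// TestHelperUsageSketch is hypothetical; it shows how the reworked helpers compose in a plain
// go-test function now that the gocheck suite is gone.
func TestHelperUsageSketch(t *testing.T) {
	a := assert.New(t)
	fsu := getBfsServiceURL()

	// The create helpers call a.Nil(err) themselves, so the test body only handles the returned URLs.
	fsURL, _ := createNewFileSystem(a, fsu)
	defer deleteFileSystem(a, fsURL)

	fileURL, _ := createNewFileFromFileSystem(a, fsURL)
	defer deleteFile(a, fileURL)

	getResp, err := fileURL.GetProperties(context.Background())
	a.Nil(err)
	a.Equal("file", getResp.XMsResourceType())
}

// The delete helpers also assert on the HTTP status (202 for the filesystem, 200 for directories
// and files), so cleanup failures surface as test failures rather than being silently ignored.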
-func createNewFileFromFileSystem(c *chk.C, fileSystem azbfs.FileSystemURL) (file azbfs.FileURL, name string) { +func createNewFileFromFileSystem(a *assert.Assertions, fileSystem azbfs.FileSystemURL) (file azbfs.FileURL, name string) { dir := fileSystem.NewRootDirectoryURL() - file, name = getFileURLFromDirectory(c, dir) + file, name = getFileURLFromDirectory(a, dir) - cResp, err := file.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := file.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) + a.Nil(err) return file, name } + +func deleteFileSystem(a *assert.Assertions, fs azbfs.FileSystemURL) { + resp, err := fs.Delete(context.Background()) + a.Nil(err) + a.Equal(http.StatusAccepted, resp.Response().StatusCode) +} + +// deleteDirectory deletes the directory represented by directory Url +func deleteDirectory(a *assert.Assertions, dul azbfs.DirectoryURL) { + resp, err := dul.Delete(context.Background(), nil, true) + a.Nil(err) + a.Equal(http.StatusOK, resp.Response().StatusCode) +} + +func deleteFile(a *assert.Assertions, file azbfs.FileURL) { + resp, err := file.Delete(context.Background()) + a.Nil(err) + a.Equal(http.StatusOK, resp.Response().StatusCode) +} diff --git a/azbfs/zt_url_directory_test.go b/azbfs/zt_url_directory_test.go index d70ca2c34..b0ae7af8b 100644 --- a/azbfs/zt_url_directory_test.go +++ b/azbfs/zt_url_directory_test.go @@ -3,267 +3,264 @@ package azbfs_test import ( "context" "fmt" + "github.com/stretchr/testify/assert" "net/http" "net/url" "strings" + "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - chk "gopkg.in/check.v1" ) -type DirectoryUrlSuite struct{} - -var _ = chk.Suite(&DirectoryUrlSuite{}) - -// deleteDirectory deletes the directory represented by directory Url -func deleteDirectory(c *chk.C, dul azbfs.DirectoryURL) { - resp, err := dul.Delete(context.Background(), nil, true) - c.Assert(err, chk.IsNil) - c.Assert(resp.Response().StatusCode, chk.Equals, http.StatusOK) -} - // TestCreateDirectory test the creation of a directory -func (dus *DirectoryUrlSuite) TestCreateDeleteDirectory(c *chk.C) { +func TestCreateDeleteDirectory(t *testing.T) { + a := assert.New(t) // Create a file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create a directory url from the fileSystem Url - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) // Assert the directory create response header attributes - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) } // TestCreateSubDir tests creating the sub-directory inside a directory -func (dus *DirectoryUrlSuite) TestCreateSubDir(c 
*chk.C) { +func TestCreateSubDir(t *testing.T) { + a := assert.New(t) // Create the file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create the directory Url from fileSystem Url and create directory - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) // Create the sub-directory url from directory Url and create sub-directory - subDirUrl, _ := getDirectoryURLFromDirectory(c, dirUrl) + subDirUrl, _ := getDirectoryURLFromDirectory(a, dirUrl) cResp, err = subDirUrl.Create(context.Background(), true) - defer deleteDirectory(c, subDirUrl) + defer deleteDirectory(a, subDirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) } // TestDirectoryCreateAndGetProperties tests the create directory and // get directory properties -func (dus *DirectoryUrlSuite) TestDirectoryCreateAndGetProperties(c *chk.C) { +func TestDirectoryCreateAndGetProperties(t *testing.T) { + a := assert.New(t) // Create file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create directory url from fileSystemUrl and create directory - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) // Get the directory properties and verify the resource type gResp, err := 
dirUrl.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(gResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(gResp.XMsResourceType(), chk.Equals, "directory") + a.Nil(err) + a.Equal(http.StatusOK, gResp.StatusCode()) + a.Equal("directory", gResp.XMsResourceType()) } // TestCreateDirectoryAndFiles tests the create directory and create file inside the directory -func (dus *DirectoryUrlSuite) TestCreateDirectoryAndFiles(c *chk.C) { +func TestCreateDirectoryAndFiles(t *testing.T) { + a := assert.New(t) // Create the file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create the directoryUrl from fileSystemUrl // and create directory - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) // Create fileUrl from directoryUrl and create file inside the directory - fileUrl, _ := getFileURLFromDirectory(c, dirUrl) + fileUrl, _ := getFileURLFromDirectory(a, dirUrl) fresp, err := fileUrl.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - defer delFile(c, fileUrl) + defer deleteFile(a, fileUrl) - c.Assert(err, chk.IsNil) - c.Assert(fresp.Response().StatusCode, chk.Equals, http.StatusCreated) - c.Assert(fresp.ETag(), chk.Not(chk.Equals), "") - c.Assert(fresp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(fresp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(fresp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(fresp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, fresp.Response().StatusCode) + a.NotEqual("", fresp.ETag()) + a.NotEqual("", fresp.LastModified()) + a.NotEqual("", fresp.XMsRequestID()) + a.NotEqual("", fresp.XMsVersion()) + a.NotEqual("", fresp.Date()) } // TestReCreateDirectory tests the creation of directories that already exist -func (dus *DirectoryUrlSuite) TestReCreateDirectory(c *chk.C) { +func TestReCreateDirectory(t *testing.T) { + a := assert.New(t) // Create the file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create the directoryUrl from fileSystemUrl and create directory - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) + defer deleteDirectory(a, dirUrl) + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) // Re-create it (allowing overwrite) // TODO: put some files in it before this, 
and make assertions about what happens to them after the re-creation cResp, err = dirUrl.Create(context.Background(), true) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) // Attempt to re-create it (but do NOT allow overwrite) cResp, err = dirUrl.Create(context.Background(), false) // <- false for re-create - c.Assert(err, chk.NotNil) + a.NotNil(err) stgErr, ok := err.(azbfs.StorageError) - c.Assert(ok, chk.Equals, true) - c.Assert(stgErr.Response().StatusCode, chk.Equals, http.StatusConflict) - c.Assert(stgErr.ServiceCode(), chk.Equals, azbfs.ServiceCodePathAlreadyExists) + a.True(ok) + a.Equal(http.StatusConflict, stgErr.Response().StatusCode) + a.Equal(azbfs.ServiceCodePathAlreadyExists, stgErr.ServiceCode()) } // TestCreateMetadataDeleteDirectory test the creation of a directory with metadata -func (dus *DirectoryUrlSuite) TestCreateMetadataDeleteDirectory(c *chk.C) { +func TestCreateMetadataDeleteDirectory(t *testing.T) { + a := assert.New(t) // Create a file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create metadata metadata := make(map[string]string) metadata["foo"] = "bar" // Create a directory url from the fileSystem Url - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.CreateWithOptions(context.Background(), azbfs.CreateDirectoryOptions{RecreateIfExists: true, Metadata: metadata}) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) // Assert the directory create response header attributes - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) getResp, err := dirUrl.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(getResp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(getResp.XMsProperties(), chk.Not(chk.Equals), "") // Check metadata returned is not null. + a.Nil(err) + a.Equal(http.StatusOK, getResp.StatusCode()) + a.NotEqual("", getResp.XMsProperties()) // Check metadata returned is not null. } // TestDirectoryStructure tests creating dir, sub-dir inside dir and files // inside dirs and sub-dirs. 
Then verify the count of files / sub-dirs inside directory -func (dus *DirectoryUrlSuite) TestDirectoryStructure(c *chk.C) { +func TestDirectoryStructure(t *testing.T) { + a := assert.New(t) // Create file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create a directory inside filesystem - dirUrl, _ := getDirectoryURLFromFileSystem(c, fsURL) + dirUrl, _ := getDirectoryURLFromFileSystem(a, fsURL) cResp, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) // Create a sub-dir inside the above create directory - subDirUrl, _ := getDirectoryURLFromDirectory(c, dirUrl) + subDirUrl, _ := getDirectoryURLFromDirectory(a, dirUrl) cResp, err = subDirUrl.Create(context.Background(), true) - defer deleteDirectory(c, subDirUrl) + defer deleteDirectory(a, subDirUrl) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.StatusCode()) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) // Create a file inside directory - fileUrl, _ := getFileURLFromDirectory(c, dirUrl) + fileUrl, _ := getFileURLFromDirectory(a, dirUrl) fresp, err := fileUrl.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - defer delFile(c, fileUrl) + defer deleteFile(a, fileUrl) - c.Assert(err, chk.IsNil) - c.Assert(fresp.Response().StatusCode, chk.Equals, http.StatusCreated) - c.Assert(fresp.ETag(), chk.Not(chk.Equals), "") - c.Assert(fresp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(fresp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(fresp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(fresp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, fresp.Response().StatusCode) + a.NotEqual("", fresp.ETag()) + a.NotEqual("", fresp.LastModified()) + a.NotEqual("", fresp.XMsRequestID()) + a.NotEqual("", fresp.XMsVersion()) + a.NotEqual("", fresp.Date()) // create a file inside the sub-dir created above - subDirfileUrl, _ := getFileURLFromDirectory(c, subDirUrl) + subDirfileUrl, _ := getFileURLFromDirectory(a, subDirUrl) fresp, err = subDirfileUrl.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - defer delFile(c, subDirfileUrl) + defer deleteFile(a, subDirfileUrl) - c.Assert(err, chk.IsNil) - c.Assert(fresp.Response().StatusCode, chk.Equals, http.StatusCreated) - 
c.Assert(fresp.ETag(), chk.Not(chk.Equals), "") - c.Assert(fresp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(fresp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(fresp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(fresp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, fresp.Response().StatusCode) + a.NotEqual("", fresp.ETag()) + a.NotEqual("", fresp.LastModified()) + a.NotEqual("", fresp.XMsRequestID()) + a.NotEqual("", fresp.XMsVersion()) + a.NotEqual("", fresp.Date()) // list the directory create above. // expected number of file inside the dir is 2 i.e one @@ -272,156 +269,161 @@ func (dus *DirectoryUrlSuite) TestDirectoryStructure(c *chk.C) { continuationMarker := "" lresp, err := dirUrl.ListDirectorySegment(context.Background(), &continuationMarker, true) - c.Assert(err, chk.IsNil) - c.Assert(lresp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(len(lresp.Files()), chk.Equals, 2) - c.Assert(len(lresp.Directories()), chk.Equals, 1) - c.Assert(lresp.ETag(), chk.Equals, "") - c.Assert(lresp.LastModified(), chk.Equals, "") - c.Assert(lresp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(lresp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(lresp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, lresp.Response().StatusCode) + a.Equal(2, len(lresp.Files())) + a.Equal(1, len(lresp.Directories())) + a.Equal("", lresp.ETag()) + a.Equal("", lresp.LastModified()) + a.NotEqual("", lresp.XMsRequestID()) + a.NotEqual("", lresp.XMsVersion()) + a.NotEqual("", lresp.Date()) } -func (dus *DirectoryUrlSuite) TestListDirectoryWithSpaces(c *chk.C) { +func TestListDirectoryWithSpaces(t *testing.T) { + a := assert.New(t) // Create file system fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create a directory inside filesystem dirUrl := fsURL.NewDirectoryURL("New Folder Test 2") _, err := dirUrl.Create(context.Background(), true) - defer deleteDirectory(c, dirUrl) + defer deleteDirectory(a, dirUrl) // Create a file inside directory - fileUrl, _ := getFileURLFromDirectory(c, dirUrl) + fileUrl, _ := getFileURLFromDirectory(a, dirUrl) _, err = fileUrl.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - defer delFile(c, fileUrl) + defer deleteFile(a, fileUrl) // list the directory created above. 
// expected number of files inside the dir is 1 continuationMarker := "" lresp, err := dirUrl.ListDirectorySegment(context.Background(), &continuationMarker, true) - c.Assert(err, chk.IsNil) - c.Assert(lresp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(len(lresp.Files()), chk.Equals, 1) - c.Assert(len(lresp.Directories()), chk.Equals, 0) - c.Assert(lresp.ETag(), chk.Equals, "") - c.Assert(lresp.LastModified(), chk.Equals, "") - c.Assert(lresp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(lresp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(lresp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, lresp.Response().StatusCode) + a.Equal(1, len(lresp.Files())) + a.Equal(0, len(lresp.Directories())) + a.Equal("", lresp.ETag()) + a.Equal("", lresp.LastModified()) + a.NotEqual("", lresp.XMsRequestID()) + a.NotEqual("", lresp.XMsVersion()) + a.NotEqual("", lresp.Date()) } -func (s *FileURLSuite) TestRenameDirectory(c *chk.C) { +func TestRenameDirectory(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - dirURL, dirName := createNewDirectoryFromFileSystem(c, fileSystemURL) + dirURL, dirName := createNewDirectoryFromFileSystem(a, fileSystemURL) dirRename := dirName + "rename" renamedDirURL, err := dirURL.Rename(context.Background(), azbfs.RenameDirectoryOptions{DestinationPath: dirRename}) - c.Assert(renamedDirURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedDirURL) + a.Nil(err) // Check that the old directory does not exist getPropertiesResp, err := dirURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) // Check that the renamed directory does exist getPropertiesResp, err = renamedDirURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) } -func (s *FileURLSuite) TestRenameDirWithFile(c *chk.C) { +func TestRenameDirWithFile(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - dirURL, dirName := createNewDirectoryFromFileSystem(c, fileSystemURL) + dirURL, dirName := createNewDirectoryFromFileSystem(a, fileSystemURL) fileName := "test.txt" fileURL := dirURL.NewFileURL(fileName) dirRename := dirName + "rename" renamedDirURL, err := dirURL.Rename(context.Background(), azbfs.RenameDirectoryOptions{DestinationPath: dirRename}) - c.Assert(renamedDirURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedDirURL) + a.Nil(err) // Check that the old directory and file do not exist getPropertiesResp, err := dirURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) 
getPropertiesResp2, err := fileURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp2, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp2) // Check that the renamed directory and file do exist getPropertiesResp, err = renamedDirURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) getPropertiesResp2, err = renamedDirURL.NewFileURL(fileName).GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp2, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp2) } -func (dus *DirectoryUrlSuite) TestSetACL(c *chk.C) { +func TestSetACL(t *testing.T) { + a := assert.New(t) // Create a filesystem fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create a directory inside the filesystem dirURL := fsURL.NewDirectoryURL("test") _, err := dirURL.Create(ctx, true) - c.Assert(err, chk.IsNil) + a.Nil(err) // Grab it's default ACLs folderAccess, err := dirURL.GetAccessControl(ctx) - c.Assert(err, chk.IsNil) + a.Nil(err) // Modify it slightly folderAccess.ACL = "user::r-x,group::r-x,other::---" folderAccess.Permissions = "" _, err = dirURL.SetAccessControl(ctx, folderAccess) - c.Assert(err, chk.IsNil) + a.Nil(err) // Compare them folderAccessToValidate, err := dirURL.GetAccessControl(ctx) - c.Assert(err, chk.IsNil) + a.Nil(err) // We're checking ACLs are the same folderAccessToValidate.Permissions = "" - c.Assert(folderAccessToValidate, chk.Equals, folderAccess) + a.Equal(folderAccess, folderAccessToValidate) // Create a file fileUrl := dirURL.NewFileURL("foo.bar") _, err = fileUrl.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // Grab it's default ACLs fileAccess, err := fileUrl.GetAccessControl(ctx) - c.Assert(err, chk.IsNil) + a.Nil(err) // Modify it slightly. 
fileAccess.ACL = "user::r-x,group::r-x,other::---" fileAccess.Permissions = "" _, err = fileUrl.SetAccessControl(ctx, fileAccess) - c.Assert(err, chk.IsNil) + a.Nil(err) // Compare them fileAccessToValidate, err := fileUrl.GetAccessControl(ctx) - c.Assert(err, chk.IsNil) + a.Nil(err) // We're checking ACLs are the same fileAccessToValidate.Permissions = "" - c.Assert(fileAccessToValidate, chk.Equals, fileAccess) + a.Equal(fileAccess, fileAccessToValidate) // Don't bother testing the root ACLs, since it calls into the directoryclient } -func (s *FileURLSuite) TestRenameDirectoryWithSas(c *chk.C) { +func TestRenameDirectoryWithSas(t *testing.T) { + a := assert.New(t) name, key := getAccountAndKey() credential := azbfs.NewSharedKeyCredential(name, key) sasQueryParams, err := azbfs.AccountSASSignatureValues{ @@ -431,38 +433,39 @@ func (s *FileURLSuite) TestRenameDirectoryWithSas(c *chk.C) { Services: azbfs.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) - c.Assert(err, chk.IsNil) + a.Nil(err) qp := sasQueryParams.Encode() rawURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", credential.AccountName(), qp) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) fsu := azbfs.NewServiceURL(*fullURL, azbfs.NewPipeline(azbfs.NewAnonymousCredential(), azbfs.PipelineOptions{})) - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - dirURL, dirName := createNewDirectoryFromFileSystem(c, fileSystemURL) + dirURL, dirName := createNewDirectoryFromFileSystem(a, fileSystemURL) dirRename := dirName + "rename" renamedDirURL, err := dirURL.Rename(context.Background(), azbfs.RenameDirectoryOptions{DestinationPath: dirRename}) - c.Assert(renamedDirURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedDirURL) + a.Nil(err) // Check that the old directory does not exist getPropertiesResp, err := dirURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) // Check that the renamed directory does exist getPropertiesResp, err = renamedDirURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) } -func (s *FileURLSuite) TestRenameDirectoryWithDestinationSas(c *chk.C) { +func TestRenameDirectoryWithDestinationSas(t *testing.T) { + a := assert.New(t) name, key := getAccountAndKey() credential := azbfs.NewSharedKeyCredential(name, key) sourceSasQueryParams, err := azbfs.AccountSASSignatureValues{ @@ -472,7 +475,7 @@ func (s *FileURLSuite) TestRenameDirectoryWithDestinationSas(c *chk.C) { Services: azbfs.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) - c.Assert(err, chk.IsNil) + a.Nil(err) destinationSasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, ExpiryTime: time.Now().Add(24 * time.Hour), @@ -480,38 
+483,38 @@ func (s *FileURLSuite) TestRenameDirectoryWithDestinationSas(c *chk.C) { Services: azbfs.AccountSASServices{File: true, Blob: true}.String(), ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) - c.Assert(err, chk.IsNil) + a.Nil(err) sourceQp := sourceSasQueryParams.Encode() destQp := destinationSasQueryParams.Encode() rawURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", credential.AccountName(), sourceQp) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) fsu := azbfs.NewServiceURL(*fullURL, azbfs.NewPipeline(azbfs.NewAnonymousCredential(), azbfs.PipelineOptions{})) - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - dirURL, dirName := createNewDirectoryFromFileSystem(c, fileSystemURL) + dirURL, dirName := createNewDirectoryFromFileSystem(a, fileSystemURL) dirRename := dirName + "rename" renamedDirURL, err := dirURL.Rename( context.Background(), azbfs.RenameDirectoryOptions{DestinationPath: dirRename, DestinationSas: &destQp}) - c.Assert(renamedDirURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedDirURL) + a.Nil(err) found := strings.Contains(renamedDirURL.String(), destQp) // make sure the correct SAS is used - c.Assert(found, chk.Equals, true) + a.True(found) // Check that the old directory does not exist getPropertiesResp, err := dirURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) // Check that the renamed directory does exist getPropertiesResp, err = renamedDirURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) -} + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) +} \ No newline at end of file diff --git a/azbfs/zt_url_file_test.go b/azbfs/zt_url_file_test.go index b6e8b77ee..0057e5535 100644 --- a/azbfs/zt_url_file_test.go +++ b/azbfs/zt_url_file_test.go @@ -5,8 +5,10 @@ import ( "context" "errors" "fmt" + "github.com/stretchr/testify/assert" "io" "strings" + "testing" "time" //"crypto/md5" @@ -19,19 +21,8 @@ import ( "net/http" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 ) -type FileURLSuite struct{} - -var _ = chk.Suite(&FileURLSuite{}) - -func delFile(c *chk.C, file azbfs.FileURL) { - resp, err := file.Delete(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(resp.Response().StatusCode, chk.Equals, 200) -} - func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { data := make([]byte, n, n) for i := 0; i < n; i++ { @@ -40,162 +31,169 @@ func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { return bytes.NewReader(data), data } -func (s *FileURLSuite) TestFileNewFileURLNegative(c *chk.C) { - c.Assert(func() { azbfs.NewFileURL(url.URL{}, nil) }, chk.Panics, "p can't be nil") +func TestFileNewFileURLNegative(t *testing.T) { + a := assert.New(t) + a.Panics(func() { azbfs.NewFileURL(url.URL{}, nil) }, "p can't be nil") } -func (s *FileURLSuite) TestFileCreateDelete(c *chk.C) { +func TestFileCreateDelete(t *testing.T) { + a := assert.New(t) fsu := 
getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create and delete file in root directory. - file, _ := getFileURLFromFileSystem(c, fsURL) + file, _ := getFileURLFromFileSystem(a, fsURL) cResp, err := file.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.Response().StatusCode, chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.Response().StatusCode) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) delResp, err := file.Delete(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(delResp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(delResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(delResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(delResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, delResp.Response().StatusCode) + a.NotEqual("", delResp.XMsRequestID()) + a.NotEqual("", delResp.XMsVersion()) + a.NotEqual("", delResp.Date()) - dirURL, _ := createNewDirectoryFromFileSystem(c, fsURL) - defer deleteDirectory(c, dirURL) + dirURL, _ := createNewDirectoryFromFileSystem(a, fsURL) + defer deleteDirectory(a, dirURL) // Create and delete file in named directory. - file, _ = getFileURLFromDirectory(c, dirURL) + file, _ = getFileURLFromDirectory(a, dirURL) cResp, err = file.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.Response().StatusCode, chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.Response().StatusCode) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) delResp, err = file.Delete(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(delResp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(delResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(delResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(delResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, delResp.Response().StatusCode) + a.NotEqual("", delResp.XMsRequestID()) + a.NotEqual("", delResp.XMsVersion()) + a.NotEqual("", delResp.Date()) } -func (s *FileURLSuite) TestFileCreateWithPermissions(c *chk.C) { +func TestFileCreateWithPermissions(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create and delete file in root directory. 
- file, _ := getFileURLFromFileSystem(c, fsURL) + file, _ := getFileURLFromFileSystem(a, fsURL) _, err := file.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{Permissions: "0444"}) - defer delFile(c, file) + defer deleteFile(a, file) getResp, err := file.GetAccessControl(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(getResp.Permissions, chk.Equals, "r--r-----") + a.Nil(err) + a.Equal("r--r-----", getResp.Permissions) } -func (s *FileURLSuite) TestFileCreateDeleteNonExistingParent(c *chk.C) { +func TestFileCreateDeleteNonExistingParent(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) // Create and delete file in directory that does not exist yet. - dirNotExist, _ := getDirectoryURLFromFileSystem(c, fsURL) - file, _ := getFileURLFromDirectory(c, dirNotExist) + dirNotExist, _ := getDirectoryURLFromFileSystem(a, fsURL) + file, _ := getFileURLFromDirectory(a, dirNotExist) // Verify that the file was created even though its parent directory does not exist yet cResp, err := file.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.Response().StatusCode, chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.Response().StatusCode) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) // Verify that the parent directory was created successfully dirResp, err := dirNotExist.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(dirResp.StatusCode(), chk.Equals, http.StatusOK) + a.Nil(err) + a.Equal(http.StatusOK, dirResp.StatusCode()) } -func (s *FileURLSuite) TestFileCreateWithMetadataDelete(c *chk.C) { +func TestFileCreateWithMetadataDelete(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fsURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fsURL) + fsURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fsURL) - file, _ := getFileURLFromFileSystem(c, fsURL) + file, _ := getFileURLFromFileSystem(a, fsURL) metadata := make(map[string]string) metadata["foo"] = "bar" cResp, err := file.CreateWithOptions(context.Background(), azbfs.CreateFileOptions{Metadata: metadata}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.Response().StatusCode, chk.Equals, http.StatusCreated) - c.Assert(cResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(cResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(cResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(cResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusCreated, cResp.Response().StatusCode) + a.NotEqual("", cResp.ETag()) + a.NotEqual("", cResp.LastModified()) + a.NotEqual("", cResp.XMsRequestID()) + a.NotEqual("", cResp.XMsVersion()) + a.NotEqual("", cResp.Date()) getResp, err := file.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(getResp.Response().StatusCode, chk.Equals, 
http.StatusOK) - c.Assert(getResp.XMsProperties(), chk.Not(chk.Equals), "") // Check metadata returned is not null. + a.Nil(err) + a.Equal(http.StatusOK, getResp.Response().StatusCode) + a.NotEqual("", getResp.XMsProperties()) // Check metadata returned is not null. delResp, err := file.Delete(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(delResp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(delResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(delResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(delResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, delResp.Response().StatusCode) + a.NotEqual("", delResp.XMsRequestID()) + a.NotEqual("", delResp.XMsVersion()) + a.NotEqual("", delResp.Date()) } -func (s *FileURLSuite) TestFileGetProperties(c *chk.C) { +func TestFileGetProperties(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - fileURL, _ := createNewFileFromFileSystem(c, fileSystemURL) - defer delFile(c, fileURL) + fileURL, _ := createNewFileFromFileSystem(a, fileSystemURL) + defer deleteFile(a, fileURL) getResp, err := fileURL.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(getResp.Response().StatusCode, chk.Equals, http.StatusOK) - c.Assert(getResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(getResp.XMsResourceType(), chk.Equals, "file") - c.Assert(getResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(getResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(getResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(getResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, getResp.Response().StatusCode) + a.NotEqual("", getResp.LastModified()) + a.Equal("file", getResp.XMsResourceType()) + a.NotEqual("", getResp.ETag()) + a.NotEqual("", getResp.XMsRequestID()) + a.NotEqual("", getResp.XMsVersion()) + a.NotEqual("", getResp.Date()) } ////TODO this is failing on the service side at the moment, the spec is not accurate -//func (s *FileURLSuite) TestCreateFileWithBody(c *chk.C) { +//func TestCreateFileWithBody(t *testing.T) { +// a := assert.New(t) // fsu := getBfsServiceURL() -// fileSystemURL, _ := createNewFileSystem(c, fsu) -// defer delFileSystem(c, fileSystemURL) +// fileSystemURL, _ := createNewFileSystem(a, fsu) +// defer deleteFileSystem(a, fileSystemURL) // -// fileURL, _ := createNewFileFromFileSystem(c, fileSystemURL, 2048) -// defer delFile(c, fileURL) +// fileURL, _ := createNewFileFromFileSystem(a, fileSystemURL, 2048) +// defer deleteFile(a, fileURL) // // contentR, contentD := getRandomDataAndReader(2048) // // pResp, err := fileURL.Create(context.Background(), contentR) -// c.Assert(err, chk.IsNil) +// a.Nil(err) // c.Assert(pResp.StatusCode(), chk.Equals, http.StatusCreated) // c.Assert(pResp.ETag(), chk.Not(chk.Equals), "") // c.Assert(pResp.LastModified(), chk.Not(chk.Equals), "") @@ -206,68 +204,70 @@ func (s *FileURLSuite) TestFileGetProperties(c *chk.C) { // // Get with rangeGetContentMD5 enabled. // // Partial data, check status code 206. 
// resp, err := fileURL.Download(context.Background(), 0, 1024) -// c.Assert(err, chk.IsNil) +// a.Nil(err) // c.Assert(resp.StatusCode(), chk.Equals, http.StatusPartialContent) // c.Assert(resp.ContentLength(), chk.Equals, "1024") // c.Assert(resp.ContentType(), chk.Equals, "application/octet-stream") // c.Assert(resp.Status(), chk.Not(chk.Equals), "") // // download, err := io.ReadAll(resp.Response().Body) -// c.Assert(err, chk.IsNil) +// a.Nil(err) // c.Assert(download, chk.DeepEquals, contentD[:1024]) //} -func (s *FileURLSuite) TestUnexpectedEOFRecovery(c *chk.C) { +func TestUnexpectedEOFRecovery(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - fileURL, _ := createNewFileFromFileSystem(c, fileSystemURL) - defer delFile(c, fileURL) + fileURL, _ := createNewFileFromFileSystem(a, fileSystemURL) + defer deleteFile(a, fileURL) contentR, contentD := getRandomDataAndReader(2048) resp, err := fileURL.AppendData(context.Background(), 0, contentR) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, http.StatusAccepted) - c.Assert(resp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(resp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(resp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusAccepted, resp.StatusCode()) + a.NotEqual("", resp.XMsRequestID()) + a.NotEqual("", resp.XMsVersion()) + a.NotEqual("", resp.Date()) resp, err = fileURL.FlushData(context.Background(), 2048, nil, azbfs.BlobFSHTTPHeaders{}, false, true) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(resp.ETag(), chk.Not(chk.Equals), "") - c.Assert(resp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(resp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(resp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(resp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, resp.StatusCode()) + a.NotEqual("", resp.ETag()) + a.NotEqual("", resp.LastModified()) + a.NotEqual("", resp.XMsRequestID()) + a.NotEqual("", resp.XMsVersion()) + a.NotEqual("", resp.Date()) dResp, err := fileURL.Download(context.Background(), 0, 2048) - c.Assert(err, chk.IsNil) + a.Nil(err) // Verify that we can inject errors first. reader := dResp.Body(azbfs.InjectErrorInRetryReaderOptions(errors.New("unrecoverable error"))) _, err = io.ReadAll(reader) - c.Assert(err, chk.NotNil) - c.Assert(err.Error(), chk.Equals, "unrecoverable error") + a.NotNil(err) + a.Equal("unrecoverable error", err.Error()) // Then inject the retryable error. 
reader = dResp.Body(azbfs.InjectErrorInRetryReaderOptions(io.ErrUnexpectedEOF)) buf, err := io.ReadAll(reader) - c.Assert(err, chk.IsNil) - c.Assert(buf, chk.DeepEquals, contentD) + a.Nil(err) + a.Equal(contentD, buf) } -func (s *FileURLSuite) TestUploadDownloadRoundTrip(c *chk.C) { +func TestUploadDownloadRoundTrip(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - fileURL, _ := createNewFileFromFileSystem(c, fileSystemURL) - defer delFile(c, fileURL) + fileURL, _ := createNewFileFromFileSystem(a, fileSystemURL) + defer deleteFile(a, fileURL) // The file content will be made up of two parts contentR1, contentD1 := getRandomDataAndReader(2048) @@ -275,62 +275,63 @@ func (s *FileURLSuite) TestUploadDownloadRoundTrip(c *chk.C) { // Append first part pResp, err := fileURL.AppendData(context.Background(), 0, contentR1) - c.Assert(err, chk.IsNil) - c.Assert(pResp.StatusCode(), chk.Equals, http.StatusAccepted) - c.Assert(pResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(pResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(pResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusAccepted, pResp.StatusCode()) + a.NotEqual("", pResp.XMsRequestID()) + a.NotEqual("", pResp.XMsVersion()) + a.NotEqual("", pResp.Date()) // Append second part pResp, err = fileURL.AppendData(context.Background(), 2048, contentR2) - c.Assert(err, chk.IsNil) - c.Assert(pResp.StatusCode(), chk.Equals, http.StatusAccepted) - c.Assert(pResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(pResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(pResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusAccepted, pResp.StatusCode()) + a.NotEqual("", pResp.XMsRequestID()) + a.NotEqual("", pResp.XMsVersion()) + a.NotEqual("", pResp.Date()) // Flush data fResp, err := fileURL.FlushData(context.Background(), 4096, make([]byte, 0), azbfs.BlobFSHTTPHeaders{}, false, true) - c.Assert(err, chk.IsNil) - c.Assert(fResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(fResp.ETag(), chk.Not(chk.Equals), "") - c.Assert(fResp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(fResp.XMsRequestID(), chk.Not(chk.Equals), "") - c.Assert(fResp.XMsVersion(), chk.Not(chk.Equals), "") - c.Assert(fResp.Date(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, fResp.StatusCode()) + a.NotEqual("", fResp.ETag()) + a.NotEqual("", fResp.LastModified()) + a.NotEqual("", fResp.XMsRequestID()) + a.NotEqual("", fResp.XMsVersion()) + a.NotEqual("", fResp.Date()) // Get Partial data, check status code 206. resp, err := fileURL.Download(context.Background(), 0, 1024) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, http.StatusPartialContent) - c.Assert(resp.ContentLength(), chk.Equals, int64(1024)) - c.Assert(resp.ContentType(), chk.Equals, "application/octet-stream") - c.Assert(resp.Status(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusPartialContent, resp.StatusCode()) + a.EqualValues(1024, resp.ContentLength()) + a.Equal("application/octet-stream", resp.ContentType()) + a.NotEqual("", resp.Status()) // Verify the partial data download, err := io.ReadAll(resp.Response().Body) - c.Assert(err, chk.IsNil) - c.Assert(download, chk.DeepEquals, contentD1[:1024]) + a.Nil(err) + a.Equal(contentD1[:1024], download) // Get entire fileURL, check status code 200. 
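// Downloading with offset 0 and count 0 should return the whole file, i.e. 200 OK and a ContentLength of 4096 (both 2048-byte appends).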
resp, err = fileURL.Download(context.Background(), 0, 0) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(resp.ContentLength(), chk.Equals, int64(4096)) - c.Assert(resp.Date(), chk.Not(chk.Equals), "") - c.Assert(resp.ETag(), chk.Not(chk.Equals), "") - c.Assert(resp.LastModified(), chk.Not(chk.Equals), "") - c.Assert(resp.RequestID(), chk.Not(chk.Equals), "") - c.Assert(resp.Version(), chk.Not(chk.Equals), "") + a.Nil(err) + a.Equal(http.StatusOK, resp.StatusCode()) + a.EqualValues(4096, resp.ContentLength()) + a.NotEqual("", resp.Date()) + a.NotEqual("", resp.ETag()) + a.NotEqual("", resp.LastModified()) + a.NotEqual("", resp.RequestID()) + a.NotEqual("", resp.Version()) // Verify the entire content download, err = io.ReadAll(resp.Response().Body) - c.Assert(err, chk.IsNil) - c.Assert(download[:2048], chk.DeepEquals, contentD1[:]) - c.Assert(download[2048:], chk.DeepEquals, contentD2[:]) + a.Nil(err) + a.Equal(contentD1[:], download[:2048]) + a.Equal(contentD2[:], download[2048:]) } -func (s *FileURLSuite) TestBlobURLPartsSASQueryTimes(c *chk.C) { +func TestBlobURLPartsSASQueryTimes(t *testing.T) { + a := assert.New(t) StartTimesInputs := []string{ "2020-04-20", "2020-04-20T07:00Z", @@ -369,46 +370,46 @@ func (s *FileURLSuite) TestBlobURLPartsSASQueryTimes(c *chk.C) { url, _ := url.Parse(urlString) parts := azbfs.NewBfsURLParts(*url) - c.Assert(parts.Scheme, chk.Equals, "https") - c.Assert(parts.Host, chk.Equals, "myaccount.dfs.core.windows.net") - c.Assert(parts.FileSystemName, chk.Equals, "myfilesystem") - c.Assert(parts.DirectoryOrFilePath, chk.Equals, "mydirectory/myfile.txt") + a.Equal("https", parts.Scheme) + a.Equal("myaccount.dfs.core.windows.net", parts.Host) + a.Equal("myfilesystem", parts.FileSystemName) + a.Equal("mydirectory/myfile.txt", parts.DirectoryOrFilePath) sas := parts.SAS - c.Assert(sas.StartTime(), chk.Equals, StartTimesExpected[i]) - c.Assert(sas.ExpiryTime(), chk.Equals, ExpiryTimesExpected[i]) + a.Equal(StartTimesExpected[i], sas.StartTime()) + a.Equal(ExpiryTimesExpected[i], sas.ExpiryTime()) uResult := parts.URL() - c.Log(uResult.String()) - c.Log(urlString) - c.Assert(uResult.String(), chk.Equals, urlString) + a.Equal(urlString, uResult.String()) } } -func (s *FileURLSuite) TestRenameFile(c *chk.C) { +func TestRenameFile(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - fileURL, fileName := createNewFileFromFileSystem(c, fileSystemURL) + fileURL, fileName := createNewFileFromFileSystem(a, fileSystemURL) fileRename := fileName + "rename" renamedFileURL, err := fileURL.Rename(context.Background(), azbfs.RenameFileOptions{DestinationPath: fileRename}) - c.Assert(renamedFileURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedFileURL) + a.Nil(err) // Check that the old file does not exist getPropertiesResp, err := fileURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) // Check that the renamed file does exist getPropertiesResp, err = renamedFileURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, 
http.StatusOK) - c.Assert(err, chk.IsNil) + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) } -func (s *FileURLSuite) TestRenameFileWithSas(c *chk.C) { +func TestRenameFileWithSas(t *testing.T) { + a := assert.New(t) name, key := getAccountAndKey() credential := azbfs.NewSharedKeyCredential(name, key) sasQueryParams, err := azbfs.AccountSASSignatureValues{ @@ -418,38 +419,39 @@ func (s *FileURLSuite) TestRenameFileWithSas(c *chk.C) { Services: azbfs.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) - c.Assert(err, chk.IsNil) + a.Nil(err) qp := sasQueryParams.Encode() rawURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", credential.AccountName(), qp) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) fsu := azbfs.NewServiceURL(*fullURL, azbfs.NewPipeline(azbfs.NewAnonymousCredential(), azbfs.PipelineOptions{})) - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - fileURL, fileName := createNewFileFromFileSystem(c, fileSystemURL) + fileURL, fileName := createNewFileFromFileSystem(a, fileSystemURL) fileRename := fileName + "rename" renamedFileURL, err := fileURL.Rename(context.Background(), azbfs.RenameFileOptions{DestinationPath: fileRename}) - c.Assert(renamedFileURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedFileURL) + a.Nil(err) // Check that the old file does not exist getPropertiesResp, err := fileURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) // Check that the renamed file does exist getPropertiesResp, err = renamedFileURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) } -func (s *FileURLSuite) TestRenameFileWithDestinationSas(c *chk.C) { +func TestRenameFileWithDestinationSas(t *testing.T) { + a := assert.New(t) name, key := getAccountAndKey() credential := azbfs.NewSharedKeyCredential(name, key) sourceSasQueryParams, err := azbfs.AccountSASSignatureValues{ @@ -459,7 +461,7 @@ func (s *FileURLSuite) TestRenameFileWithDestinationSas(c *chk.C) { Services: azbfs.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) - c.Assert(err, chk.IsNil) + a.Nil(err) // new SAS destinationSasQueryParams, err := azbfs.AccountSASSignatureValues{ @@ -469,38 +471,38 @@ func (s *FileURLSuite) TestRenameFileWithDestinationSas(c *chk.C) { Services: azbfs.AccountSASServices{File: true, Blob: true}.String(), ResourceTypes: azbfs.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(credential) - c.Assert(err, chk.IsNil) + a.Nil(err) sourceQp := sourceSasQueryParams.Encode() destQp := destinationSasQueryParams.Encode() rawURL := fmt.Sprintf("https://%s.dfs.core.windows.net/?%s", credential.AccountName(), sourceQp) fullURL, err := 
url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) fsu := azbfs.NewServiceURL(*fullURL, azbfs.NewPipeline(azbfs.NewAnonymousCredential(), azbfs.PipelineOptions{})) - fileSystemURL, _ := createNewFileSystem(c, fsu) - defer delFileSystem(c, fileSystemURL) + fileSystemURL, _ := createNewFileSystem(a, fsu) + defer deleteFileSystem(a, fileSystemURL) - fileURL, fileName := createNewFileFromFileSystem(c, fileSystemURL) + fileURL, fileName := createNewFileFromFileSystem(a, fileSystemURL) fileRename := fileName + "rename" renamedFileURL, err := fileURL.Rename( context.Background(), azbfs.RenameFileOptions{DestinationPath: fileRename, DestinationSas: &destQp}) - c.Assert(renamedFileURL, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(renamedFileURL) + a.Nil(err) found := strings.Contains(renamedFileURL.String(), destQp) // make sure the correct SAS is used - c.Assert(found, chk.Equals, true) + a.True(found) // Check that the old file does not exist getPropertiesResp, err := fileURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) // TODO: I want to check the status code is 404 but not sure how since the resp is nil - c.Assert(getPropertiesResp, chk.IsNil) + a.NotNil(err) // TODO: I want to check the status code is 404 but not sure how since the resp is nil + a.Nil(getPropertiesResp) // Check that the renamed file does exist getPropertiesResp, err = renamedFileURL.GetProperties(context.Background()) - c.Assert(getPropertiesResp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) -} + a.Equal(http.StatusOK, getPropertiesResp.StatusCode()) + a.Nil(err) +} \ No newline at end of file diff --git a/azbfs/zt_url_filesystem_test.go b/azbfs/zt_url_filesystem_test.go index 0e1df91c4..8088f623a 100644 --- a/azbfs/zt_url_filesystem_test.go +++ b/azbfs/zt_url_filesystem_test.go @@ -2,86 +2,82 @@ package azbfs_test import ( "context" + "github.com/stretchr/testify/assert" "os" + "testing" "net/http" "net/url" "github.com/Azure/azure-storage-azcopy/v10/azbfs" - chk "gopkg.in/check.v1" ) -type FileSystemURLSuite struct{} - -var _ = chk.Suite(&FileSystemURLSuite{}) - -func delFileSystem(c *chk.C, fs azbfs.FileSystemURL) { - resp, err := fs.Delete(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(resp.Response().StatusCode, chk.Equals, http.StatusAccepted) -} - -func (s *FileSystemURLSuite) TestFileSystemCreateRootDirectoryURL(c *chk.C) { +func TestFileSystemCreateRootDirectoryURL(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() testURL := fsu.NewFileSystemURL(fileSystemPrefix).NewRootDirectoryURL() correctURL := "https://" + os.Getenv("ACCOUNT_NAME") + ".dfs.core.windows.net/" + fileSystemPrefix temp := testURL.URL() - c.Assert(temp.String(), chk.Equals, correctURL) + a.Equal(correctURL, temp.String()) } -func (s *FileSystemURLSuite) TestFileSystemCreateDirectoryURL(c *chk.C) { +func TestFileSystemCreateDirectoryURL(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() testURL := fsu.NewFileSystemURL(fileSystemPrefix).NewDirectoryURL(directoryPrefix) correctURL := "https://" + os.Getenv("ACCOUNT_NAME") + ".dfs.core.windows.net/" + fileSystemPrefix + "/" + directoryPrefix temp := testURL.URL() - c.Assert(temp.String(), chk.Equals, correctURL) - c.Assert(testURL.String(), chk.Equals, correctURL) + a.Equal(correctURL, temp.String()) + a.Equal(correctURL, testURL.String()) } -func (s *FileSystemURLSuite) TestFileSystemNewFileSystemURLNegative(c *chk.C) { - c.Assert(func() { azbfs.NewFileSystemURL(url.URL{}, nil) }, chk.Panics, "p can't be 
nil") +func TestFileSystemNewFileSystemURLNegative(t *testing.T) { + a := assert.New(t) + a.Panics(func() { azbfs.NewFileSystemURL(url.URL{}, nil) }, "p can't be nil") } -func (s *FileSystemURLSuite) TestFileSystemCreateDelete(c *chk.C) { +func TestFileSystemCreateDelete(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := getFileSystemURL(c, fsu) + fileSystemURL, _ := getFileSystemURL(a, fsu) _, err := fileSystemURL.Create(ctx) - defer delFileSystem(c, fileSystemURL) - c.Assert(err, chk.IsNil) + defer deleteFileSystem(a, fileSystemURL) + a.Nil(err) // Test get properties resp, err := fileSystemURL.GetProperties(ctx) - c.Assert(resp.StatusCode(), chk.Equals, http.StatusOK) - c.Assert(err, chk.IsNil) + a.Equal(http.StatusOK, resp.StatusCode()) + a.Nil(err) } -func (s *FileSystemURLSuite) TestFileSystemList(c *chk.C) { +func TestFileSystemList(t *testing.T) { + a := assert.New(t) fsu := getBfsServiceURL() - fileSystemURL, _ := getFileSystemURL(c, fsu) + fileSystemURL, _ := getFileSystemURL(a, fsu) _, err := fileSystemURL.Create(ctx) - defer delFileSystem(c, fileSystemURL) - c.Assert(err, chk.IsNil) + defer deleteFileSystem(a, fileSystemURL) + a.Nil(err) // List Setup - dirUrl, dirName := getDirectoryURLFromFileSystem(c, fileSystemURL) + dirUrl, dirName := getDirectoryURLFromFileSystem(a, fileSystemURL) dirUrl.Create(context.Background(), true) - fileUrl, fileName := getFileURLFromFileSystem(c, fileSystemURL) + fileUrl, fileName := getFileURLFromFileSystem(a, fileSystemURL) fileUrl.Create(context.Background(), azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) // List paths, err := fileSystemURL.ListPaths(context.Background(), azbfs.ListPathsFilesystemOptions{Recursive: false}) - c.Assert(err, chk.IsNil) - c.Assert(paths.Paths, chk.NotNil) - c.Assert(len(paths.Paths), chk.Equals, 2) + a.Nil(err) + a.NotNil(paths.Paths) + a.Len(paths.Paths, 2) dirPath := paths.Paths[0] - c.Assert(*dirPath.Name, chk.Equals, dirName) - c.Assert(*dirPath.IsDirectory, chk.Equals, true) + a.Equal(dirName, *dirPath.Name) + a.True(*dirPath.IsDirectory) filePath := paths.Paths[1] - c.Assert(*filePath.Name, chk.Equals, fileName) - c.Assert(filePath.IsDirectory, chk.IsNil) -} + a.Equal(fileName, *filePath.Name) + a.Nil(filePath.IsDirectory) +} \ No newline at end of file diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c0931a5b8..ff22afdd8 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,5 +1,5 @@ variables: - AZCOPY_GOLANG_VERSION: $(AZCOPY_GO_VER) + AZCOPY_GOLANG_VERSION: '1.20.2' trigger: branches: @@ -115,13 +115,22 @@ jobs: matrix: Ubuntu-20: imageName: 'ubuntu-20.04' - type: 'linux' + build_name: 'azcopy_linux_amd64' + display_name: "Linux" + go_path: '$(go env GOPATH)/bin/' + suffix: '' Windows: imageName: 'windows-2019' - type: 'windows' + build_name: 'azcopy_windows_amd64.exe' + display_name: "Windows" + go_path: 'C:\Users\VssAdministrator\go\bin\' + suffix: '.exe' MacOS: imageName: 'macos-11' - type: 'mac-os' + build_name: 'azcopy_darwin_amd64' + display_name: "MacOS" + go_path: '$(go env GOPATH)/bin/' + suffix: '' pool: vmImage: $(imageName) @@ -129,37 +138,26 @@ jobs: - task: GoTool@0 inputs: version: $(AZCOPY_GOLANG_VERSION) - - # Running E2E Tests on Linux - AMD64 - script: | - set -e - GOARCH=amd64 GOOS=linux go build -o azcopy_linux_amd64 - export AZCOPY_E2E_EXECUTABLE_PATH=$(pwd)/azcopy_linux_amd64 - go test -timeout 60m -race -short -cover ./e2etest - env: - AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) - AZCOPY_E2E_ACCOUNT_NAME: 
$(AZCOPY_E2E_ACCOUNT_NAME) - AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS) - AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS) - AZCOPY_E2E_TENANT_ID: $(OAUTH_TENANT_ID) - AZCOPY_E2E_APPLICATION_ID: $(ACTIVE_DIRECTORY_APPLICATION_ID) - AZCOPY_E2E_CLIENT_SECRET: $(AZCOPY_SPA_CLIENT_SECRET) - AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME) - AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY) - AZCOPY_E2E_LOG_OUTPUT: '$(System.DefaultWorkingDirectory)/logs' - AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG) - AZCOPY_E2E_STD_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_CONFIG) - CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY) - CPK_ENCRYPTION_KEY_SHA256: $(CPK_ENCRYPTION_KEY_SHA256) - displayName: 'E2E Test Linux - AMD64' - condition: eq(variables.type, 'linux') + go install github.com/jstemmer/go-junit-report@v0.9.1 + go install github.com/axw/gocov/gocov@v1.1.0 + go install github.com/AlekSi/gocov-xml@v1.0.0 + go install github.com/matm/gocov-html@v0.0.0-20200509184451-71874e2e203b + displayName: 'Installing dependencies' - # Running E2E Tests on Windows - AMD64 + # Running E2E Tests on AMD64 - script: | - go build -o $(System.DefaultWorkingDirectory)/azcopy_windows_amd64.exe - go build -o $(System.DefaultWorkingDirectory)/azcopy_windows_386.exe - echo 'starting E2E tests on windows' - go test -timeout 60m -race -cover -v ./e2etest + mkdir coverage + echo 'Building executable' + go build -cover -o $(build_name) + echo 'Running tests' + go test -timeout=1h -v ./e2etest 2>&1 | $(go_path)go-junit-report$(suffix) > report.xml + echo 'Formatting coverage directory to legacy txt format' + go tool covdata textfmt -i=coverage -o coverage.txt + echo 'Formatting coverage to json format' + $(go_path)gocov$(suffix) convert coverage.txt > coverage.json + echo 'Formatting coverage to xml format' + $(go_path)gocov-xml$(suffix) < coverage.json > coverage.xml env: AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) @@ -175,34 +173,9 @@ jobs: AZCOPY_E2E_STD_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_CONFIG) CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY) CPK_ENCRYPTION_KEY_SHA256: $(CPK_ENCRYPTION_KEY_SHA256) - AZCOPY_E2E_EXECUTABLE_PATH: $(System.DefaultWorkingDirectory)/azcopy_windows_amd64.exe - displayName: 'E2E Test Windows - AMD64' - condition: eq(variables.type, 'windows') - - # Running E2E Tests on Mac - - script: | - set -e - go build -o azcopy_darwin_amd64 - echo 'starting E2E tests on mac-os' - export AZCOPY_E2E_EXECUTABLE_PATH=$(pwd)/azcopy_darwin_amd64 - go test -timeout 60m -race -cover -v ./e2etest - env: - AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) - AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) - AZCOPY_E2E_ACCOUNT_KEY_HNS: $(AZCOPY_E2E_ACCOUNT_KEY_HNS) - AZCOPY_E2E_ACCOUNT_NAME_HNS: $(AZCOPY_E2E_ACCOUNT_NAME_HNS) - AZCOPY_E2E_TENANT_ID: $(OAUTH_TENANT_ID) - AZCOPY_E2E_APPLICATION_ID: $(ACTIVE_DIRECTORY_APPLICATION_ID) - AZCOPY_E2E_CLIENT_SECRET: $(AZCOPY_SPA_CLIENT_SECRET) - AZCOPY_E2E_CLASSIC_ACCOUNT_NAME: $(AZCOPY_E2E_CLASSIC_ACCOUNT_NAME) - AZCOPY_E2E_CLASSIC_ACCOUNT_KEY: $(AZCOPY_E2E_CLASSIC_ACCOUNT_KEY) - AZCOPY_E2E_LOG_OUTPUT: '$(System.DefaultWorkingDirectory)/logs' - AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_OAUTH_MANAGED_DISK_CONFIG) - AZCOPY_E2E_STD_MANAGED_DISK_CONFIG: $(AZCOPY_E2E_STD_MANAGED_DISK_CONFIG) - CPK_ENCRYPTION_KEY: $(CPK_ENCRYPTION_KEY) - CPK_ENCRYPTION_KEY_SHA256: $(CPK_ENCRYPTION_KEY_SHA256) - 
displayName: 'E2E Test MacOs AMD64' - condition: eq(variables.type, 'mac-os') + AZCOPY_E2E_EXECUTABLE_PATH: $(System.DefaultWorkingDirectory)/$(build_name) + GOCOVERDIR: '$(System.DefaultWorkingDirectory)/coverage' + displayName: 'E2E Test $(display_name) - AMD64' - task: PublishBuildArtifacts@1 displayName: 'Publish logs' @@ -211,6 +184,20 @@ jobs: pathToPublish: '$(System.DefaultWorkingDirectory)/logs' artifactName: logs + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testRunner: JUnit + testResultsFiles: $(System.DefaultWorkingDirectory)/**/report.xml + testRunTitle: 'Go on $(display_name)' + + - task: PublishCodeCoverageResults@1 + condition: succeededOrFailed() + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(System.DefaultWorkingDirectory)/**/coverage.xml + additionalCodeCoverageFiles: $(System.DefaultWorkingDirectory)/**/coverage.html + - job: Test_On_Ubuntu variables: isMutexSet: 'false' @@ -232,6 +219,12 @@ jobs: displayName: 'Download GCS Service Account Key' inputs: secureFile: 'ci-gcs-dev.json' + - script: | + go install github.com/jstemmer/go-junit-report@v0.9.1 + go install github.com/axw/gocov/gocov@v1.1.0 + go install github.com/AlekSi/gocov-xml@v1.0.0 + go install github.com/matm/gocov-html@v0.0.0-20200509184451-71874e2e203b + displayName: 'Install dependencies' - script: | pip install azure-storage-blob==12.12.0 # set the variable to indicate that the mutex is being acquired @@ -243,31 +236,47 @@ jobs: # acquire the mutex before running live tests to avoid conflicts python ./tool_distributed_mutex.py lock "$(MUTEX_URL)" name: 'Acquire_the_distributed_mutex' + - template: azurePipelineTemplates/run-ut.yml + parameters: + directory: 'azbfs' + coverage_name: 'azbfs' + - template: azurePipelineTemplates/run-ut.yml + parameters: + directory: 'cmd' + coverage_name: 'cmd' + - template: azurePipelineTemplates/run-ut.yml + parameters: + directory: 'common' + coverage_name: 'common' + - template: azurePipelineTemplates/run-ut.yml + parameters: + directory: 'common/parallel' + coverage_name: 'parallel' + - template: azurePipelineTemplates/run-ut.yml + parameters: + directory: 'ste' + coverage_name: 'ste' + - template: azurePipelineTemplates/run-ut.yml + parameters: + directory: 'sddl' + coverage_name: 'sddl' - script: | - # run unit test and build executable - # the set -e line is needed so that the unit tests failure would cause the job to fail properly - # "-check.v" (must be after package list) outputs timings - set -e - go test -timeout 60m -race -short -cover ./cmd ./common ./common/parallel ./ste ./azbfs ./sddl "-check.v" - GOARCH=amd64 GOOS=linux go build -o azcopy_linux_amd64 - name: 'Run_unit_tests' - env: - ACCOUNT_NAME: $(ACCOUNT_NAME) - ACCOUNT_KEY: $(ACCOUNT_KEY) - AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) - AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) - AWS_ACCESS_KEY_ID: $(AWS_ACCESS_KEY_ID) - AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY) - GOOGLE_APPLICATION_CREDENTIALS: $(ciGCSServiceAccountKey.secureFilePath) - GOOGLE_CLOUD_PROJECT: $(GOOGLE_CLOUD_PROJECT) - - script: | + GOARCH=amd64 GOOS=linux go build -cover -o azcopy_linux_amd64 go build -o test-validator ./testSuite/ mkdir test-temp + mkdir coverage export AZCOPY_EXECUTABLE_PATH=$(pwd)/azcopy_linux_amd64 export TEST_SUITE_EXECUTABLE_LOCATION=$(pwd)/test-validator export TEST_DIRECTORY_PATH=$(pwd)/test-temp keyctl session test python ./testSuite/scripts/run.py + + echo 'Formatting coverage directory to legacy txt format' + go tool covdata 
textfmt -i=coverage -o smoke_coverage.txt + echo 'Formatting coverage to json format' + $(go env GOPATH)/bin/gocov convert smoke_coverage.txt > smoke_coverage.json + echo 'Formatting coverage to xml format' + $(go env GOPATH)/bin/gocov-xml < smoke_coverage.json > smoke_coverage.xml name: 'Run_smoke_tests' env: ACCOUNT_NAME: $(ACCOUNT_NAME) @@ -292,6 +301,14 @@ jobs: S2S_SRC_S3_SERVICE_URL: $(S2S_SRC_S3_SERVICE_URL) S2S_SRC_GCP_SERVICE_URL: $(S2S_SRC_GCP_SERVICE_URL) SHARE_SAS_URL: $(SHARE_SAS_URL) + GOCOVERDIR: '$(System.DefaultWorkingDirectory)/coverage' + condition: succeededOrFailed() + # Smoke Tests Publishing + - task: PublishCodeCoverageResults@1 + condition: succeededOrFailed() + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(System.DefaultWorkingDirectory)/**/smoke_coverage.xml - script: | pip install azure-storage-blob==12.12.0 python ./tool_distributed_mutex.py unlock "$(MUTEX_URL)" diff --git a/azurePipelineTemplates/run-ut.yml b/azurePipelineTemplates/run-ut.yml new file mode 100644 index 000000000..7ba21ea14 --- /dev/null +++ b/azurePipelineTemplates/run-ut.yml @@ -0,0 +1,46 @@ +parameters: + - name: directory + type: string + - name: coverage_name + type: string +steps: + - script: | + set -e + echo "Running tests in" ${directory} + go test -timeout=1h -v -coverprofile=${coverage_name}_coverage.txt ./${directory} | tee ${coverage_name}.txt + exit_code=${PIPESTATUS[0]} + echo "Generating junit report for" ${directory} + cat ${coverage_name}.txt | $(go env GOPATH)/bin/go-junit-report > ${coverage_name}_report.xml + echo "Generating json coverage report for" ${directory} + $(go env GOPATH)/bin/gocov convert ${coverage_name}_coverage.txt > ${coverage_name}_coverage.json + echo "Generating xml coverage report for" ${directory} + $(go env GOPATH)/bin/gocov-xml < ${coverage_name}_coverage.json > ${coverage_name}_coverage.xml + exit "$exit_code" + displayName: 'Run ${{ parameters.directory }} tests' + env: + directory: ${{ parameters.directory }} + coverage_name: ${{ parameters.coverage_name }} + ACCOUNT_NAME: $(ACCOUNT_NAME) + ACCOUNT_KEY: $(ACCOUNT_KEY) + AZCOPY_E2E_ACCOUNT_KEY: $(AZCOPY_E2E_ACCOUNT_KEY) + AZCOPY_E2E_ACCOUNT_NAME: $(AZCOPY_E2E_ACCOUNT_NAME) + AWS_ACCESS_KEY_ID: $(AWS_ACCESS_KEY_ID) + AWS_SECRET_ACCESS_KEY: $(AWS_SECRET_ACCESS_KEY) + GOOGLE_APPLICATION_CREDENTIALS: $(ciGCSServiceAccountKey.secureFilePath) + GOOGLE_CLOUD_PROJECT: $(GOOGLE_CLOUD_PROJECT) + condition: succeededOrFailed() + - task: PublishTestResults@2 + condition: succeededOrFailed() + inputs: + testRunner: JUnit + testResultsFiles: $(System.DefaultWorkingDirectory)/**/${{ parameters.coverage_name }}_report.xml + testRunTitle: '${{ parameters.directory }} Tests' + env: + coverage_name: ${{ parameters.coverage_name }} + - task: PublishCodeCoverageResults@1 + condition: succeededOrFailed() + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(System.DefaultWorkingDirectory)/**/${{ parameters.coverage_name }}_coverage.xml + env: + coverage_name: ${{ parameters.coverage_name }} \ No newline at end of file diff --git a/cmd/bytesizetostring_test.go b/cmd/bytesizetostring_test.go index 1a13e7234..7b60fd29b 100644 --- a/cmd/bytesizetostring_test.go +++ b/cmd/bytesizetostring_test.go @@ -1,79 +1,83 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type byteSizeToStringTestSuite struct{} - -var _ = chk.Suite(&byteSizeToStringTestSuite{}) - -func (s *byteSizeToStringTestSuite) TestBToString(c *chk.C) { +func TestBToString(t 
*testing.T) { + a := assert.New(t) inputs := []int64{50, 100, 125} expects := []string{"50.00 B", "100.00 B", "125.00 B"} for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } } -func (s *byteSizeToStringTestSuite) TestKiBToString(c *chk.C) { +func TestKiBToString(t *testing.T) { + a := assert.New(t) inputs := []int64{1024, 51200, 128000, 5632, 5376} expects := []string{"1.00 KiB", "50.00 KiB", "125.00 KiB", "5.50 KiB", "5.25 KiB"} for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } } -func (s *byteSizeToStringTestSuite) TestMiBToString(c *chk.C) { +func TestMiBToString(t *testing.T) { + a := assert.New(t) inputs := []int64{1048576, 52428800, 131072000, 5767168, 5505024} expects := []string{"1.00 MiB", "50.00 MiB", "125.00 MiB", "5.50 MiB", "5.25 MiB"} for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } } -func (s *byteSizeToStringTestSuite) TestGiBToString(c *chk.C) { +func TestGiBToString(t *testing.T) { + a := assert.New(t) inputs := []int64{1073741824, 53687091200, 134217728000, 5905580032, 5637144576} expects := []string{"1.00 GiB", "50.00 GiB", "125.00 GiB", "5.50 GiB", "5.25 GiB"} for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } } -func (s *byteSizeToStringTestSuite) TestTiBToString(c *chk.C) { +func TestTiBToString(t *testing.T) { + a := assert.New(t) inputs := []int64{1099511627776, 54975581388800, 137438953472000, 6047313952768, 5772436045824} expects := []string{"1.00 TiB", "50.00 TiB", "125.00 TiB", "5.50 TiB", "5.25 TiB"} for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } } -func (s *byteSizeToStringTestSuite) TestPiBToString(c *chk.C) { +func TestPiBToString(t *testing.T) { + a := assert.New(t) inputs := []int64{1125899906842624, 56294995342131200, 140737488355328000, 6192449487634432, 5910974510923776} expects := []string{"1.00 PiB", "50.00 PiB", "125.00 PiB", "5.50 PiB", "5.25 PiB"} for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } } -func (s *byteSizeToStringTestSuite) TestEiBToString(c *chk.C) { +func TestEiBToString(t *testing.T) { + a := assert.New(t) inputs := []int64{1152921504606846976, 6341068275337658368, 6052837899185946624} expects := []string{"1.00 EiB", "5.50 EiB", "5.25 EiB"} //50 & 125 aren't present Because they overflow int64 for k, v := range inputs { output := byteSizeToString(v) - c.Assert(output, chk.Equals, expects[k]) + a.Equal(expects[k], output) } -} +} \ No newline at end of file diff --git a/cmd/copyEnumeratorHelper_test.go b/cmd/copyEnumeratorHelper_test.go index 8ef304997..3a1f4ef69 100644 --- a/cmd/copyEnumeratorHelper_test.go +++ b/cmd/copyEnumeratorHelper_test.go @@ -22,13 +22,10 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type copyEnumeratorHelperTestSuite struct{} - -var _ = chk.Suite(©EnumeratorHelperTestSuite{}) - func newLocalRes(path string) common.ResourceString { return common.ResourceString{Value: path} } @@ -41,7 +38,8 @@ func newRemoteRes(url string) common.ResourceString { return r } -func (s *copyEnumeratorHelperTestSuite) 
TestRelativePath(c *chk.C) { +func TestRelativePath(t *testing.T) { + a := assert.New(t) // setup cca := CookedCopyCmdArgs{ Source: newLocalRes("a/b/"), @@ -59,6 +57,6 @@ func (s *copyEnumeratorHelperTestSuite) TestRelativePath(c *chk.C) { destRelPath := cca.MakeEscapedRelativePath(false, true, false, object) // assert - c.Assert(srcRelPath, chk.Equals, "/c.txt") - c.Assert(destRelPath, chk.Equals, "/c.txt") -} + a.Equal("/c.txt", srcRelPath) + a.Equal("/c.txt", destRelPath) +} \ No newline at end of file diff --git a/cmd/copyEnumeratorInit_test.go b/cmd/copyEnumeratorInit_test.go index 8a66a088c..f3f632a46 100644 --- a/cmd/copyEnumeratorInit_test.go +++ b/cmd/copyEnumeratorInit_test.go @@ -25,51 +25,50 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type copyEnumeratorSuite struct{} - -var _ = chk.Suite(©EnumeratorSuite{}) - // ============================================= BLOB TRAVERSER TESTS ======================================= -func (ce *copyEnumeratorSuite) TestValidateSourceDirThatExists(c *chk.C) { +func TestValidateSourceDirThatExists(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) dirName := "source_dir" - createNewDirectoryStub(c, containerURL, dirName) + createNewDirectoryStub(a, containerURL, dirName) // set up to create blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // dir but recursive flag not set - fail cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} err := cca.validateSourceDir(blobTraverser) - c.Assert(err.Error(), chk.Equals, "cannot use directory as source without --recursive or a trailing wildcard (/*)") + a.Equal("cannot use directory as source without --recursive or a trailing wildcard (/*)", err.Error()) // dir but recursive flag set - pass cca.Recursive = true err = cca.validateSourceDir(blobTraverser) - c.Assert(err, chk.IsNil) - c.Assert(cca.IsSourceDir, chk.Equals, true) + a.Nil(err) + a.True(cca.IsSourceDir) } -func (ce *copyEnumeratorSuite) TestValidateSourceDirDoesNotExist(c *chk.C) { +func TestValidateSourceDirDoesNotExist(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) dirName := "source_dir/" // set up to create blob traverser @@ -77,52 +76,54 @@ func (ce *copyEnumeratorSuite) TestValidateSourceDirDoesNotExist(c 
*chk.C) { p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // dir but recursive flag not set - fail cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} err := cca.validateSourceDir(blobTraverser) - c.Assert(err.Error(), chk.Equals, "cannot use directory as source without --recursive or a trailing wildcard (/*)") + a.Equal("cannot use directory as source without --recursive or a trailing wildcard (/*)", err.Error()) // dir but recursive flag set - pass cca.Recursive = true err = cca.validateSourceDir(blobTraverser) - c.Assert(err, chk.IsNil) - c.Assert(cca.IsSourceDir, chk.Equals, true) + a.Nil(err) + a.True(cca.IsSourceDir) } -func (ce *copyEnumeratorSuite) TestValidateSourceFileExists(c *chk.C) { +func TestValidateSourceFileExists(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) fileName := "source_file" - _, fileName = createNewBlockBlob(c, containerURL, fileName) + _, fileName = createNewBlockBlob(a, containerURL, fileName) ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false} err := cca.validateSourceDir(blobTraverser) - c.Assert(err, chk.IsNil) - c.Assert(cca.IsSourceDir, chk.Equals, false) + a.Nil(err) + a.False(cca.IsSourceDir) } -func (ce *copyEnumeratorSuite) TestValidateSourceFileDoesNotExist(c *chk.C) { +func TestValidateSourceFileDoesNotExist(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) fileName := "source_file" @@ -130,22 +131,23 @@ func (ce *copyEnumeratorSuite) TestValidateSourceFileDoesNotExist(c *chk.C) { p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) cca := 
CookedCopyCmdArgs{StripTopDir: false, Recursive: false} err := cca.validateSourceDir(blobTraverser) - c.Assert(err.Error(), chk.Equals, common.FILE_NOT_FOUND) - c.Assert(cca.IsSourceDir, chk.Equals, false) + a.Equal(common.FILE_NOT_FOUND, err.Error()) + a.False(cca.IsSourceDir) } -func (ce *copyEnumeratorSuite) TestValidateSourceWithWildCard(c *chk.C) { +func TestValidateSourceWithWildCard(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) dirName := "source_dir_does_not_exist" // set up to create blob traverser @@ -153,12 +155,12 @@ func (ce *copyEnumeratorSuite) TestValidateSourceWithWildCard(c *chk.C) { p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // dir but recursive flag not set - fail cca := CookedCopyCmdArgs{StripTopDir: true, Recursive: false} err := cca.validateSourceDir(blobTraverser) - c.Assert(err, chk.IsNil) - c.Assert(cca.IsSourceDir, chk.Equals, false) -} + a.Nil(err) + a.False(cca.IsSourceDir) +} \ No newline at end of file diff --git a/cmd/copyUtil_test.go b/cmd/copyUtil_test.go index 1b9e3e861..f4194bc85 100644 --- a/cmd/copyUtil_test.go +++ b/cmd/copyUtil_test.go @@ -21,61 +21,60 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/url" + "testing" ) -type copyUtilTestSuite struct{} - -var _ = chk.Suite(©UtilTestSuite{}) - -func (s *copyUtilTestSuite) TestUrlIsContainerOrBlob(c *chk.C) { +func TestUrlIsContainerOrBlob(t *testing.T) { + a := assert.New(t) util := copyHandlerUtil{} testUrl := url.URL{Path: "/container/dir1"} isContainer := util.urlIsContainerOrVirtualDirectory(&testUrl) - c.Assert(isContainer, chk.Equals, false) + a.False(isContainer) testUrl.Path = "/container/dir1/dir2" isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) - c.Assert(isContainer, chk.Equals, false) + a.False(isContainer) testUrl.Path = "/container/" isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) - c.Assert(isContainer, chk.Equals, true) + a.True(isContainer) testUrl.Path = "/container" isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) - c.Assert(isContainer, chk.Equals, true) + a.True(isContainer) // root container testUrl.Path = "/" isContainer = util.urlIsContainerOrVirtualDirectory(&testUrl) - c.Assert(isContainer, chk.Equals, true) + a.True(isContainer) } -func (s *copyUtilTestSuite) TestIPIsContainerOrBlob(c *chk.C) { +func TestIPIsContainerOrBlob(t *testing.T) { + a := assert.New(t) util := copyHandlerUtil{} testIP := url.URL{Host: "127.0.0.1:8256", Path: "/account/container"} testURL := url.URL{Path: "/account/container"} isContainerIP := util.urlIsContainerOrVirtualDirectory(&testIP) isContainerURL := util.urlIsContainerOrVirtualDirectory(&testURL) - c.Assert(isContainerIP, chk.Equals, true) // IP endpoints contain the account in the path, making the container the second entry - 
c.Assert(isContainerURL, chk.Equals, false) // URL endpoints do not contain the account in the path, making the container the first entry. + a.True(isContainerIP) // IP endpoints contain the account in the path, making the container the second entry + a.False(isContainerURL) // URL endpoints do not contain the account in the path, making the container the first entry. testURL.Path = "/account/container/folder" testIP.Path = "/account/container/folder" isContainerIP = util.urlIsContainerOrVirtualDirectory(&testIP) isContainerURL = util.urlIsContainerOrVirtualDirectory(&testURL) - c.Assert(isContainerIP, chk.Equals, false) // IP endpoints contain the account in the path, making the container the second entry - c.Assert(isContainerURL, chk.Equals, false) // URL endpoints do not contain the account in the path, making the container the first entry. + a.False(isContainerIP) // IP endpoints contain the account in the path, making the container the second entry + a.False(isContainerURL) // URL endpoints do not contain the account in the path, making the container the first entry. testURL.Path = "/account/container/folder/" testIP.Path = "/account/container/folder/" isContainerIP = util.urlIsContainerOrVirtualDirectory(&testIP) isContainerURL = util.urlIsContainerOrVirtualDirectory(&testURL) - c.Assert(isContainerIP, chk.Equals, true) // IP endpoints contain the account in the path, making the container the second entry - c.Assert(isContainerURL, chk.Equals, true) // URL endpoints do not contain the account in the path, making the container the first entry. + a.True(isContainerIP) // IP endpoints contain the account in the path, making the container the second entry + a.True(isContainerURL) // URL endpoints do not contain the account in the path, making the container the first entry. // The behaviour isn't too different from here. 
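// Taken together, the cases above show the rule: a trailing slash, or a path that stops at the container segment ("/container" for URL-style endpoints, "/account/container" for IP-style ones), is treated as a container or virtual directory; a deeper path without a trailing slash is treated as a blob.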
-} +} \ No newline at end of file diff --git a/cmd/gcpNameResolver_test.go b/cmd/gcpNameResolver_test.go index f041a1391..5a9693a31 100644 --- a/cmd/gcpNameResolver_test.go +++ b/cmd/gcpNameResolver_test.go @@ -2,63 +2,61 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "strings" + "testing" ) -type gcpNameResolverTestSuite struct{} - -var _ = chk.Suite(&gcpNameResolverTestSuite{}) - -func (s *gcpNameResolverTestSuite) TestGCPBucketNameToAzureResourceResolverBucketName(c *chk.C) { +func TestGCPBucketNameToAzureResourceResolverBucketName(t *testing.T) { + a := assert.New(t) r := NewGCPBucketNameToAzureResourcesResolver([]string{"bucket.name.1"}) resolvedName, err := r.ResolveName("bucket.name.1") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-1") + a.Nil(err) + a.Equal("bucket-name-1", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"bucket-name"}) resolvedName, err = r.ResolveName("bucket-name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name") + a.Nil(err) + a.Equal("bucket-name", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"bucket--name"}) resolvedName, err = r.ResolveName("bucket--name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name") + a.Nil(err) + a.Equal("bucket-2-name", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"bucketvalidname"}) resolvedName, err = r.ResolveName("bucketvalidname") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucketvalidname") + a.Nil(err) + a.Equal("bucketvalidname", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"0123456789.0123456789.0123456789.012345678901234567890123456789"}) resolvedName, err = r.ResolveName("0123456789.0123456789.0123456789.012345678901234567890123456789") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "0123456789-0123456789-0123456789-012345678901234567890123456789") + a.Nil(err) + a.Equal("0123456789-0123456789-0123456789-012345678901234567890123456789", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"0123456789--01234567890123456789012345678901234567890123456789"}) resolvedName, err = r.ResolveName("0123456789--01234567890123456789012345678901234567890123456789") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "0123456789-2-01234567890123456789012345678901234567890123456789") + a.Nil(err) + a.Equal("0123456789-2-01234567890123456789012345678901234567890123456789", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"bucket_name_1"}) resolvedName, err = r.ResolveName("bucket_name_1") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-1") + a.Nil(err) + a.Equal("bucket-name-1", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"bucket__name"}) resolvedName, err = r.ResolveName("bucket__name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name") + a.Nil(err) + a.Equal("bucket-2-name", resolvedName) r = NewGCPBucketNameToAzureResourcesResolver([]string{"bucket-_name"}) resolvedName, err = r.ResolveName("bucket-_name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name") - + a.Nil(err) + a.Equal("bucket-2-name", resolvedName) } -func (s *gcpNameResolverTestSuite) TestGCPBucketNameToAzureResourceResolverMultipleBucketNames(c *chk.C) { +func 
TestGCPBucketNameToAzureResourceResolverMultipleBucketNames(t *testing.T) { + a := assert.New(t) r := NewGCPBucketNameToAzureResourcesResolver( []string{"bucket.name", "bucket-name", "bucket-name-2", "bucket-name-3", "bucket---name", "bucket-s--s---s", "abcdefghijklmnopqrstuvwxyz-s--s---s-s0123456789", @@ -66,87 +64,81 @@ func (s *gcpNameResolverTestSuite) TestGCPBucketNameToAzureResourceResolverMulti "a-b---c", "a.b---c", "blah__name", "blah_bucket_1"}) // Need resolve resolvedName, err := r.ResolveName("bucket---name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-3-name") + a.Nil(err) + a.Equal("bucket-3-name", resolvedName) resolvedName, err = r.ResolveName("bucket-s--s---s") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-s-2-s-3-s") + a.Nil(err) + a.Equal("bucket-s-2-s-3-s", resolvedName) resolvedName, err = r.ResolveName("abcdefghijklmnopqrstuvwxyz-s--s---s-s0123456789") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "abcdefghijklmnopqrstuvwxyz-s-2-s-3-s-s0123456789") + a.Nil(err) + a.Equal("abcdefghijklmnopqrstuvwxyz-s-2-s-3-s-s0123456789", resolvedName) // Resolved, and need add further add suffix resolvedName, err = r.ResolveName("bucket.name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-4") + a.Nil(err) + a.Equal("bucket-name-4", resolvedName) resolvedName, err = r.ResolveName("bucket--name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name-2") + a.Nil(err) + a.Equal("bucket-2-name-2", resolvedName) // Names don't need resolve resolvedName, err = r.ResolveName("bucket-name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name") + a.Nil(err) + a.Equal("bucket-name", resolvedName) resolvedName, err = r.ResolveName("bucket-name-2") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-2") + a.Nil(err) + a.Equal("bucket-name-2", resolvedName) resolvedName, err = r.ResolveName("bucket-name-3") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-3") + a.Nil(err) + a.Equal("bucket-name-3", resolvedName) resolvedName, err = r.ResolveName("bucket-2-name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name") + a.Nil(err) + a.Equal("bucket-2-name", resolvedName) resolvedName, err = r.ResolveName("bucket-2-name-3") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name-3") + a.Nil(err) + a.Equal("bucket-2-name-3", resolvedName) resolvedName, err = r.ResolveName("bucket.compose----name.1---hello") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-compose-4-name-1-3-hello") + a.Nil(err) + a.Equal("bucket-compose-4-name-1-3-hello", resolvedName) resolvedName, err = r.ResolveName("blah__name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "blah-2-name") + a.Nil(err) + a.Equal("blah-2-name", resolvedName) resolvedName, err = r.ResolveName("blah_bucket_1") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "blah-bucket-1") + a.Nil(err) + a.Equal("blah-bucket-1", resolvedName) resolvedNameCollision1, err := r.ResolveName("a.b---c") - c.Assert(err, chk.IsNil) + a.Nil(err) resolvedNameCollision2, err := r.ResolveName("a-b---c") - c.Assert(err, chk.IsNil) + a.Nil(err) - c.Assert(common.Iffint8(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c", 1, 0), chk.Equals, int8(1)) - c.Assert(common.Iffint8(resolvedNameCollision1 == "a-b-3-c-2", 1, 
0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c-2", 1, 0), chk.Equals, int8(1)) + a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c", 1, 0)) + a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c-2", 1, 0)) } -func (s *gcpNameResolverTestSuite) TestGCPBucketNameToAzureResourceResolverNegative(c *chk.C) { +func TestGCPBucketNameToAzureResourceResolverNegative(t *testing.T) { + a := assert.New(t) r := NewGCPBucketNameToAzureResourcesResolver([]string{"0123456789.0123456789.0123456789.012345678901234567890123456789", "0123456789-0123456789-0123456789-012345678901234567890123456789"}) // with length 64 _, err := r.ResolveName("0123456789.0123456789.0123456789.012345678901234567890123456789") - c.Assert(err, chk.NotNil) - c.Assert( - strings.Contains(err.Error(), "invalid for destination"), - chk.Equals, - true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), "invalid for destination")) r = NewGCPBucketNameToAzureResourcesResolver([]string{"0123456789--0123456789-0123456789012345678901234567890123456789"}) _, err = r.ResolveName("0123456789--0123456789-0123456789012345678901234567890123456789") - c.Assert(err, chk.NotNil) - c.Assert( - strings.Contains(err.Error(), "invalid for destination"), - chk.Equals, - true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), "invalid for destination")) r = NewGCPBucketNameToAzureResourcesResolver([]string{"namea"}) _, err = r.ResolveName("specialnewnameb") - c.Assert(err, chk.IsNil) - -} + a.Nil(err) +} \ No newline at end of file diff --git a/cmd/jobsList_test.go b/cmd/jobsList_test.go index bc0473dd3..48e393fca 100644 --- a/cmd/jobsList_test.go +++ b/cmd/jobsList_test.go @@ -21,18 +21,14 @@ package cmd import ( - "time" - "github.com/Azure/azure-storage-azcopy/v10/common" - - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" + "time" ) -type jobsListTestSuite struct{} - -var _ = chk.Suite(&jobsListTestSuite{}) - -func (s *jobsListTestSuite) TestSortJobs(c *chk.C) { +func TestSortJobs(t *testing.T) { + a := assert.New(t) // setup job2 := common.JobIDDetails{ JobId: common.NewJobID(), @@ -61,7 +57,7 @@ func (s *jobsListTestSuite) TestSortJobs(c *chk.C) { sortJobs(jobsList) // verify - c.Assert(jobsList[0], chk.DeepEquals, job0) - c.Assert(jobsList[1], chk.DeepEquals, job1) - c.Assert(jobsList[2], chk.DeepEquals, job2) -} + a.Equal(job0, jobsList[0]) + a.Equal(job1, jobsList[1]) + a.Equal(job2, jobsList[2]) +} \ No newline at end of file diff --git a/cmd/s3NameResolver_test.go b/cmd/s3NameResolver_test.go index 967f46265..7a4023fe7 100644 --- a/cmd/s3NameResolver_test.go +++ b/cmd/s3NameResolver_test.go @@ -21,50 +21,48 @@ package cmd import ( + "github.com/stretchr/testify/assert" "strings" + "testing" "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" ) -// Hookup to the testing framework -type s3NameResolverTestSuite struct{} - -var _ = chk.Suite(&s3NameResolverTestSuite{}) - -func (s *s3NameResolverTestSuite) TestS3BucketNameToAzureResourceResolverSingleBucketName(c *chk.C) { +func TestS3BucketNameToAzureResourceResolverSingleBucketName(t *testing.T) { + a := assert.New(t) r := NewS3BucketNameToAzureResourcesResolver([]string{"bucket.name.1"}) resolvedName, err := r.ResolveName("bucket.name.1") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-1") + a.Nil(err) + a.Equal("bucket-name-1", resolvedName) r 
= NewS3BucketNameToAzureResourcesResolver([]string{"bucket-name"}) resolvedName, err = r.ResolveName("bucket-name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name") + a.Nil(err) + a.Equal("bucket-name", resolvedName) r = NewS3BucketNameToAzureResourcesResolver([]string{"bucket--name"}) resolvedName, err = r.ResolveName("bucket--name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name") + a.Nil(err) + a.Equal("bucket-2-name", resolvedName) r = NewS3BucketNameToAzureResourcesResolver([]string{"bucketvalidname"}) resolvedName, err = r.ResolveName("bucketvalidname") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucketvalidname") + a.Nil(err) + a.Equal("bucketvalidname", resolvedName) r = NewS3BucketNameToAzureResourcesResolver([]string{"0123456789.0123456789.0123456789.012345678901234567890123456789"}) resolvedName, err = r.ResolveName("0123456789.0123456789.0123456789.012345678901234567890123456789") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "0123456789-0123456789-0123456789-012345678901234567890123456789") + a.Nil(err) + a.Equal("0123456789-0123456789-0123456789-012345678901234567890123456789", resolvedName) r = NewS3BucketNameToAzureResourcesResolver([]string{"0123456789--01234567890123456789012345678901234567890123456789"}) resolvedName, err = r.ResolveName("0123456789--01234567890123456789012345678901234567890123456789") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "0123456789-2-01234567890123456789012345678901234567890123456789") + a.Nil(err) + a.Equal("0123456789-2-01234567890123456789012345678901234567890123456789", resolvedName) } -func (s *s3NameResolverTestSuite) TestS3BucketNameToAzureResourceResolverMultipleBucketNames(c *chk.C) { +func TestS3BucketNameToAzureResourceResolverMultipleBucketNames(t *testing.T) { + a := assert.New(t) r := NewS3BucketNameToAzureResourcesResolver( []string{"bucket.name", "bucket-name", "bucket-name-2", "bucket-name-3", "bucket---name", "bucket-s--s---s", "abcdefghijklmnopqrstuvwxyz-s--s---s-s0123456789", @@ -72,78 +70,73 @@ func (s *s3NameResolverTestSuite) TestS3BucketNameToAzureResourceResolverMultipl "a-b---c", "a.b---c"}) // Need resolve resolvedName, err := r.ResolveName("bucket---name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-3-name") + a.Nil(err) + a.Equal("bucket-3-name", resolvedName) resolvedName, err = r.ResolveName("bucket-s--s---s") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-s-2-s-3-s") + a.Nil(err) + a.Equal("bucket-s-2-s-3-s", resolvedName) resolvedName, err = r.ResolveName("abcdefghijklmnopqrstuvwxyz-s--s---s-s0123456789") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "abcdefghijklmnopqrstuvwxyz-s-2-s-3-s-s0123456789") + a.Nil(err) + a.Equal("abcdefghijklmnopqrstuvwxyz-s-2-s-3-s-s0123456789", resolvedName) // Resolved, and need add further add suffix resolvedName, err = r.ResolveName("bucket.name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-4") + a.Nil(err) + a.Equal("bucket-name-4", resolvedName) resolvedName, err = r.ResolveName("bucket--name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name-2") + a.Nil(err) + a.Equal("bucket-2-name-2", resolvedName) // Names don't need resolve resolvedName, err = r.ResolveName("bucket-name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name") + a.Nil(err) + a.Equal("bucket-name", resolvedName) resolvedName, err = 
r.ResolveName("bucket-name-2") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-2") + a.Nil(err) + a.Equal("bucket-name-2", resolvedName) resolvedName, err = r.ResolveName("bucket-name-3") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-name-3") + a.Nil(err) + a.Equal("bucket-name-3", resolvedName) resolvedName, err = r.ResolveName("bucket-2-name") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name") + a.Nil(err) + a.Equal("bucket-2-name", resolvedName) resolvedName, err = r.ResolveName("bucket-2-name-3") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-2-name-3") + a.Nil(err) + a.Equal("bucket-2-name-3", resolvedName) resolvedName, err = r.ResolveName("bucket.compose----name.1---hello") - c.Assert(err, chk.IsNil) - c.Assert(resolvedName, chk.Equals, "bucket-compose-4-name-1-3-hello") + a.Nil(err) + a.Equal("bucket-compose-4-name-1-3-hello", resolvedName) resolvedNameCollision1, err := r.ResolveName("a.b---c") - c.Assert(err, chk.IsNil) + a.Nil(err) resolvedNameCollision2, err := r.ResolveName("a-b---c") - c.Assert(err, chk.IsNil) + a.Nil(err) - c.Assert(common.Iffint8(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c", 1, 0), chk.Equals, int8(1)) - c.Assert(common.Iffint8(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c-2", 1, 0), chk.Equals, int8(1)) + a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c", 1, 0)) + a.EqualValues(1, common.Iffint8(resolvedNameCollision1 == "a-b-3-c-2", 1, 0)^common.Iffint8(resolvedNameCollision2 == "a-b-3-c-2", 1, 0)) } -func (s *s3NameResolverTestSuite) TestS3BucketNameToAzureResourceResolverNegative(c *chk.C) { +func TestS3BucketNameToAzureResourceResolverNegative(t *testing.T) { + a := assert.New(t) r := NewS3BucketNameToAzureResourcesResolver([]string{"0123456789.0123456789.0123456789.012345678901234567890123456789", "0123456789-0123456789-0123456789-012345678901234567890123456789"}) // with length 64 _, err := r.ResolveName("0123456789.0123456789.0123456789.012345678901234567890123456789") - c.Assert(err, chk.NotNil) - c.Assert( - strings.Contains(err.Error(), "invalid for the destination"), - chk.Equals, - true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), "invalid for the destination")) r = NewS3BucketNameToAzureResourcesResolver([]string{"0123456789--0123456789-0123456789012345678901234567890123456789"}) _, err = r.ResolveName("0123456789--0123456789-0123456789012345678901234567890123456789") - c.Assert(err, chk.NotNil) - c.Assert( - strings.Contains(err.Error(), "invalid for the destination"), - chk.Equals, - true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), "invalid for the destination")) r = NewS3BucketNameToAzureResourcesResolver([]string{"namea"}) _, err = r.ResolveName("specialnewnameb") - c.Assert(err, chk.IsNil) // Bucket resolver now supports new names being injected -} + a.Nil(err) // Bucket resolver now supports new names being injected +} \ No newline at end of file diff --git a/cmd/versionChecker_test.go b/cmd/versionChecker_test.go index 34670c2e7..9f30d6ae2 100644 --- a/cmd/versionChecker_test.go +++ b/cmd/versionChecker_test.go @@ -21,70 +21,70 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type versionCheckerTestSuite struct{} - -var _ = chk.Suite(&versionCheckerTestSuite{}) - -func (s 
*versionCheckerTestSuite) TestVersionEquality(c *chk.C) { +func TestVersionEquality(t *testing.T) { + a := assert.New(t) // simple equal v1, _ := NewVersion("10.0.0") v2, _ := NewVersion("10.0.0") - c.Assert(v1.compare(*v2), chk.Equals, 0) + a.Zero(v1.compare(*v2)) // preview version equal v1, _ = NewVersion("10.0.0-preview") v2, _ = NewVersion("10.0.0-preview") - c.Assert(v1.compare(*v2), chk.Equals, 0) + a.Zero(v1.compare(*v2)) // future version equal v1, _ = NewVersion("10.0.0-preview") v2, _ = NewVersion("10.0.0-beta5") - c.Assert(v1.compare(*v2), chk.Equals, 0) + a.Zero(v1.compare(*v2)) } -func (s *versionCheckerTestSuite) TestVersionSuperiority(c *chk.C) { +func TestVersionSuperiority(t *testing.T) { + a := assert.New(t) // major version bigger v1, _ := NewVersion("11.3.0") v2, _ := NewVersion("10.8.3") - c.Assert(v1.compare(*v2), chk.Equals, 1) + a.Equal(1, v1.compare(*v2)) // minor version bigger v1, _ = NewVersion("15.5.6") v2, _ = NewVersion("15.3.5") - c.Assert(v1.compare(*v2), chk.Equals, 1) + a.Equal(1, v1.compare(*v2)) // patch version bigger v1, _ = NewVersion("15.5.6") v2, _ = NewVersion("15.5.5") - c.Assert(v1.compare(*v2), chk.Equals, 1) + a.Equal(1, v1.compare(*v2)) // preview bigger v1, _ = NewVersion("15.5.5") v2, _ = NewVersion("15.5.5-preview") - c.Assert(v1.compare(*v2), chk.Equals, 1) + a.Equal(1, v1.compare(*v2)) } -func (s *versionCheckerTestSuite) TestVersionInferiority(c *chk.C) { +func TestVersionInferiority(t *testing.T) { + a := assert.New(t) // major version smaller v1, _ := NewVersion("10.5.6") v2, _ := NewVersion("11.8.3") - c.Assert(v1.compare(*v2), chk.Equals, -1) + a.Equal(-1, v1.compare(*v2)) // minor version smaller v1, _ = NewVersion("15.3.6") v2, _ = NewVersion("15.5.5") - c.Assert(v1.compare(*v2), chk.Equals, -1) + a.Equal(-1, v1.compare(*v2)) // patch version smaller v1, _ = NewVersion("15.5.5") v2, _ = NewVersion("15.5.6") - c.Assert(v1.compare(*v2), chk.Equals, -1) + a.Equal(-1, v1.compare(*v2)) // preview smaller v1, _ = NewVersion("15.5.5-preview") v2, _ = NewVersion("15.5.5") - c.Assert(v1.compare(*v2), chk.Equals, -1) -} + a.Equal(-1, v1.compare(*v2)) +} \ No newline at end of file diff --git a/cmd/zc_traverser_local_test.go b/cmd/zc_traverser_local_test.go index 44e3bc5f6..d64c3a957 100644 --- a/cmd/zc_traverser_local_test.go +++ b/cmd/zc_traverser_local_test.go @@ -1,16 +1,13 @@ package cmd import ( + "github.com/stretchr/testify/assert" "os" - - chk "gopkg.in/check.v1" + "testing" ) -type localTraverserTestSuite struct{} - -var _ = chk.Suite(&localTraverserTestSuite{}) - -func (s *localTraverserTestSuite) TestCleanLocalPath(c *chk.C) { +func TestCleanLocalPath(t *testing.T) { + a := assert.New(t) testCases := map[string]string{ "/user/foo/bar": "/user/foo/bar", // regular unix path with no change "/user/foo/bar/": "/user/foo/bar", // regular unix path with extra slash @@ -21,15 +18,16 @@ func (s *localTraverserTestSuite) TestCleanLocalPath(c *chk.C) { } for orig, expected := range testCases { - c.Assert(cleanLocalPath(orig), chk.Equals, expected) + a.Equal(expected, cleanLocalPath(orig)) } } -func (s *localTraverserTestSuite) TestCleanLocalPathForWindows(c *chk.C) { +func TestCleanLocalPathForWindows(t *testing.T) { + a := assert.New(t) // ignore these tests when not running on Windows // as the path cleaning behavior depends on the platform if os.PathSeparator != '\\' { - c.Skip("not running since the test applies to Windows only") + t.Skip("not running since the test applies to Windows only") } // Paths on Windows get consolidated 
to backwards-slash typically. @@ -47,6 +45,6 @@ func (s *localTraverserTestSuite) TestCleanLocalPathForWindows(c *chk.C) { } for orig, expected := range testCases { - c.Assert(cleanLocalPath(orig), chk.Equals, expected) + a.Equal(expected, cleanLocalPath(orig)) } -} +} \ No newline at end of file diff --git a/cmd/zt_block_size_to_bytes_test.go b/cmd/zt_block_size_to_bytes_test.go index 1365bcd30..3959aa25f 100644 --- a/cmd/zt_block_size_to_bytes_test.go +++ b/cmd/zt_block_size_to_bytes_test.go @@ -21,15 +21,12 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type blockSizeFilterSuite struct{} - -var _ = chk.Suite(&blockSizeFilterSuite{}) - -func (s *genericFilterSuite) TestConversions(c *chk.C) { - +func TestConversions(t *testing.T) { + a := assert.New(t) testData := []struct { floatMiB float64 expectedBytes int64 @@ -46,9 +43,9 @@ func (s *genericFilterSuite) TestConversions(c *chk.C) { for _, d := range testData { actualBytes, err := blockSizeInBytes(d.floatMiB) if d.expectedErrorMsg != "" { - c.Check(err.Error(), chk.Equals, d.expectedErrorMsg) + a.Equal(d.expectedErrorMsg, err.Error()) } else { - c.Check(actualBytes, chk.Equals, d.expectedBytes) + a.Equal(d.expectedBytes, actualBytes) } } -} +} \ No newline at end of file diff --git a/cmd/zt_copy_blob_download_test.go b/cmd/zt_copy_blob_download_test.go index f75c8bf77..79eed563f 100644 --- a/cmd/zt_copy_blob_download_test.go +++ b/cmd/zt_copy_blob_download_test.go @@ -23,21 +23,22 @@ package cmd import ( "encoding/json" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/stretchr/testify/assert" "os" "path" "path/filepath" "sort" "strings" - - "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" + "testing" "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/Azure/azure-storage-blob-go/azblob" ) -func (s *cmdIntegrationSuite) TestInferredStripTopDirDownload(c *chk.C) { +func TestInferredStripTopDirDownload(t *testing.T) { + a := assert.New(t) bsu := getBSU() - cURL, cName := createNewContainer(c, bsu) + cURL, cName := createNewContainer(a, bsu) blobNames := []string{ "*", // File name that we want to retain compatibility with @@ -49,11 +50,11 @@ func (s *cmdIntegrationSuite) TestInferredStripTopDirDownload(c *chk.C) { // ----- TEST # 1: Test inferred as false by using escaped * ----- // set up container name - scenarioHelper{}.generateBlobsFromList(c, cURL, blobNames, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, cURL, blobNames, blockBlobDefaultData) - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) - rawContainerURL := scenarioHelper{}.getRawContainerURLWithSAS(c, cName) + rawContainerURL := scenarioHelper{}.getRawContainerURLWithSAS(a, cName) // Don't add /* while still in URL form-- it will get improperly encoded, and azcopy will ignore it. 
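Reviewer note, not part of the patch: every file touched in this change follows the same mechanical conversion from a gocheck suite method to a plain `testing` function with a testify asserter. A minimal, self-contained sketch of that mapping (hypothetical names, canonical testify import path):

```go
package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

// resolve is a hypothetical stand-in for whatever function a converted test exercises.
func resolve(name string) (string, error) { return name, nil }

// Under gocheck, the same test was a suite method:
//
//	func (s *someSuite) TestResolve(c *chk.C) {
//		out, err := resolve("bucket-name")
//		c.Assert(err, chk.IsNil)
//		c.Assert(out, chk.Equals, "bucket-name")
//	}
//
// After the conversion it is a plain Go test with a testify asserter:
func TestResolve(t *testing.T) {
	a := assert.New(t)

	out, err := resolve("bucket-name")
	a.Nil(err)                               // chk.IsNil              -> a.Nil
	a.Equal("bucket-name", out)              // chk.Equals             -> a.Equal (expected value comes first)
	a.NotZero(len(out))                      // chk.Not(chk.Equals), 0 -> a.NotZero
	a.True(strings.HasPrefix(out, "bucket")) // chk.Equals, true       -> a.True
}
```

Two details worth calling out in review: `chk.DeepEquals` maps to plain `a.Equal`, since testify's `Equal` already uses `reflect.DeepEqual`; and the name-collision checks earlier in this change, which XOR two `common.Iffint8` indicators, are asserted with `a.EqualValues(1, ...)` rather than `a.Equal`, because the XOR result is an `int8` while the literal `1` is an `int`, and `Equal` does not compare across types.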
rawContainerString := rawContainerURL.String() @@ -74,14 +75,14 @@ func (s *cmdIntegrationSuite) TestInferredStripTopDirDownload(c *chk.C) { // Test inference of striptopdir cooked, err := raw.cook() - c.Assert(err, chk.IsNil) - c.Assert(cooked.StripTopDir, chk.Equals, false) + a.Nil(err) + a.False(cooked.StripTopDir) // Test and ensure only one file is being downloaded - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) }) // ----- TEST # 2: Test inferred as true by using unescaped * ----- @@ -101,14 +102,14 @@ func (s *cmdIntegrationSuite) TestInferredStripTopDirDownload(c *chk.C) { // Test inference of striptopdir cooked, err = raw.cook() - c.Assert(err, chk.IsNil) - c.Assert(cooked.StripTopDir, chk.Equals, true) + a.Nil(err) + a.True(cooked.StripTopDir) // Test and ensure only 3 files get scheduled, nothing under the sub-directory - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) }) // ----- TEST # 3: Attempt to use the * in the folder name without encoding ---- @@ -128,8 +129,8 @@ func (s *cmdIntegrationSuite) TestInferredStripTopDirDownload(c *chk.C) { // test error cooked, err = raw.cook() - c.Assert(err, chk.NotNil) - c.Assert(err.Error(), StringContains, "cannot use wildcards") + a.NotNil(err) + a.Contains(err.Error(), "cannot use wildcards") // no actual test needed-- this is where the error lives. @@ -150,27 +151,28 @@ func (s *cmdIntegrationSuite) TestInferredStripTopDirDownload(c *chk.C) { // test cook cooked, err = raw.cook() - c.Assert(err, chk.IsNil) - c.Assert(cooked.StripTopDir, chk.Equals, true) + a.Nil(err) + a.True(cooked.StripTopDir) // Test and ensure only one file got scheduled - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) }) } // Test downloading the entire account. -func (s *cmdIntegrationSuite) TestDownloadAccount(c *chk.C) { +func TestDownloadAccount(t *testing.T) { + a := assert.New(t) bsu := getBSU() - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) p, err := InitPipeline(ctx, common.ELocation.Blob(), common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, pipeline.LogNone, common.ETrailingDotOption.Enable()) - c.Assert(err, chk.IsNil) + a.Nil(err) // Just in case there are no existing containers... - curl, _ := createNewContainer(c, bsu) - scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, curl, "") + curl, _ := createNewContainer(a, bsu) + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, curl, "") // Traverse the account ahead of time and determine the relative paths for testing. 
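Reviewer note, not part of the patch: the download tests in this file all run against the same harness: an `interceptor` registered as the mocked RPC captures whatever transfers the enumerator would schedule, `runCopyAndVerify` executes the command and passes its error to a callback, and the assertions then inspect `mockedRPC.transfers`. The sketch below is a hypothetical, stripped-down version of that harness, included only to show how the converted assertions read against it:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// transfer and capturingRPC are hypothetical stand-ins for the real interceptor:
// they only record what the enumerator would have scheduled.
type transfer struct{ Source, Destination string }

type capturingRPC struct{ transfers []transfer }

func (r *capturingRPC) schedule(tr transfer) { r.transfers = append(r.transfers, tr) }

// runCopy is a stand-in for runCopyAndVerify: it "runs" the job against the
// capturing RPC and hands the resulting error to the verification callback.
func runCopy(rpc *capturingRPC, verify func(err error)) {
	rpc.schedule(transfer{Source: "/dir/a.txt", Destination: "/dst/dir/a.txt"})
	verify(nil)
}

func TestCapturePattern(t *testing.T) {
	a := assert.New(t)
	mockedRPC := &capturingRPC{}

	runCopy(mockedRPC, func(err error) {
		a.Nil(err)                           // the job completed without error
		a.Equal(1, len(mockedRPC.transfers)) // exactly one transfer was scheduled
		a.Equal("/dst/dir/a.txt", mockedRPC.transfers[0].Destination)
	})
}
```

The real harness additionally validates paths through `validateDownloadTransfersAreScheduled`; the sketch inlines a single equality check instead.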
relPaths := make([]string, 0) // Use a map for easy lookup @@ -182,10 +184,10 @@ func (s *cmdIntegrationSuite) TestDownloadAccount(c *chk.C) { return nil } err = blobTraverser.Traverse(noPreProccessor, processor, []ObjectFilter{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up a destination - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -196,26 +198,27 @@ func (s *cmdIntegrationSuite) TestDownloadAccount(c *chk.C) { raw := getDefaultCopyRawInput(rawBSU.String(), dstDirName) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateDownloadTransfersAreScheduled(c, "", "", relPaths, mockedRPC) + validateDownloadTransfersAreScheduled(a, "", "", relPaths, mockedRPC) }) } // Test downloading the entire account. -func (s *cmdIntegrationSuite) TestDownloadAccountWildcard(c *chk.C) { +func TestDownloadAccountWildcard(t *testing.T) { + a := assert.New(t) bsu := getBSU() - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) p, err := InitPipeline(ctx, common.ELocation.Blob(), common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, pipeline.LogNone, common.ETrailingDotOption.Enable()) - c.Assert(err, chk.IsNil) + a.Nil(err) // Create a unique container to be targeted. cname := generateName("blah-unique-blah", 63) curl := bsu.NewContainerURL(cname) _, err = curl.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) - c.Assert(err, chk.IsNil) - scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, curl, "") + a.Nil(err) + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, curl, "") // update the raw BSU to match the unique container name rawBSU.Path = "/blah-unique-blah*" @@ -230,10 +233,10 @@ func (s *cmdIntegrationSuite) TestDownloadAccountWildcard(c *chk.C) { return nil } err = blobTraverser.Traverse(noPreProccessor, processor, []ObjectFilter{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up a destination - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -244,30 +247,31 @@ func (s *cmdIntegrationSuite) TestDownloadAccountWildcard(c *chk.C) { raw := getDefaultCopyRawInput(rawBSU.String(), dstDirName) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateDownloadTransfersAreScheduled(c, "", "", relPaths, mockedRPC) + validateDownloadTransfersAreScheduled(a, "", "", relPaths, mockedRPC) }) } // regular blob->local file download -func (s *cmdIntegrationSuite) TestDownloadSingleBlobToFile(c *chk.C) { +func TestDownloadSingleBlobToFile(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, 
blockBlobDefaultData) + a.NotNil(containerURL) // set up the destination as a single file - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := "whatever" - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) // set up interceptor mockedRPC := interceptor{} @@ -275,14 +279,14 @@ func (s *cmdIntegrationSuite) TestDownloadSingleBlobToFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) raw := getDefaultCopyRawInput(rawBlobURLWithSAS.String(), filepath.Join(dstDirName, dstFileName)) // the file was created after the blob, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateDownloadTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + validateDownloadTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) // clean the RPC for the next test @@ -292,32 +296,33 @@ func (s *cmdIntegrationSuite) TestDownloadSingleBlobToFile(c *chk.C) { raw = getDefaultCopyRawInput(rawBlobURLWithSAS.String(), dstDirName) // the file was created after the blob, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // verify explicitly since the source and destination names will be different: // the source is "" since the given URL points to the blob itself // the destination should be the blob name, since the given local path points to the parent dir - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) - c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "") - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+blobName) + a.Equal(1, len(mockedRPC.transfers)) + a.Equal("", mockedRPC.transfers[0].Source) + a.Equal(common.AZCOPY_PATH_SEPARATOR_STRING+blobName, mockedRPC.transfers[0].Destination) }) } } // regular container->directory download -func (s *cmdIntegrationSuite) TestDownloadBlobContainer(c *chk.C) { +func TestDownloadBlobContainer(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobList)) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -326,44 +331,45 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainer(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := 
scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultCopyRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+containerName+common.AZCOPY_PATH_SEPARATOR_STRING, blobList, mockedRPC) + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+containerName+common.AZCOPY_PATH_SEPARATOR_STRING, blobList, mockedRPC) }) // turn off recursive, this time nothing should be transferred raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Zero(len(mockedRPC.transfers)) }) } // regular vdir->dir download -func (s *cmdIntegrationSuite) TestDownloadBlobVirtualDirectory(c *chk.C) { +func TestDownloadBlobVirtualDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() vdirName := "vdir1" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobList)) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -372,19 +378,19 @@ func (s *cmdIntegrationSuite) TestDownloadBlobVirtualDirectory(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawContainerURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) raw := getDefaultCopyRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+vdirName+common.AZCOPY_PATH_SEPARATOR_STRING, expectedTransfers, mockedRPC) }) @@ -392,31 +398,32 @@ func (s *cmdIntegrationSuite) 
TestDownloadBlobVirtualDirectory(c *chk.C) { raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Zero(len(mockedRPC.transfers)) }) } // blobs(from pattern)->directory download // TODO the current pattern matching behavior is inconsistent with the posix filesystem // update test after re-writing copy enumerators -func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithPattern(c *chk.C) { +func TestDownloadBlobContainerWithPattern(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobsToIgnore), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobsToIgnore)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.pdf", "includeSub/wow/amazing.pdf"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -425,21 +432,21 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithPattern(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) rawContainerURLWithSAS.Path = path.Join(rawContainerURLWithSAS.Path, string([]byte{0x00})) containerString := strings.ReplaceAll(rawContainerURLWithSAS.String(), "%00", "*") raw := getDefaultCopyRawInput(containerString, dstDirName) raw.recursive = true raw.include = "*.pdf" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, blobsToInclude, mockedRPC) }) @@ -447,34 +454,35 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithPattern(c *chk.C) { raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // only the top pdf should be included - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) - c.Assert(mockedRPC.transfers[0].Source, chk.Equals, mockedRPC.transfers[0].Destination) - 
c.Assert(strings.HasSuffix(mockedRPC.transfers[0].Source, ".pdf"), chk.Equals, true) - c.Assert(strings.Contains(mockedRPC.transfers[0].Source[1:], common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.Equal(1, len(mockedRPC.transfers)) + a.Equal(mockedRPC.transfers[0].Destination, mockedRPC.transfers[0].Source) + a.True(strings.HasSuffix(mockedRPC.transfers[0].Source, ".pdf")) + a.False(strings.Contains(mockedRPC.transfers[0].Source[1:], common.AZCOPY_PATH_SEPARATOR_STRING)) }) } // test for include with one regular expression -func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithRegexInclude(c *chk.C) { +func TestDownloadBlobContainerWithRegexInclude(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with blobs - containerURL, containerName := createNewContainer(c, bsu) - blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobsToIgnore), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobsToIgnore)) // add blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "subOne/tetingessssss.jpeg", "subOne/tessssst/hi.pdf"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -483,17 +491,17 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithRegexInclude(c *chk.C mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) rawContainerURLWithSAS.Path = path.Join(rawContainerURLWithSAS.Path, string([]byte{0x00})) containerString := strings.ReplaceAll(rawContainerURLWithSAS.String(), "%00", "*") raw := getDefaultCopyRawInput(containerString, dstDirName) raw.recursive = true raw.includeRegex = "es{4,}" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // comparing is names of files match actualTransfer := []string{} for i := 0; i < len(mockedRPC.transfers); i++ { @@ -501,31 +509,32 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithRegexInclude(c *chk.C } sort.Strings(actualTransfer) sort.Strings(blobsToInclude) - c.Assert(actualTransfer, chk.DeepEquals, blobsToInclude) + a.Equal(blobsToInclude, actualTransfer) // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, blobsToInclude, mockedRPC) }) } // test multiple regular expression with include -func (s 
*cmdIntegrationSuite) TestDownloadBlobContainerWithMultRegexInclude(c *chk.C) { +func TestDownloadBlobContainerWithMultRegexInclude(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with blobs - containerURL, containerName := createNewContainer(c, bsu) - blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobsToIgnore), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobsToIgnore := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobsToIgnore)) // add blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "zxcfile.txt", "subOne/tetingessssss.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -534,17 +543,17 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithMultRegexInclude(c *c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) rawContainerURLWithSAS.Path = path.Join(rawContainerURLWithSAS.Path, string([]byte{0x00})) containerString := strings.ReplaceAll(rawContainerURLWithSAS.String(), "%00", "*") raw := getDefaultCopyRawInput(containerString, dstDirName) raw.recursive = true raw.includeRegex = "es{4,};^zxc" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent // comparing is names of files, since not in order need to sort each string and the compare them @@ -554,27 +563,28 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithMultRegexInclude(c *c } sort.Strings(actualTransfer) sort.Strings(blobsToInclude) - c.Assert(actualTransfer, chk.DeepEquals, blobsToInclude) + a.Equal(blobsToInclude, actualTransfer) - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, blobsToInclude, mockedRPC) }) } // testing empty expressions for both include and exclude -func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithEmptyRegex(c *chk.C) { +func TestDownloadBlobContainerWithEmptyRegex(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with blobs - containerURL, containerName := createNewContainer(c, bsu) + containerURL, containerName := createNewContainer(a, bsu) // test empty regex flag so all blobs will be included since there is no filter - blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer 
deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobsToInclude), chk.Not(chk.Equals), 0) + blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobsToInclude)) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -583,7 +593,7 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithEmptyRegex(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) rawContainerURLWithSAS.Path = path.Join(rawContainerURLWithSAS.Path, string([]byte{0x00})) containerString := strings.ReplaceAll(rawContainerURLWithSAS.String(), "%00", "*") raw := getDefaultCopyRawInput(containerString, dstDirName) @@ -591,34 +601,35 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithEmptyRegex(c *chk.C) raw.includeRegex = "" raw.excludeRegex = "" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // do not need to check file names since all files for blobsToInclude are passed bc flags are empty // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, blobsToInclude, mockedRPC) }) } // testing exclude with one regular expression -func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithRegexExclude(c *chk.C) { +func TestDownloadBlobContainerWithRegexExclude(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with blobs - containerURL, containerName := createNewContainer(c, bsu) - blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobsToInclude), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobsToInclude)) // add blobs that we wish to exclude blobsToIgnore := []string{"tessssssssssssst.txt", "subOne/tetingessssss.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToIgnore, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToIgnore, blockBlobDefaultData) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -627,17 +638,17 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithRegexExclude(c *chk.C mockedRPC.init() // construct the raw input to simulate user input - 
rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) rawContainerURLWithSAS.Path = path.Join(rawContainerURLWithSAS.Path, string([]byte{0x00})) containerString := strings.ReplaceAll(rawContainerURLWithSAS.String(), "%00", "*") raw := getDefaultCopyRawInput(containerString, dstDirName) raw.recursive = true raw.excludeRegex = "es{4,}" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that only blobsTo - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // comparing is names of files, since not in order need to sort each string and the compare them actualTransfer := []string{} for i := 0; i < len(mockedRPC.transfers); i++ { @@ -645,31 +656,32 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithRegexExclude(c *chk.C } sort.Strings(actualTransfer) sort.Strings(blobsToInclude) - c.Assert(actualTransfer, chk.DeepEquals, blobsToInclude) + a.Equal(blobsToInclude, actualTransfer) // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, blobsToInclude, mockedRPC) }) } // testing exclude with multiple regular expressions -func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithMultRegexExclude(c *chk.C) { +func TestDownloadBlobContainerWithMultRegexExclude(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with blobs - containerURL, containerName := createNewContainer(c, bsu) - blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobsToInclude), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobsToInclude := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotEqual(0, len(blobsToInclude)) // add blobs that we wish to exclude blobsToIgnore := []string{"tessssssssssssst.txt", "subOne/dogs.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToIgnore, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToIgnore, blockBlobDefaultData) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -678,17 +690,17 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithMultRegexExclude(c *c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) rawContainerURLWithSAS.Path = path.Join(rawContainerURLWithSAS.Path, string([]byte{0x00})) containerString := strings.ReplaceAll(rawContainerURLWithSAS.String(), "%00", "*") raw := getDefaultCopyRawInput(containerString, dstDirName) raw.recursive = true raw.excludeRegex = "es{4,};o(g)" - runCopyAndVerify(c, raw, func(err error) { - 
c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // comparing is names of files, since not in order need to sort each string and the compare them actualTransfer := []string{} for i := 0; i < len(mockedRPC.transfers); i++ { @@ -696,28 +708,29 @@ func (s *cmdIntegrationSuite) TestDownloadBlobContainerWithMultRegexExclude(c *c } sort.Strings(actualTransfer) sort.Strings(blobsToInclude) - c.Assert(actualTransfer, chk.DeepEquals, blobsToInclude) + a.Equal(blobsToInclude, actualTransfer) // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, + validateDownloadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING, blobsToInclude, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestDryrunCopyLocalToBlob(c *chk.C) { +func TestDryrunCopyLocalToBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the local source blobsToInclude := []string{"AzURE2021.jpeg", "sub1/dir2/HELLO-4.txt", "sub1/test/testing.txt"} - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, blobsToInclude) - c.Assert(srcDirName, chk.NotNil) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, blobsToInclude) + a.NotNil(srcDirName) // set up the destination container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := interceptor{} @@ -727,41 +740,42 @@ func (s *cmdIntegrationSuite) TestDryrunCopyLocalToBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultCopyRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.dryrun = true raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) for i := 0; i < len(blobsToInclude); i++ { - c.Check(strings.Contains(msg[i], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[i], srcDirName), chk.Equals, true) - c.Check(strings.Contains(msg[i], dstContainerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], "DRYRUN: copy")) + a.True(strings.Contains(msg[i], srcDirName)) + a.True(strings.Contains(msg[i], dstContainerURL.String())) } - c.Check(testDryrunStatements(blobsToInclude, msg), chk.Equals, true) + a.True(testDryrunStatements(blobsToInclude, msg)) }) } -func (s *cmdIntegrationSuite) TestDryrunCopyBlobToBlob(c *chk.C) { +func TestDryrunCopyBlobToBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up src container - srcContainerURL, 
srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) blobsToInclude := []string{"AzURE2021.jpeg", "sub1/dir2/HELLO-4.txt", "sub1/test/testing.txt"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) - c.Assert(srcContainerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + a.NotNil(srcContainerURL) // set up the destination - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := interceptor{} @@ -771,41 +785,42 @@ func (s *cmdIntegrationSuite) TestDryrunCopyBlobToBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultCopyRawInput(rawContainerURLWithSAS.String(), rawDstContainerURLWithSAS.String()) raw.dryrun = true raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) for i := 0; i < len(blobsToInclude); i++ { - c.Check(strings.Contains(msg[i], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[i], srcContainerURL.String()), chk.Equals, true) - c.Check(strings.Contains(msg[i], dstContainerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], "DRYRUN: copy")) + a.True(strings.Contains(msg[i], srcContainerURL.String())) + a.True(strings.Contains(msg[i], dstContainerURL.String())) } - c.Check(testDryrunStatements(blobsToInclude, msg), chk.Equals, true) + a.True(testDryrunStatements(blobsToInclude, msg)) }) } -func (s *cmdIntegrationSuite) TestDryrunCopyBlobToBlobJson(c *chk.C) { +func TestDryrunCopyBlobToBlobJson(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up src container - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) blobsToInclude := []string{"AzURE2021.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) - c.Assert(srcContainerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) + a.NotNil(srcContainerURL) // set up the destination - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := 
interceptor{} @@ -815,42 +830,43 @@ func (s *cmdIntegrationSuite) TestDryrunCopyBlobToBlobJson(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - rawSrcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultCopyRawInput(rawSrcContainerURLWithSAS.String(), rawDstContainerURLWithSAS.String()) raw.dryrun = true raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := <-mockedLcm.dryrunLog copyMessage := common.CopyTransfer{} errMarshal := json.Unmarshal([]byte(msg), &copyMessage) - c.Assert(errMarshal, chk.IsNil) + a.Nil(errMarshal) // comparing some values of copyMessage - c.Check(strings.Compare(strings.Trim(copyMessage.Source, "/"), blobsToInclude[0]), chk.Equals, 0) - c.Check(strings.Compare(strings.Trim(copyMessage.Destination, "/"), blobsToInclude[0]), chk.Equals, 0) - c.Check(strings.Compare(copyMessage.EntityType.String(), common.EEntityType.File().String()), chk.Equals, 0) - c.Check(strings.Compare(string(copyMessage.BlobType), "BlockBlob"), chk.Equals, 0) + a.Zero(strings.Compare(strings.Trim(copyMessage.Source, "/"), blobsToInclude[0])) + a.Zero(strings.Compare(strings.Trim(copyMessage.Destination, "/"), blobsToInclude[0])) + a.Zero(strings.Compare(copyMessage.EntityType.String(), common.EEntityType.File().String())) + a.Zero(strings.Compare(string(copyMessage.BlobType), "BlockBlob")) }) } -func (s *cmdIntegrationSuite) TestDryrunCopyS3toBlob(c *chk.C) { - skipIfS3Disabled(c) +func TestDryrunCopyS3toBlob(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // set up src s3 bucket bucketName := generateBucketName() - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) objectList := []string{"AzURE2021.jpeg"} - scenarioHelper{}.generateObjects(c, s3Client, bucketName, objectList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, objectList) // initialize dst container dstContainerName := generateContainerName() @@ -863,40 +879,40 @@ func (s *cmdIntegrationSuite) TestDryrunCopyS3toBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - rawSrcS3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, "AzURE2021.jpeg") - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcS3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(a, "", bucketName, "AzURE2021.jpeg") + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcS3ObjectURL.String(), rawDstContainerURLWithSAS.String()) raw.dryrun = true raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err,
chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) dstPath := strings.Split(rawDstContainerURLWithSAS.String(), "?") - c.Check(strings.Contains(msg[0], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[0], rawSrcS3ObjectURL.String()), chk.Equals, true) - c.Check(strings.Contains(msg[0], dstPath[0]), chk.Equals, true) - - c.Check(testDryrunStatements(objectList, msg), chk.Equals, true) + a.True(strings.Contains(msg[0], "DRYRUN: copy")) + a.True(strings.Contains(msg[0], rawSrcS3ObjectURL.String())) + a.True(strings.Contains(msg[0], dstPath[0])) + a.True(testDryrunStatements(objectList, msg)) }) } -func (s *cmdIntegrationSuite) TestDryrunCopyGCPtoBlob(c *chk.C) { - skipIfGCPDisabled(c) +func TestDryrunCopyGCPtoBlob(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) gcpClient, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP client credentials not supplied") + t.Skip("GCP client credentials not supplied") } // set up src gcp bucket bucketName := generateBucketName() - createNewGCPBucketWithName(c, gcpClient, bucketName) - defer deleteGCPBucket(c, gcpClient, bucketName, true) + createNewGCPBucketWithName(a, gcpClient, bucketName) + defer deleteGCPBucket(gcpClient, bucketName, true) blobsToInclude := []string{"AzURE2021.jpeg"} - scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketName, blobsToInclude) - c.Assert(gcpClient, chk.NotNil) + scenarioHelper{}.generateGCPObjects(a, gcpClient, bucketName, blobsToInclude) + a.NotNil(gcpClient) // initialize dst container dstContainerName := generateContainerName() @@ -909,23 +925,22 @@ func (s *cmdIntegrationSuite) TestDryrunCopyGCPtoBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - rawSrcGCPObjectURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketName, "AzURE2021.jpeg") // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcGCPObjectURL := scenarioHelper{}.getRawGCPObjectURL(a, bucketName, "AzURE2021.jpeg") // Use default region + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcGCPObjectURL.String(), rawDstContainerURLWithSAS.String()) raw.dryrun = true raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) dstPath := strings.Split(rawDstContainerURLWithSAS.String(), "?") - c.Check(strings.Contains(msg[0], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[0], rawSrcGCPObjectURL.String()), chk.Equals, true) - c.Check(strings.Contains(msg[0], dstPath[0]), chk.Equals, true) - - c.Check(testDryrunStatements(blobsToInclude, msg), chk.Equals, true) + a.True(strings.Contains(msg[0], "DRYRUN: copy")) + a.True(strings.Contains(msg[0], rawSrcGCPObjectURL.String())) + a.True(strings.Contains(msg[0], dstPath[0])) + a.True(testDryrunStatements(blobsToInclude, msg)) }) -} +} \ No newline at end of file diff --git a/cmd/zt_copy_blob_file_test.go b/cmd/zt_copy_blob_file_test.go index 416f3b5c7..ccbfae601 100644 --- a/cmd/zt_copy_blob_file_test.go +++ 
b/cmd/zt_copy_blob_file_test.go @@ -22,19 +22,21 @@ package cmd import ( "fmt" + "github.com/stretchr/testify/assert" "strings" + "testing" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) // TestBlobAccountCopyToFileShareS2S actually ends up testing the entire account->container scenario as that is not dependent on destination or source. -func (s *cmdIntegrationSuite) TestBlobAccountCopyToFileShareS2S(c *chk.C) { +func TestBlobAccountCopyToFileShareS2S(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // Ensure no containers with similar naming schemes exist - cleanBlobAccount(c, bsu) + cleanBlobAccount(a, bsu) containerSources := map[string]azblob.ContainerURL{} expectedTransfers := make([]string, 0) @@ -45,21 +47,21 @@ func (s *cmdIntegrationSuite) TestBlobAccountCopyToFileShareS2S(c *chk.C) { // create the container containerSources[name] = bsu.NewContainerURL(name) _, err := containerSources[name].Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) - c.Assert(err, chk.IsNil) + a.Nil(err) // Generate the remote scenario - fileNames := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerSources[name], "") + fileNames := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerSources[name], "") fileNames = scenarioHelper{}.addPrefix(fileNames, name+"/") expectedTransfers = append(expectedTransfers, fileNames...) // Prepare to delete all 5 containers //noinspection GoDeferInLoop - defer deleteContainer(c, containerSources[name]) + defer deleteContainer(a, containerSources[name]) } // generate destination share - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, dstShareURL) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, dstShareURL) // initialize mocked RPC mockedRPC := interceptor{} @@ -67,36 +69,37 @@ func (s *cmdIntegrationSuite) TestBlobAccountCopyToFileShareS2S(c *chk.C) { mockedRPC.init() // generate raw input - blobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + blobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) blobServiceURLWithSAS.Path = "/blobacc-file*container*" // wildcard the container name - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultRawCopyInput(blobServiceURLWithSAS.String(), dstShareURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedTransfers)) + a.Equal(len(expectedTransfers), len(mockedRPC.transfers)) - validateS2STransfersAreScheduled(c, "/", "/", expectedTransfers, mockedRPC) + validateS2STransfersAreScheduled(a, "/", "/", expectedTransfers, mockedRPC) }) } // TestBlobCopyToFileS2SImplicitDstShare uses a service-level URL on the destination to implicitly create the destination share. -func (s *cmdIntegrationSuite) TestBlobCopyToFileS2SImplicitDstShare(c *chk.C) { +func TestBlobCopyToFileS2SImplicitDstShare(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // create source container - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) // prepare a destination container URL to be deleted. 
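Reviewer note, not part of the patch: the include/exclude tests converted earlier in this change compare the captured transfers against the expected blob list by sorting both slices and calling `a.Equal`, a faithful translation of the old `chk.DeepEquals` checks. testify also provides `ElementsMatch`, which performs the same order-insensitive comparison in one call; a possible follow-up cleanup, sketched here with hypothetical data:

```go
package example

import (
	"sort"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestScheduledTransferSets(t *testing.T) {
	a := assert.New(t)

	// Hypothetical data: what the test expects vs. what the interceptor captured.
	expected := []string{"important.pdf", "includeSub/amazing.pdf"}
	actual := []string{"includeSub/amazing.pdf", "important.pdf"} // scheduling order is not guaranteed

	// Pattern used in this patch: copy, sort both sides, then compare.
	se := append([]string(nil), expected...)
	sa := append([]string(nil), actual...)
	sort.Strings(se)
	sort.Strings(sa)
	a.Equal(se, sa)

	// Equivalent single call: ElementsMatch ignores ordering but still checks counts.
	a.ElementsMatch(expected, actual)
}
```

Either form fails with a readable diff; `ElementsMatch` simply removes the explicit sorting step.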
dstShareURL := fsu.NewShareURL(srcContainerName) - defer deleteShare(c, dstShareURL) + defer deleteShare(a, dstShareURL) // create a scenario on the source container - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "blobFileImplicitDest") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) // Ensure that at least one blob is present + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "blobFileImplicitDest") + a.NotZero(len(fileList)) // Ensure that at least one blob is present // initialize the mocked RPC mockedRPC := interceptor{} @@ -104,36 +107,37 @@ func (s *cmdIntegrationSuite) TestBlobCopyToFileS2SImplicitDstShare(c *chk.C) { mockedRPC.init() // Create raw arguments - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstServiceURLWithSAS := scenarioHelper{}.getRawFileServiceURLWithSAS(c) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstServiceURLWithSAS := scenarioHelper{}.getRawFileServiceURLWithSAS(a) raw := getDefaultRawCopyInput(srcContainerURLWithSAS.String(), dstServiceURLWithSAS.String()) // recursive is enabled by default // run the copy, check the container, and check the transfer success. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) // Check there was no error + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // Check there was no error _, err = dstShareURL.GetProperties(ctx) - c.Assert(err, chk.IsNil) // Ensure the destination share exists + a.Nil(err) // Ensure the destination share exists // Ensure the transfers were scheduled - validateS2STransfersAreScheduled(c, "/", "/"+srcContainerName+"/", fileList, mockedRPC) + validateS2STransfersAreScheduled(a, "/", "/"+srcContainerName+"/", fileList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestBlobCopyToFileS2SWithSingleFile(c *chk.C) { +func TestBlobCopyToFileS2SWithSingleFile(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteContainer(c, srcContainerURL) - defer deleteShare(c, dstShareURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteContainer(a, srcContainerURL) + defer deleteShare(a, dstShareURL) // copy to explicit destination for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source container with a single file - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, []string{fileName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, []string{fileName}, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -141,14 +145,14 @@ func (s *cmdIntegrationSuite) TestBlobCopyToFileS2SWithSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input for explicit destination - srcBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, fileName) - dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, dstShareName, fileName) + srcBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, fileName) + dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, fileName) raw := getDefaultRawCopyInput(srcBlobURLWithSAS.String(), dstFileURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - 
c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateS2STransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + validateS2STransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) } @@ -162,34 +166,35 @@ func (s *cmdIntegrationSuite) TestBlobCopyToFileS2SWithSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input for implicit destination - srcBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, fileName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, fileName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultRawCopyInput(srcBlobURLWithSAS.String(), dstShareURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // put the filename in the destination dir name // this is because validateS2STransfersAreScheduled dislikes when the relative paths differ // In this case, the relative path should absolutely differ. (explicit file path -> implicit) - validateS2STransfersAreScheduled(c, "", "/"+strings.ReplaceAll(fileName, "%", "%25"), []string{""}, mockedRPC) + validateS2STransfersAreScheduled(a, "", "/"+strings.ReplaceAll(fileName, "%", "%25"), []string{""}, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestContainerToShareCopyS2S(c *chk.C) { +func TestContainerToShareCopyS2S(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // Create source container and destination share, schedule their deletion - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteContainer(c, srcContainerURL) - defer deleteShare(c, dstShareURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteContainer(a, srcContainerURL) + defer deleteShare(a, dstShareURL) // set up the source container with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(fileList)) // set up the interceptor mockedRPC := interceptor{} @@ -197,39 +202,40 @@ func (s *cmdIntegrationSuite) TestContainerToShareCopyS2S(c *chk.C) { mockedRPC.init() // set up the raw input with recursive = true to copy - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultRawCopyInput(srcContainerURLWithSAS.String(), dstShareURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate the transfer count is correct - c.Assert(len(mockedRPC.transfers), chk.Equals, len(fileList)) + a.Equal(len(fileList), len(mockedRPC.transfers)) - validateS2STransfersAreScheduled(c, "/", "/", fileList, mockedRPC) + validateS2STransfersAreScheduled(a, "/", "/", fileList, mockedRPC) }) // turn off recursive and set recursive to false raw.recursive = false 
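The hunks above all follow one mechanical conversion: gocheck's c.Assert(actual, chk.Equals, expected) becomes testify's a.Equal(expected, actual), chk.IsNil/chk.NotNil become a.Nil/a.NotNil, and the custom StringContains checker becomes a.Contains. A minimal, stand-alone sketch of the target idiom (the test name and values below are illustrative only, not part of this change):

package cmd

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAssertionIdiomSketch(t *testing.T) {
	a := assert.New(t) // one Assertions handle per test, built from *testing.T

	transfers := []string{"x", "y", "z"}
	var err error
	n := 0

	a.Nil(err)                                          // was: c.Assert(err, chk.IsNil)
	a.Equal(3, len(transfers))                          // was: c.Assert(len(transfers), chk.Equals, 3); expected value comes first
	a.Zero(n)                                           // was: c.Assert(n, chk.Equals, 0)
	a.NotNil(transfers)                                 // was: c.Assert(transfers, chk.NotNil)
	a.Contains("recursive flag required", "recursive") // was: c.Assert(err.Error(), StringContains, "recursive")
}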
mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) // make sure that the failure was due to the recursive flag - c.Assert(err.Error(), StringContains, "recursive") + a.Contains(err.Error(), "recursive") }) } -func (s *cmdIntegrationSuite) TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(c *chk.C) { +func TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // generate source container and destination fileshare - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteContainer(c, srcContainerURL) - defer deleteShare(c, dstShareURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteContainer(a, srcContainerURL) + defer deleteShare(a, dstShareURL) // create file list to include against fileList := []string{ @@ -259,7 +265,7 @@ func (s *cmdIntegrationSuite) TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(c // set up filters and generate blobs includeString := "*.pdf;*.jpeg;exactName" includePathString := "subdir/" - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, fileList, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -267,34 +273,35 @@ func (s *cmdIntegrationSuite) TestBlobFileCopyS2SWithIncludeAndIncludeDirFlag(c mockedRPC.init() // construct the raw input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultRawCopyInput(srcContainerURLWithSAS.String(), dstShareURLWithSAS.String()) raw.include = includeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", loneIncludeList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", loneIncludeList, mockedRPC) }) mockedRPC.reset() raw.includePath = includePathString - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", includePathAndIncludeList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", includePathAndIncludeList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag(c *chk.C) { +func TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // generate source container and destination fileshare - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteContainer(c, srcContainerURL) - defer deleteShare(c, dstShareURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteContainer(a, srcContainerURL) + defer deleteShare(a, dstShareURL) // create file list to include against fileList := []string{ @@ -321,7 +328,7 @@ func (s 
*cmdIntegrationSuite) TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag( // set up filters and generate blobs excludeString := "*.pdf;*.jpeg;exactName" excludePathString := "subdir/" - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, fileList, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -329,34 +336,35 @@ func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SWithExcludeAndExcludeDirFlag( mockedRPC.init() // construct the raw input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultRawCopyInput(srcContainerURLWithSAS.String(), dstShareURLWithSAS.String()) raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", loneExcludeList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", loneExcludeList, mockedRPC) }) mockedRPC.reset() raw.excludePath = excludePathString - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", excludePathAndExcludeList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", excludePathAndExcludeList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SIncludeExcludeMix(c *chk.C) { +func TestBlobToFileCopyS2SIncludeExcludeMix(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // generate source container and destination fileshare - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteContainer(c, srcContainerURL) - defer deleteShare(c, dstShareURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteContainer(a, srcContainerURL) + defer deleteShare(a, dstShareURL) // create file list to include against fileList := []string{ @@ -382,7 +390,7 @@ func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SIncludeExcludeMix(c *chk.C) { // set up filters and generate blobs includeString := "*.pdf;*.jpeg;exactName" excludeString := "ohno*;why*;exactName" - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, fileList, blockBlobDefaultData) // set up the interceptor mockedRPC := interceptor{} @@ -390,33 +398,34 @@ func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SIncludeExcludeMix(c *chk.C) { mockedRPC.init() // construct the raw input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultRawCopyInput(srcContainerURLWithSAS.String(), dstShareURLWithSAS.String()) raw.include = includeString raw.exclude = excludeString raw.recursive = true - 
runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", toInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", toInclude, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SWithDirectory(c *chk.C) { +func TestBlobToFileCopyS2SWithDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() // create container and share - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteContainer(c, srcContainerURL) - defer deleteShare(c, dstShareURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteContainer(a, srcContainerURL) + defer deleteShare(a, dstShareURL) // create source scenario dirName := "copyme" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, dirName+"/") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, dirName+"/") + a.NotZero(len(fileList)) // initialize mocked RPC mockedRPC := interceptor{} @@ -424,16 +433,16 @@ func (s *cmdIntegrationSuite) TestBlobToFileCopyS2SWithDirectory(c *chk.C) { mockedRPC.init() // generate raw copy command - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) srcContainerURLWithSAS.Path += "/copyme/" raw := getDefaultRawCopyInput(srcContainerURLWithSAS.String(), dstShareURLWithSAS.String()) raw.recursive = true // test folder copies expectedList := scenarioHelper{}.shaveOffPrefix(fileList, dirName+"/") - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/"+dirName+"/", expectedList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/"+dirName+"/", expectedList, mockedRPC) }) -} +} \ No newline at end of file diff --git a/cmd/zt_copy_blob_upload_test.go b/cmd/zt_copy_blob_upload_test.go index 02d504000..e12a3bbfb 100644 --- a/cmd/zt_copy_blob_upload_test.go +++ b/cmd/zt_copy_blob_upload_test.go @@ -22,18 +22,20 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/url" "os" "path/filepath" "strings" + "testing" "time" ) -func (s *cmdIntegrationSuite) TestIncludeDirSimple(c *chk.C) { +func TestIncludeDirSimple(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) files := []string{ "filea", @@ -46,32 +48,33 @@ func (s *cmdIntegrationSuite) TestIncludeDirSimple(c *chk.C) { "sub/child/filec", } - dirPath := scenarioHelper{}.generateLocalDirectory(c) + dirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dirPath) - scenarioHelper{}.generateLocalFilesFromList(c, dirPath, files) + scenarioHelper{}.generateLocalFilesFromList(a, dirPath, files) mockedRPC := interceptor{} Rpc = 
mockedRPC.intercept mockedRPC.init() - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRawCopyInput(dirPath, rawBlobURLWithSAS.String()) raw.recursive = true raw.includePath = "sub" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) // trim / and /folder/ off - validateDownloadTransfersAreScheduled(c, "/", "/"+filepath.Base(dirPath)+"/", files[5:], mockedRPC) + validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[5:], mockedRPC) }) } -func (s *cmdIntegrationSuite) TestIncludeDir(c *chk.C) { +func TestIncludeDir(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) files := []string{ "filea", @@ -87,32 +90,33 @@ func (s *cmdIntegrationSuite) TestIncludeDir(c *chk.C) { "sub/subsub/filec", } - dirPath := scenarioHelper{}.generateLocalDirectory(c) + dirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dirPath) - scenarioHelper{}.generateLocalFilesFromList(c, dirPath, files) + scenarioHelper{}.generateLocalFilesFromList(a, dirPath, files) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRawCopyInput(dirPath, rawBlobURLWithSAS.String()) raw.recursive = true raw.includePath = "sub/subsub" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) // trim / and /folder/ off - validateDownloadTransfersAreScheduled(c, "/", "/"+filepath.Base(dirPath)+"/", files[8:], mockedRPC) + validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[8:], mockedRPC) }) } -func (s *cmdIntegrationSuite) TestExcludeDir(c *chk.C) { +func TestExcludeDir(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) files := []string{ "filea", @@ -128,32 +132,33 @@ func (s *cmdIntegrationSuite) TestExcludeDir(c *chk.C) { "sub/subsub/filec", } - dirPath := scenarioHelper{}.generateLocalDirectory(c) + dirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dirPath) - scenarioHelper{}.generateLocalFilesFromList(c, dirPath, files) + scenarioHelper{}.generateLocalFilesFromList(a, dirPath, files) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRawCopyInput(dirPath, rawBlobURLWithSAS.String()) raw.recursive = true raw.excludePath = "sub/subsub" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - 
c.Assert(len(mockedRPC.transfers), chk.Equals, 8) + a.Equal(8, len(mockedRPC.transfers)) // Trim / and /folder/ off - validateDownloadTransfersAreScheduled(c, "/", "/"+filepath.Base(dirPath)+"/", files[:8], mockedRPC) + validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[:8], mockedRPC) }) } -func (s *cmdIntegrationSuite) TestIncludeAndExcludeDir(c *chk.C) { +func TestIncludeAndExcludeDir(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) files := []string{ "xyz/aaa", @@ -164,41 +169,42 @@ func (s *cmdIntegrationSuite) TestIncludeAndExcludeDir(c *chk.C) { "filec", } - dirPath := scenarioHelper{}.generateLocalDirectory(c) + dirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dirPath) - scenarioHelper{}.generateLocalFilesFromList(c, dirPath, files) + scenarioHelper{}.generateLocalFilesFromList(a, dirPath, files) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRawCopyInput(dirPath, rawBlobURLWithSAS.String()) raw.recursive = true raw.includePath = "xyz" raw.excludePath = "def" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) // Trim / and /folder/ off - validateDownloadTransfersAreScheduled(c, "/", "/"+filepath.Base(dirPath)+"/", files[:2], mockedRPC) + validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[:2], mockedRPC) }) } // regular local file->blob upload -func (s *cmdIntegrationSuite) TestUploadSingleFileToBlobVirtualDirectory(c *chk.C) { +func TestUploadSingleFileToBlobVirtualDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, srcFileName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source as a single file - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) fileList := []string{srcFileName} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, fileList) // set up the destination container with a single blob dstBlobName := "testfolder/" @@ -209,61 +215,62 @@ func (s *cmdIntegrationSuite) TestUploadSingleFileToBlobVirtualDirectory(c *chk. 
mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dstBlobName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dstBlobName) raw := getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawBlobURLWithSAS.String()) // the blob was created after the file, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // Validate that the destination is the file name (within the folder). // The destination being the folder *was* the issue in the past. // The service would just name the file as the folder if we didn't explicitly specify it. - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) d, err := url.PathUnescape(mockedRPC.transfers[0].Destination) //Unescape the destination, as we have special characters. - c.Assert(err, chk.IsNil) - c.Assert(d, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+srcFileName) + a.Nil(err) + a.Equal(common.AZCOPY_PATH_SEPARATOR_STRING+srcFileName, d) }) // clean the RPC for the next test mockedRPC.reset() // now target the destination container, the result should be the same - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw = getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawContainerURLWithSAS.String()) // the file was created after the blob, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // verify explicitly since the source and destination names will be different: // the source is "" since the given URL points to the blob itself // the destination should be the blob name, since the given local path points to the parent dir - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "") - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName)) + a.Equal("", mockedRPC.transfers[0].Source) + a.Equal(common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName), mockedRPC.transfers[0].Destination) }) } } // regular local file->blob upload -func (s *cmdIntegrationSuite) TestUploadSingleFileToBlob(c *chk.C) { +func TestUploadSingleFileToBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, srcFileName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source as a single file - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) fileList := []string{srcFileName} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, fileList) // set up the destination container with a single blob dstBlobName := "whatever" - scenarioHelper{}.generateBlobsFromList(c, containerURL, []string{dstBlobName}, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, 
containerURL, []string{dstBlobName}, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -271,51 +278,52 @@ func (s *cmdIntegrationSuite) TestUploadSingleFileToBlob(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dstBlobName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dstBlobName) raw := getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawBlobURLWithSAS.String()) // the blob was created after the file, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - validateUploadTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + validateUploadTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) // clean the RPC for the next test mockedRPC.reset() // now target the destination container, the result should be the same - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw = getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawContainerURLWithSAS.String()) // the file was created after the blob, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // verify explicitly since the source and destination names will be different: // the source is "" since the given URL points to the blob itself // the destination should be the blob name, since the given local path points to the parent dir - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "") - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName)) + a.Equal("", mockedRPC.transfers[0].Source) + a.Equal(common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName), mockedRPC.transfers[0].Destination) }) } } // regular directory->container upload -func (s *cmdIntegrationSuite) TestUploadDirectoryToContainer(c *chk.C) { +func TestUploadDirectoryToContainer(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirPath := scenarioHelper{}.generateLocalDirectory(c) + srcDirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirPath) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirPath, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirPath, "") // set up an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -323,18 +331,18 @@ func (s *cmdIntegrationSuite) TestUploadDirectoryToContainer(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultCopyRawInput(srcDirPath, rawContainerURLWithSAS.String()) raw.recursive = true - 
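The same handle is also threaded through shared helpers: functions such as validateUploadTransfersAreScheduled here, and the doTestUploadDirectoryToContainerWithIncludeBefore/After helpers further down, now take an *assert.Assertions parameter instead of *chk.C, so each test builds the handle once with assert.New(t) and passes it along. A small sketch of that shape, using a hypothetical helper name; note that, unlike c.Assert, a failed assert call records the failure and lets the test keep running rather than aborting it:

package cmd

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// verifyTransferCount is an illustrative stand-in for helpers like
// validateUploadTransfersAreScheduled that previously accepted a *chk.C.
func verifyTransferCount(a *assert.Assertions, got, want int) {
	a.Equal(want, got)
}

func TestHelperSignatureSketch(t *testing.T) {
	a := assert.New(t)
	verifyTransferCount(a, 3, 3)
}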
runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(fileList)) + a.Equal(len(fileList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateUploadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, + validateUploadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING, fileList, mockedRPC) }) @@ -342,25 +350,26 @@ func (s *cmdIntegrationSuite) TestUploadDirectoryToContainer(c *chk.C) { raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Zero(len(mockedRPC.transfers)) }) } // regular directory->virtual dir upload -func (s *cmdIntegrationSuite) TestUploadDirectoryToVirtualDirectory(c *chk.C) { +func TestUploadDirectoryToVirtualDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() vdirName := "vdir" // set up the source with numerous files - srcDirPath := scenarioHelper{}.generateLocalDirectory(c) + srcDirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirPath) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirPath, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirPath, "") // set up an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -368,19 +377,19 @@ func (s *cmdIntegrationSuite) TestUploadDirectoryToVirtualDirectory(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawContainerURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) raw := getDefaultCopyRawInput(srcDirPath, rawContainerURLWithSAS.String()) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(fileList)) + a.Equal(len(fileList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(fileList, filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING) - validateUploadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, + validateUploadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING, expectedTransfers, mockedRPC) }) @@ -388,28 +397,29 @@ func (s *cmdIntegrationSuite) TestUploadDirectoryToVirtualDirectory(c *chk.C) { raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Zero(len(mockedRPC.transfers)) }) } // files(from pattern)->container upload -func (s *cmdIntegrationSuite) 
TestUploadDirectoryToContainerWithPattern(c *chk.C) { +func TestUploadDirectoryToContainerWithPattern(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirPath := scenarioHelper{}.generateLocalDirectory(c) + srcDirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirPath) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirPath, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirPath, "") // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.pdf", "includeSub/wow/amazing.pdf"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirPath, filesToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, filesToInclude) // set up an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -417,52 +427,54 @@ func (s *cmdIntegrationSuite) TestUploadDirectoryToContainerWithPattern(c *chk.C mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultCopyRawInput(filepath.Join(srcDirPath, "/*.pdf"), rawContainerURLWithSAS.String()) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) // only the top pdf should be included - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) - c.Assert(mockedRPC.transfers[0].Source, chk.Equals, mockedRPC.transfers[0].Destination) - c.Assert(strings.HasSuffix(mockedRPC.transfers[0].Source, ".pdf"), chk.Equals, true) - c.Assert(strings.Contains(mockedRPC.transfers[0].Source[1:], common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.Equal(1, len(mockedRPC.transfers)) + a.Equal(mockedRPC.transfers[0].Destination, mockedRPC.transfers[0].Source) + a.True(strings.HasSuffix(mockedRPC.transfers[0].Source, ".pdf")) + a.False(strings.Contains(mockedRPC.transfers[0].Source[1:], common.AZCOPY_PATH_SEPARATOR_STRING)) }) } -func (s *cmdIntegrationSuite) TestUploadDirectoryToContainerWithIncludeBefore_UTC(c *chk.C) { - s.doTestUploadDirectoryToContainerWithIncludeBefore(true, c) +func TestUploadDirectoryToContainerWithIncludeBefore_UTC(t *testing.T) { + a := assert.New(t) + doTestUploadDirectoryToContainerWithIncludeBefore(true, a) } -func (s *cmdIntegrationSuite) TestUploadDirectoryToContainerWithIncludeBefore_LocalTime(c *chk.C) { - s.doTestUploadDirectoryToContainerWithIncludeBefore(false, c) +func TestUploadDirectoryToContainerWithIncludeBefore_LocalTime(t *testing.T) { + a := assert.New(t) + doTestUploadDirectoryToContainerWithIncludeBefore(false, a) } -func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeBefore(useUtc bool, c *chk.C) { +func doTestUploadDirectoryToContainerWithIncludeBefore(useUtc bool, a *assert.Assertions) { bsu := getBSU() // set up the source directory - srcDirPath := scenarioHelper{}.generateLocalDirectory(c) + srcDirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirPath) // add 
newer files, which we wish to include filesToInclude := []string{"important.txt", "includeSub/amazing.txt", "includeSub/wow/amazing.txt"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirPath, filesToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, filesToInclude) // sleep a little longer, to give clear LMT separation between the files above and those below (should not be copied) time.Sleep(1500 * time.Millisecond) includeFrom := time.Now() extraIgnoredFiles := []string{"ignored.txt", "includeSub/ignored.txt", "includeSub/wow/ignored.txt"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirPath, extraIgnoredFiles) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, extraIgnoredFiles) // set up an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -470,7 +482,7 @@ func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeBefore( mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultCopyRawInput(srcDirPath, rawContainerURLWithSAS.String()) raw.recursive = true if useUtc { @@ -479,34 +491,36 @@ func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeBefore( raw.includeBefore = includeFrom.Format("2006-01-02T15:04:05") // local time, no timezone } - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(filesToInclude)) + a.Equal(len(filesToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(filesToInclude, filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING) - validateUploadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, + validateUploadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING, expectedTransfers, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestUploadDirectoryToContainerWithIncludeAfter_UTC(c *chk.C) { - s.doTestUploadDirectoryToContainerWithIncludeAfter(true, c) +func TestUploadDirectoryToContainerWithIncludeAfter_UTC(t *testing.T) { + a := assert.New(t) + doTestUploadDirectoryToContainerWithIncludeAfter(true, a) } -func (s *cmdIntegrationSuite) TestUploadDirectoryToContainerWithIncludeAfter_LocalTime(c *chk.C) { - s.doTestUploadDirectoryToContainerWithIncludeAfter(false, c) +func TestUploadDirectoryToContainerWithIncludeAfter_LocalTime(t *testing.T) { + a := assert.New(t) + doTestUploadDirectoryToContainerWithIncludeAfter(false, a) } -func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeAfter(useUtc bool, c *chk.C) { +func doTestUploadDirectoryToContainerWithIncludeAfter(useUtc bool, a *assert.Assertions) { bsu := getBSU() // set up the source with numerous files - srcDirPath := scenarioHelper{}.generateLocalDirectory(c) + srcDirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirPath) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, 
srcDirPath, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirPath, "") // sleep a little longer, to give clear LMT separation between the files above and those below time.Sleep(1500 * time.Millisecond) @@ -514,11 +528,11 @@ func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeAfter(u // add newer files, which we wish to include filesToInclude := []string{"important.txt", "includeSub/amazing.txt", "includeSub/wow/amazing.txt"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirPath, filesToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirPath, filesToInclude) // set up an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -526,7 +540,7 @@ func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeAfter(u mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultCopyRawInput(srcDirPath, rawContainerURLWithSAS.String()) raw.recursive = true if useUtc { @@ -535,32 +549,33 @@ func (s *cmdIntegrationSuite) doTestUploadDirectoryToContainerWithIncludeAfter(u raw.includeAfter = includeFrom.Format("2006-01-02T15:04:05") // local time, no timezone } - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(filesToInclude, filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING) - validateUploadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING, + validateUploadTransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING, common.AZCOPY_PATH_SEPARATOR_STRING+filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING, expectedTransfers, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestDisableAutoDecoding(c *chk.C) { +func TestDisableAutoDecoding(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // Encoded file name since Windows won't create name with invalid chars srcFileName := `%3C %3E %5C %2F %3A %22 %7C %3F %2A invalidcharsfile` // set up the source as a single file - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) _, err := scenarioHelper{}.generateLocalFile(filepath.Join(srcDirName, srcFileName), defaultFileSize) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up interceptor mockedRPC := interceptor{} @@ -571,20 +586,20 @@ func (s *cmdIntegrationSuite) TestDisableAutoDecoding(c *chk.C) { mockedRPC.reset() // now target the destination container, the result should be the same - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := 
getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawContainerURLWithSAS.String()) raw.disableAutoDecoding = true // the file was created after the blob, so no sync should happen - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // verify explicitly since the source and destination names will be different: // the source is "" since the given URL points to the blob itself // the destination should be the source file name, since decoding has been disabled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "") - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName)) + a.Equal("", mockedRPC.transfers[0].Source) + a.Equal(common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName), mockedRPC.transfers[0].Destination) }) } diff --git a/cmd/zt_copy_file_file_test.go b/cmd/zt_copy_file_file_test.go index 94b2db042..42d2d0788 100644 --- a/cmd/zt_copy_file_file_test.go +++ b/cmd/zt_copy_file_file_test.go @@ -21,23 +21,24 @@ package cmd import ( + "github.com/stretchr/testify/assert" "strings" - - chk "gopkg.in/check.v1" + "testing" ) // regular file->file copy -func (s *cmdIntegrationSuite) TestFileCopyS2SWithSingleFile(c *chk.C) { +func TestFileCopyS2SWithSingleFile(t *testing.T) { + a := assert.New(t) fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, fileList) + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) // set up interceptor mockedRPC := interceptor{} @@ -45,13 +46,13 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, srcShareName, fileList[0]) - dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, dstShareName, fileList[0]) + srcFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, srcShareName, fileList[0]) + dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, fileList[0]) raw := getDefaultCopyRawInput(srcFileURLWithSAS.String(), dstFileURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) } @@ -64,32 +65,33 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, srcShareName, fileName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, srcShareName, fileName) + dstShareURLWithSAS := 
scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultCopyRawInput(srcFileURLWithSAS.String(), dstShareURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // put the filename in the destination dir name // this is because validateS2STransfersAreScheduled dislikes when the relative paths differ // In this case, the relative path should absolutely differ. (explicit file path -> implicit) - validateS2STransfersAreScheduled(c, "", "/"+strings.ReplaceAll(fileName, "%", "%25"), []string{""}, mockedRPC) + validateS2STransfersAreScheduled(a, "", "/"+strings.ReplaceAll(fileName, "%", "%25"), []string{""}, mockedRPC) }) } } // regular share->share copy -func (s *cmdIntegrationSuite) TestFileCopyS2SWithShares(c *chk.C) { +func TestFileCopyS2SWithShares(t *testing.T) { + a := assert.New(t) fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) // set up interceptor mockedRPC := interceptor{} @@ -97,48 +99,49 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithShares(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultCopyRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) raw.recursive = true // all files at source should be copied to destination expectedList := scenarioHelper{}.addFoldersToList(fileList, false) // since this is files-to-files and so folder aware - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedList)) + a.Equal(len(expectedList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateS2STransfersAreScheduled(c, "/", "/", expectedList, mockedRPC) + validateS2STransfersAreScheduled(a, "/", "/", expectedList, mockedRPC) }) // turn off recursive, we should be getting an error raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) // make sure the failure was due to the recursive flag - c.Assert(err.Error(), StringContains, "recursive") + a.Contains(err.Error(), "recursive") }) } // include flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestFileCopyS2SWithIncludeFlag(c *chk.C) { +func TestFileCopyS2SWithIncludeFlag(t *testing.T) { + a := assert.New(t) fsu := getFSU() - srcShareURL, 
srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToInclude) + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -147,34 +150,35 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithIncludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultCopyRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) raw.include = includeString raw.recursive = true // verify that only the files specified by the include flag are copied - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", filesToInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", filesToInclude, mockedRPC) }) } // exclude flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestFileCopyS2SWithExcludeFlag(c *chk.C) { +func TestFileCopyS2SWithExcludeFlag(t *testing.T) { + a := assert.New(t) fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) // add special files that we wish to exclude filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToExclude) + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -183,40 +187,41 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithExcludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := 
scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultCopyRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) raw.exclude = excludeString raw.recursive = true // make sure the list doesn't include the files specified by the exclude flag - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", fileList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", fileList, mockedRPC) }) } // include and exclude flag can work together to limit the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestFileCopyS2SWithIncludeAndExcludeFlag(c *chk.C) { +func TestFileCopyS2SWithIncludeAndExcludeFlag(t *testing.T) { + a := assert.New(t) fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToInclude) + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToExclude) + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -225,35 +230,36 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithIncludeAndExcludeFlag(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) raw := getDefaultCopyRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) raw.include = includeString raw.exclude = excludeString raw.recursive = true // verify that only the files specified by the include flag are copied - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/", filesToInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/", filesToInclude, mockedRPC) }) } // regular dir -> dir copy -func (s *cmdIntegrationSuite) TestFileCopyS2SWithDirectory(c *chk.C) { +func TestFileCopyS2SWithDirectory(t 
*testing.T) { + a := assert.New(t) fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) // set up the source share with numerous files dirName := "dir" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, dirName+"/") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, dirName+"/") + a.NotZero(len(fileList)) // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileList) + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) // set up interceptor mockedRPC := interceptor{} @@ -261,16 +267,16 @@ func (s *cmdIntegrationSuite) TestFileCopyS2SWithDirectory(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) srcShareURLWithSAS.Path += "/" + dirName raw := getDefaultCopyRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) raw.recursive = true expectedList := scenarioHelper{}.shaveOffPrefix(fileList, dirName+"/") expectedList = scenarioHelper{}.addFoldersToList(expectedList, true) // since this is files-to-files and so folder aware - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2STransfersAreScheduled(c, "/", "/"+dirName+"/", expectedList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2STransfersAreScheduled(a, "/", "/"+dirName+"/", expectedList, mockedRPC) }) -} +} \ No newline at end of file diff --git a/cmd/zt_copy_s2smigration_test.go b/cmd/zt_copy_s2smigration_test.go index 158d4f346..0563d3125 100644 --- a/cmd/zt_copy_s2smigration_test.go +++ b/cmd/zt_copy_s2smigration_test.go @@ -23,14 +23,16 @@ package cmd import ( "context" "fmt" + "github.com/stretchr/testify/assert" "net/url" + "os" "strings" + "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/azbfs" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) // Additional S2S migration cases, besides E2E smoke testing cases for S3/blob/file source contained in test_service_to_service_copy.py @@ -50,24 +52,25 @@ const ( var defaultS2SInvalideMetadataHandleOption = common.DefaultInvalidMetadataHandleOption -func (s *cmdIntegrationSuite) SetUpSuite(c *chk.C) { +func TestMain(m *testing.M) { if !isS3Disabled() { if s3Client, err := createS3ClientWithMinio(createS3ResOptions{}); err == nil { - cleanS3Account(c, s3Client) + cleanS3Account(s3Client) } else { // If S3 credentials aren't supplied, we're probably only trying to run Azure tests. // As such, gracefully return here instead of cancelling every test because we couldn't clean up S3. 
- c.Log("S3 client could not be successfully initialised") + fmt.Println("S3 client could not be successfully initialised") } } if !gcpTestsDisabled() { if gcpClient, err := createGCPClientWithGCSSDK(); err == nil { - cleanGCPAccount(c, gcpClient) + cleanGCPAccount(gcpClient) } else { - c.Log("GCP client could not be successfully initialised") + fmt.Println("GCP client could not be successfully initialised") } } + os.Exit(m.Run()) } func getDefaultRawCopyInput(src, dst string) rawCopyCmdArgs { @@ -91,9 +94,9 @@ func getDefaultRawCopyInput(src, dst string) rawCopyCmdArgs { } } -func validateS2STransfersAreScheduled(c *chk.C, srcDirName string, dstDirName string, expectedTransfers []string, mockedRPC interceptor) { +func validateS2STransfersAreScheduled(a *assert.Assertions, srcDirName string, dstDirName string, expectedTransfers []string, mockedRPC interceptor) { // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedTransfers)) + a.Equal(len(expectedTransfers), len(mockedRPC.transfers)) if debugMode { fmt.Println("expectedTransfers: ") @@ -130,11 +133,11 @@ func validateS2STransfersAreScheduled(c *chk.C, srcDirName string, dstDirName st } // the relative paths should be equal - c.Assert(srcRelativeFilePath, chk.Equals, dstRelativeFilePath) + a.Equal(dstRelativeFilePath, srcRelativeFilePath) // look up the transfer is expected _, dstExist := lookupMap[dstRelativeFilePath] - c.Assert(dstExist, chk.Equals, true) + a.True(dstExist) } } @@ -144,11 +147,12 @@ func printTransfers(ts []string) { } } -func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolved(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolved(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } invalidPrefix := "invalid---bucketname.for---azure" @@ -156,11 +160,11 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolve // Generate source bucket bucketName := generateBucketNameWithCustomizedPrefix(invalidPrefix) - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) - objectList := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName, "", false) - c.Assert(len(objectList), chk.Not(chk.Equals), 0) + objectList := scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName, "", false) + a.NotZero(len(objectList)) // set up interceptor mockedRPC := interceptor{} @@ -168,23 +172,23 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolve mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) // Use default region - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) // Use default region + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) raw := getDefaultRawCopyInput(rawSrcS3BucketURL.String(), rawDstBlobServiceURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, 
func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(objectList)) + a.Equal(len(objectList), len(mockedRPC.transfers)) // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(c) + blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - c.Assert(scenarioHelper{}.containerExists(containerURL), chk.Equals, true) - defer deleteContainer(c, containerURL) + a.True(scenarioHelper{}.containerExists(containerURL)) + defer deleteContainer(a, containerURL) // Check correct entry are scheduled. // Example: @@ -193,15 +197,16 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolve // transfer.Source by design be scheduled: /tops3objects2scopyfroms3toblobwithbucketnameneedberesolved4243293354900 // transfer.Destination by design be scheduled: /invalid-3-bucketname-for-3-azures2scopyfroms3toblobwithbucketna/tops3objects2scopyfroms3toblobwithbucketnameneedberesolved4243293354900 // Nothing should be replaced during matching for source, and resolved bucket name should be replaced for destination. - validateS2STransfersAreScheduled(c, "", common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) + validateS2STransfersAreScheduled(a, "", common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithWildcardInSrcAndBucketNameNeedBeResolved(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ToBlobWithWildcardInSrcAndBucketNameNeedBeResolved(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } invalidPrefix := "invalid----bucketname.for-azure" @@ -209,11 +214,11 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithWildcardInSrcAndBucketN // Generate source bucket bucketName := generateBucketNameWithCustomizedPrefix(invalidPrefix) - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) - objectList := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName, "", false) - c.Assert(len(objectList), chk.Not(chk.Equals), 0) + objectList := scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName, "", false) + a.NotZero(len(objectList)) // set up interceptor mockedRPC := interceptor{} @@ -221,24 +226,24 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithWildcardInSrcAndBucketN mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) // Use default region - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) // Use default region + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) rawSrcS3BucketStrWithWirdcard := strings.Replace(rawSrcS3BucketURL.String(), invalidPrefix, "invalid----*", 1) 
raw := getDefaultRawCopyInput(rawSrcS3BucketStrWithWirdcard, rawDstBlobServiceURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(objectList)) + a.Equal(len(objectList), len(mockedRPC.transfers)) // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(c) + blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - c.Assert(scenarioHelper{}.containerExists(containerURL), chk.Equals, true) - defer deleteContainer(c, containerURL) + a.True(scenarioHelper{}.containerExists(containerURL)) + defer deleteContainer(a, containerURL) // Check correct entry are scheduled. // Example: @@ -247,17 +252,18 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithWildcardInSrcAndBucketN // transfer.Source by design be scheduled: /invalid----bucketname.for-azures2scopyfroms3toblobwithwildcardi/sub1/sub3/sub5/s3objects2scopyfroms3toblobwithwildcardinsrcandbucketnameneedberesolved435110281300 // transfer.Destination by design be scheduled: /invalid-4-bucketname-for-azures2scopyfroms3toblobwithwildcardi/sub1/sub3/sub5/s3objects2scopyfroms3toblobwithwildcardinsrcandbucketnameneedberesolved435110281300 // org bucket name should be replaced during matching for source, and resolved bucket name should be replaced for destination. - validateS2STransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING+bucketName, common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) + validateS2STransfersAreScheduled(a, common.AZCOPY_PATH_SEPARATOR_STRING+bucketName, common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) }) } // This is negative because generateBucketNameWithCustomizedPrefix will return a bucket name with length 63, // and resolving logic will resolve -- to -2- which means the length to be 64. This exceeds valid container name, so error will be returned. 
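For readers following the bucket-name resolution cases in the tests above and the negative case described in the comment just above: the expected prefixes in the test data (for example "invalid---bucketname.for---azure" resolving to "invalid-3-bucketname-for-3-azure") follow a simple convention in which '.' and '_' become '-' and a run of N consecutive dashes becomes "-N-". The sketch below only illustrates that convention as exercised by the test data; it is not the resolver AzCopy actually uses, and the name resolveBucketNameSketch is invented for this illustration.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// resolveBucketNameSketch illustrates the naming convention the tests above
// exercise: '.' and '_' are replaced with '-', and every run of two or more
// dashes collapses to "-N-" where N is the length of the run. This is only a
// sketch of the convention, not AzCopy's real resolver.
func resolveBucketNameSketch(bucket string) string {
	s := strings.NewReplacer(".", "-", "_", "-").Replace(bucket)
	return regexp.MustCompile(`-{2,}`).ReplaceAllStringFunc(s, func(run string) string {
		return fmt.Sprintf("-%d-", len(run))
	})
}

func main() {
	// Matches the invalidPrefix -> resolvedPrefix pairs used by the positive tests.
	fmt.Println(resolveBucketNameSketch("invalid---bucketname.for---azure")) // invalid-3-bucketname-for-3-azure
	fmt.Println(resolveBucketNameSketch("invalid----bucketname.for-azure"))  // invalid-4-bucketname-for-azure

	// The negative case: "--" grows to "-2-", so a 63-character bucket name
	// resolves to 64 characters and exceeds the container name limit.
	name := strings.Repeat("a", 30) + "--" + strings.Repeat("b", 31) // 63 characters
	fmt.Println(len(resolveBucketNameSketch(name)))                  // 64
}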
-func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolvedNegative(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolvedNegative(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } invalidPrefix := "invalid.bucketname--for.azure" @@ -265,12 +271,12 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolve // Generate source bucket bucketName := generateBucketNameWithCustomizedPrefix(invalidPrefix) - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + defer deleteBucket(s3Client, bucketName, true) - objectList := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName, "", false) - c.Assert(len(objectList), chk.Not(chk.Equals), 0) + objectList := scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName, "", false) + a.NotZero(len(objectList)) // set up interceptor mockedRPC := interceptor{} @@ -278,13 +284,13 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolve mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) // Use default region - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) // Use default region + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) raw := getDefaultRawCopyInput(rawSrcS3BucketURL.String(), rawDstBlobServiceURLWithSAS.String()) // bucket should not be resolved, and objects should not be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) loggedError := false log := glcm.(*mockedLifecycleManager).infoLog @@ -297,27 +303,28 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithBucketNameNeedBeResolve count = len(log) } - c.Assert(loggedError, chk.Equals, true) + a.True(loggedError) }) } // Copy from virtual directory to container, with normal encoding ' ' as ' '. 
-func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithSpaceInSrcNotEncoded(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ToBlobWithSpaceInSrcNotEncoded(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // Generate source bucket bucketName := generateBucketName() - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) dstContainerName := generateContainerName() objectList := []string{"space dir/space object"} - scenarioHelper{}.generateObjects(c, s3Client, bucketName, objectList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, objectList) // set up interceptor mockedRPC := interceptor{} @@ -325,42 +332,43 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithSpaceInSrcNotEncoded(c mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) // Use default region + rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) // Use default region rawSrcS3DirStr := rawSrcS3BucketURL.String() + "/space dir" - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcS3DirStr, rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. // The destination is URL encoded, as go's URL method do the encoding. - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/space%20dir/space%20object") + a.Equal("/space%20dir/space%20object", mockedRPC.transfers[0].Destination) }) } // Copy from virtual directory to container, with special encoding ' ' to '+' by S3 management portal. // '+' is handled in copy.go before extract the SourceRoot. // The scheduled transfer would be URL encoded no matter what's the raw source/destination provided by user. 
-func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithSpaceInSrcEncodedAsPlus(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ToBlobWithSpaceInSrcEncodedAsPlus(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // Generate source bucket bucketName := generateBucketName() - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) dstContainerName := generateContainerName() objectList := []string{"space dir/space object"} - scenarioHelper{}.generateObjects(c, s3Client, bucketName, objectList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, objectList) // set up interceptor mockedRPC := interceptor{} @@ -368,40 +376,41 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithSpaceInSrcEncodedAsPlus mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) // Use default region + rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) // Use default region rawSrcS3DirStr := rawSrcS3BucketURL.String() + "/space+dir" - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcS3DirStr, rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. // The destination is URL encoded, as go's URL method do the encoding. - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/space%20dir/space%20object") + a.Equal("/space%20dir/space%20object", mockedRPC.transfers[0].Destination) }) } // By design, when source directory contains objects with suffix ‘/’, objects with suffix ‘/’ should be ignored. 
-func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithObjectUsingSlashAsSuffix(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ToBlobWithObjectUsingSlashAsSuffix(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // Generate source bucket bucketName := generateBucketName() - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) dstContainerName := generateContainerName() objectList := []string{"fileConsiderdAsDirectory/", "file", "sub1/file"} - scenarioHelper{}.generateObjects(c, s3Client, bucketName, objectList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, objectList) validateObjectList := []string{"/file", "/sub1/file"} // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. @@ -411,43 +420,44 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ToBlobWithObjectUsingSlashAsSuffi mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcS3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) // Use default region + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcS3BucketURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(validateObjectList)) + a.Equal(len(validateObjectList), len(mockedRPC.transfers)) - validateS2STransfersAreScheduled(c, "", "/"+bucketName, validateObjectList, mockedRPC) + validateS2STransfersAreScheduled(a, "", "/"+bucketName, validateObjectList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromS3AccountWithBucketInDifferentRegionsAndListUseDefaultEndpoint(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3AccountWithBucketInDifferentRegionsAndListUseDefaultEndpoint(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // Generate source bucket bucketName1 := generateBucketNameWithCustomizedPrefix("default-region") - createNewBucketWithName(c, s3Client, bucketName1, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName1, true) + createNewBucketWithName(a, s3Client, bucketName1, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName1, true) bucketName2 := generateBucketNameWithCustomizedPrefix("us-west-2-region") bucketRegion2 := "us-west-1" // Use different region than other regional test to avoid conflicting - createNewBucketWithName(c, s3Client, bucketName2, createS3ResOptions{Location: bucketRegion2}) - defer deleteBucket(c, s3Client, bucketName2, true) + createNewBucketWithName(a, s3Client, bucketName2, 
createS3ResOptions{Location: bucketRegion2}) + defer deleteBucket(s3Client, bucketName2, true) - objectList1 := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName1, "", true) - c.Assert(len(objectList1), chk.Not(chk.Equals), 0) + objectList1 := scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName1, "", true) + a.NotZero(len(objectList1)) - objectList2 := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName2, "", true) - c.Assert(len(objectList2), chk.Not(chk.Equals), 0) + objectList2 := scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName2, "", true) + a.NotZero(len(objectList2)) validateObjectList := append(objectList1, objectList2...) @@ -457,42 +467,43 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3AccountWithBucketInDifferentRegio mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3AccountURL := scenarioHelper{}.getRawS3AccountURL(c, "") // Use default region - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcS3AccountURL := scenarioHelper{}.getRawS3AccountURL(a, "") // Use default region + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) raw := getDefaultRawCopyInput(rawSrcS3AccountURL.String(), rawDstBlobServiceURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateS2STransfersAreScheduled(c, "", "", validateObjectList, mockedRPC) + validateS2STransfersAreScheduled(a, "", "", validateObjectList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromS3AccountWithBucketInDifferentRegionsAndListUseSpecificRegion(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3AccountWithBucketInDifferentRegionsAndListUseSpecificRegion(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) specificRegion := "us-west-2" s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // Generate source bucket bucketName1 := generateBucketNameWithCustomizedPrefix("default-region") - createNewBucketWithName(c, s3Client, bucketName1, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName1, true) + createNewBucketWithName(a, s3Client, bucketName1, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName1, true) bucketName2 := generateBucketNameWithCustomizedPrefix(specificRegion) - createNewBucketWithName(c, s3Client, bucketName2, createS3ResOptions{Location: specificRegion}) - defer deleteBucket(c, s3Client, bucketName2, true) + createNewBucketWithName(a, s3Client, bucketName2, createS3ResOptions{Location: specificRegion}) + defer deleteBucket(s3Client, bucketName2, true) time.Sleep(30 * time.Second) // TODO: review and remove this, which was put here as a workaround to issues with buckets being reported as not existing - objectList1 := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName1, "", true) - c.Assert(len(objectList1), chk.Not(chk.Equals), 0) + objectList1 := scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName1, "", true) + a.NotZero(len(objectList1)) - objectList2 := scenarioHelper{}.generateCommonRemoteScenarioForS3(c, s3Client, bucketName2, "", true) - c.Assert(len(objectList2), chk.Not(chk.Equals), 0) + objectList2 := 
scenarioHelper{}.generateCommonRemoteScenarioForS3(a, s3Client, bucketName2, "", true) + a.NotZero(len(objectList2)) // set up interceptor mockedRPC := interceptor{} @@ -500,34 +511,35 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3AccountWithBucketInDifferentRegio mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3AccountURL := scenarioHelper{}.getRawS3AccountURL(c, specificRegion) - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcS3AccountURL := scenarioHelper{}.getRawS3AccountURL(a, specificRegion) + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) raw := getDefaultRawCopyInput(rawSrcS3AccountURL.String(), rawDstBlobServiceURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateS2STransfersAreScheduled(c, "", "", objectList2, mockedRPC) + validateS2STransfersAreScheduled(a, "", "", objectList2, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromS3ObjectToBlobContainer(c *chk.C) { - skipIfS3Disabled(c) +func TestS2SCopyFromS3ObjectToBlobContainer(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { - c.Skip("S3 client credentials not supplied") + t.Skip("S3 client credentials not supplied") } // Generate source bucket bucketName := generateBucketName() - createNewBucketWithName(c, s3Client, bucketName, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + createNewBucketWithName(a, s3Client, bucketName, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) dstContainerName := generateContainerName() objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateObjects(c, s3Client, bucketName, objectList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, objectList) // set up interceptor mockedRPC := interceptor{} @@ -535,77 +547,78 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromS3ObjectToBlobContainer(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawSrcS3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, "file") // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcS3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(a, "", bucketName, "file") // Use default region + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcS3ObjectURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) mockedRPC.reset() - rawSrcS3ObjectURL = scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, "sub/file2") // Use default region - rawDstContainerURLWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcS3ObjectURL = scenarioHelper{}.getRawS3ObjectURL(a, "", bucketName, "sub/file2") // Use default 
region + rawDstContainerURLWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcS3ObjectURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[0].Destination) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolved(c *chk.C) { - skipIfGCPDisabled(c) +func TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolved(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) gcpClient, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP credentials not supplied") + t.Skip("GCP credentials not supplied") } invalidPrefix := "invalid---bucket_name_for-azure" resolvedPrefix := "invalid-3-bucket-name-for-azure" bucketName := generateBucketNameWithCustomizedPrefix(invalidPrefix) - createNewGCPBucketWithName(c, gcpClient, bucketName) - defer deleteGCPBucket(c, gcpClient, bucketName, true) + createNewGCPBucketWithName(a, gcpClient, bucketName) + defer deleteGCPBucket(gcpClient, bucketName, true) - objectList := scenarioHelper{}.generateCommonRemoteScenarioForGCP(c, gcpClient, bucketName, "", false) - c.Assert(len(objectList), chk.Not(chk.Equals), 0) + objectList := scenarioHelper{}.generateCommonRemoteScenarioForGCP(a, gcpClient, bucketName, "", false) + a.NotZero(len(objectList)) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketName) - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(a, bucketName) + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) raw := getDefaultRawCopyInput(rawSrcGCPBucketURL.String(), rawDstBlobServiceURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(objectList)) + a.Equal(len(objectList), len(mockedRPC.transfers)) // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(c) + blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - c.Assert(scenarioHelper{}.containerExists(containerURL), chk.Equals, true) - defer deleteContainer(c, containerURL) + a.True(scenarioHelper{}.containerExists(containerURL)) + defer deleteContainer(a, containerURL) // Check correct entry are scheduled. 
// Example: @@ -614,56 +627,58 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolv // transfer.Source by design be scheduled: /tops3objects2scopyfroms3toblobwithbucketnameneedberesolved4243293354900 // transfer.Destination by design be scheduled: /invalid-3-bucketname-for-3-azures2scopyfroms3toblobwithbucketna/tops3objects2scopyfroms3toblobwithbucketnameneedberesolved4243293354900 // Nothing should be replaced during matching for source, and resolved bucket name should be replaced for destination. - validateS2STransfersAreScheduled(c, "", common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) + validateS2STransfersAreScheduled(a, "", common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithWildcardInSrcAndBucketNameNeedBeResolved(c *chk.C) { - skipIfGCPDisabled(c) +func TestS2SCopyFromGCPToBlobWithWildcardInSrcAndBucketNameNeedBeResolved(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) gcpClient, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP Credentials not Supplied") + t.Skip("GCP Credentials not Supplied") } invalidPrefix := "invalid----bucketname_for-azure" resolvedPrefix := "invalid-4-bucketname-for-azure" bucketName := generateBucketNameWithCustomizedPrefix(invalidPrefix) - createNewGCPBucketWithName(c, gcpClient, bucketName) - defer deleteGCPBucket(c, gcpClient, bucketName, true) + createNewGCPBucketWithName(a, gcpClient, bucketName) + defer deleteGCPBucket(gcpClient, bucketName, true) - objectList := scenarioHelper{}.generateCommonRemoteScenarioForGCP(c, gcpClient, bucketName, "", false) - c.Assert(len(objectList), chk.Not(chk.Equals), 0) + objectList := scenarioHelper{}.generateCommonRemoteScenarioForGCP(a, gcpClient, bucketName, "", false) + a.NotZero(len(objectList)) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketName) - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(a, bucketName) + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) rawSrcGCPBucketStrWithWildcard := strings.Replace(rawSrcGCPBucketURL.String(), invalidPrefix, "invalid----*", 1) raw := getDefaultRawCopyInput(rawSrcGCPBucketStrWithWildcard, rawDstBlobServiceURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(objectList)) + a.Equal(len(objectList), len(mockedRPC.transfers)) // Check container with resolved name has been created resolvedBucketName := strings.Replace(bucketName, invalidPrefix, resolvedPrefix, 1) - blobServiceURL := scenarioHelper{}.getBlobServiceURL(c) + blobServiceURL := scenarioHelper{}.getBlobServiceURL(a) containerURL := blobServiceURL.NewContainerURL(resolvedBucketName) - c.Assert(scenarioHelper{}.containerExists(containerURL), chk.Equals, true) - defer deleteContainer(c, containerURL) + a.True(scenarioHelper{}.containerExists(containerURL)) + defer deleteContainer(a, containerURL) - validateS2STransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING+bucketName, common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) + validateS2STransfersAreScheduled(a, 
common.AZCOPY_PATH_SEPARATOR_STRING+bucketName, common.AZCOPY_PATH_SEPARATOR_STRING+resolvedBucketName, objectList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolvedNegative(c *chk.C) { - skipIfGCPDisabled(c) +func TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolvedNegative(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) gcpClient, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP client credentials not supplied") + t.Skip("GCP client credentials not supplied") } invalidPrefix := "invalid_bucketname--for_azure" @@ -671,11 +686,11 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolv // Generate source bucket bucketName := generateBucketNameWithCustomizedPrefix(invalidPrefix) - createNewGCPBucketWithName(c, gcpClient, bucketName) - defer deleteGCPBucket(c, gcpClient, bucketName, true) + createNewGCPBucketWithName(a, gcpClient, bucketName) + defer deleteGCPBucket(gcpClient, bucketName, true) - objectList := scenarioHelper{}.generateCommonRemoteScenarioForGCP(c, gcpClient, bucketName, "", false) - c.Assert(len(objectList), chk.Not(chk.Equals), 0) + objectList := scenarioHelper{}.generateCommonRemoteScenarioForGCP(a, gcpClient, bucketName, "", false) + a.NotZero(len(objectList)) // set up interceptor mockedRPC := interceptor{} @@ -683,13 +698,13 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolv mockedRPC.init() // construct the raw input to simulate user input - rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketName) - rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(a, bucketName) + rawDstBlobServiceURLWithSAS := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) raw := getDefaultRawCopyInput(rawSrcGCPBucketURL.String(), rawDstBlobServiceURLWithSAS.String()) // bucket should not be resolved, and objects should not be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) loggedError := false log := glcm.(*mockedLifecycleManager).infoLog @@ -702,26 +717,27 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithBucketNameNeedBeResolv count = len(log) } - c.Assert(loggedError, chk.Equals, true) + a.True(loggedError) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithObjectUsingSlashAsSuffix(c *chk.C) { - skipIfGCPDisabled(c) +func TestS2SCopyFromGCPToBlobWithObjectUsingSlashAsSuffix(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) gcpClient, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP client credentials not supplied") + t.Skip("GCP client credentials not supplied") } // Generate source bucket bucketName := generateBucketName() - createNewGCPBucketWithName(c, gcpClient, bucketName) - defer deleteGCPBucket(c, gcpClient, bucketName, true) + createNewGCPBucketWithName(a, gcpClient, bucketName) + defer deleteGCPBucket(gcpClient, bucketName, true) dstContainerName := generateContainerName() objectList := []string{"fileConsiderdAsDirectory/", "file", "sub1/file"} - scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketName, objectList) + scenarioHelper{}.generateGCPObjects(a, gcpClient, bucketName, objectList) validateObjectList := []string{"/file", "/sub1/file"} // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. 
@@ -731,87 +747,89 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromGCPToBlobWithObjectUsingSlashAsSuff mockedRPC.init() // construct the raw input to simulate user input - rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketName) // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcGCPBucketURL := scenarioHelper{}.getRawGCPBucketURL(a, bucketName) // Use default region + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcGCPBucketURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(validateObjectList)) + a.Equal(len(validateObjectList), len(mockedRPC.transfers)) - validateS2STransfersAreScheduled(c, "", "/"+bucketName, validateObjectList, mockedRPC) + validateS2STransfersAreScheduled(a, "", "/"+bucketName, validateObjectList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromGCPObjectToBlobContainer(c *chk.C) { - skipIfGCPDisabled(c) +func TestS2SCopyFromGCPObjectToBlobContainer(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) gcpClient, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP client credentials not supplied") + t.Skip("GCP client credentials not supplied") } bucketName := generateBucketName() - createNewGCPBucketWithName(c, gcpClient, bucketName) - defer deleteGCPBucket(c, gcpClient, bucketName, true) + createNewGCPBucketWithName(a, gcpClient, bucketName) + defer deleteGCPBucket(gcpClient, bucketName, true) dstContainerName := generateContainerName() objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketName, objectList) + scenarioHelper{}.generateGCPObjects(a, gcpClient, bucketName, objectList) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawSrcGCPObjectURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketName, "file") // Use default region + rawSrcGCPObjectURL := scenarioHelper{}.getRawGCPObjectURL(a, bucketName, "file") // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcGCPObjectURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) mockedRPC.reset() - rawSrcGCPObjectURL = scenarioHelper{}.getRawGCPObjectURL(c, bucketName, "sub/file2") // Use default region - rawDstContainerURLWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcGCPObjectURL = scenarioHelper{}.getRawGCPObjectURL(a, bucketName, "sub/file2") // Use default region + rawDstContainerURLWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, 
dstContainerName) raw = getDefaultRawCopyInput(rawSrcGCPObjectURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[0].Destination) }) } // Copy from container to container, preserve blob tier. -func (s *cmdIntegrationSuite) TestS2SCopyFromContainerToContainerPreserveBlobTier(c *chk.C) { +func TestS2SCopyFromContainerToContainerPreserveBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) blobName := "blobWithCoolTier" - scenarioHelper{}.generateBlockBlobWithAccessTier(c, srcContainerURL, blobName, azblob.AccessTierCool) + scenarioHelper{}.generateBlockBlobWithAccessTier(a, srcContainerURL, blobName, azblob.AccessTierCool) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := interceptor{} @@ -819,35 +837,35 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromContainerToContainerPreserveBlobTie mockedRPC.init() // construct the raw input to simulate user input - rawSrcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcContainerURLWithSAS.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateS2STransfersAreScheduled(c, + validateS2STransfersAreScheduled(a, "", "/"+srcContainerName, []string{common.AZCOPY_PATH_SEPARATOR_STRING + blobName}, mockedRPC) // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. - - c.Assert(mockedRPC.transfers[0].BlobTier, chk.Equals, azblob.AccessTierCool) + a.Equal(azblob.AccessTierCool, mockedRPC.transfers[0].BlobTier) }) } // Copy from container to container, and don't preserve blob tier. 
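The two container-to-container cases around this point differ only in raw.s2sPreserveAccessTier: with preservation on (the default raw input) the scheduled transfer keeps the source blob's tier, and with it off the transfer carries azblob.AccessTierNone. A compact sketch of that expectation; expectedTier is an invented helper, not part of AzCopy, and the import path simply matches the one used in these files.

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// expectedTier mirrors what the two tests assert: the source blob's tier is
// kept only when s2sPreserveAccessTier is set on the copy input.
func expectedTier(srcTier azblob.AccessTierType, preserve bool) azblob.AccessTierType {
	if preserve {
		return srcTier
	}
	return azblob.AccessTierNone
}

func main() {
	fmt.Println(expectedTier(azblob.AccessTierCool, true) == azblob.AccessTierCool)  // true
	fmt.Println(expectedTier(azblob.AccessTierCool, false) == azblob.AccessTierNone) // true
}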
-func (s *cmdIntegrationSuite) TestS2SCopyFromContainerToContainerNoPreserveBlobTier(c *chk.C) { +func TestS2SCopyFromContainerToContainerNoPreserveBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) blobName := "blobWithCoolTier" - scenarioHelper{}.generateBlockBlobWithAccessTier(c, srcContainerURL, blobName, azblob.AccessTierCool) + scenarioHelper{}.generateBlockBlobWithAccessTier(a, srcContainerURL, blobName, azblob.AccessTierCool) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := interceptor{} @@ -855,38 +873,39 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromContainerToContainerNoPreserveBlobT mockedRPC.init() // construct the raw input to simulate user input - rawSrcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcContainerURLWithSAS.String(), rawDstContainerURLWithSAS.String()) raw.s2sPreserveAccessTier = false // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - validateS2STransfersAreScheduled(c, + validateS2STransfersAreScheduled(a, "", "/"+srcContainerName, []string{common.AZCOPY_PATH_SEPARATOR_STRING + blobName}, mockedRPC) // common.AZCOPY_PATH_SEPARATOR_STRING added for JobPartPlan file change. 
- c.Assert(mockedRPC.transfers[0].BlobTier, chk.Equals, azblob.AccessTierNone) + a.Equal(azblob.AccessTierNone, mockedRPC.transfers[0].BlobTier) }) } // Attempt to copy from a page blob to a block blob -func (s *cmdIntegrationSuite) TestS2SCopyFromPageToBlockBlob(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestS2SCopyFromPageToBlockBlob(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generatePageBlobsFromList(c, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generatePageBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // Set up interceptor mockedRPC := interceptor{} @@ -894,51 +913,52 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromPageToBlockBlob(c *chk.C) { mockedRPC.init() // Prepare copy command - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") - rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") + rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "BlockBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) // Prepare copy command - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") - rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") + rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "BlockBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[1].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[1].Destination) }) } // Attempt to copy from a block blob to a page blob -func (s *cmdIntegrationSuite) TestS2SCopyFromBlockToPageBlob(c *chk.C) { +func TestS2SCopyFromBlockToPageBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - srcContainerURL, 
srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // Set up interceptor mockedRPC := interceptor{} @@ -946,51 +966,52 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromBlockToPageBlob(c *chk.C) { mockedRPC.init() // Prepare copy command - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") - rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") + rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "PageBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) // Prepare copy command - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") - rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") + rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "PageBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[1].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[1].Destination) }) } // Attempt to copy from a block blob to an append blob -func (s *cmdIntegrationSuite) TestS2SCopyFromBlockToAppendBlob(c *chk.C) { +func TestS2SCopyFromBlockToAppendBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, objectList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, objectList, blockBlobDefaultData) // Create destination container - 
dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // Set up interceptor mockedRPC := interceptor{} @@ -998,52 +1019,53 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromBlockToAppendBlob(c *chk.C) { mockedRPC.init() // Prepare copy command - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") - rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") + rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "AppendBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) // Prepare copy command - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") - rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") + rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "AppendBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[1].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[1].Destination) }) } // Attempt to copy from an append blob to a block blob -func (s *cmdIntegrationSuite) TestS2SCopyFromAppendToBlockBlob(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestS2SCopyFromAppendToBlockBlob(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateAppendBlobsFromList(c, srcContainerURL, objectList, appendBlobDefaultData) + scenarioHelper{}.generateAppendBlobsFromList(a, srcContainerURL, objectList, appendBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // Set up interceptor mockedRPC := interceptor{} @@ -1051,52 +1073,53 @@ func (s *cmdIntegrationSuite) 
TestS2SCopyFromAppendToBlockBlob(c *chk.C) { mockedRPC.init() // Prepare copy command - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") - rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") + rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "BlockBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) // Prepare copy command - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") - rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") + rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "BlockBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[1].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[1].Destination) }) } // Attempt to copy from a page blob to an append blob -func (s *cmdIntegrationSuite) TestS2SCopyFromPageToAppendBlob(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestS2SCopyFromPageToAppendBlob(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generatePageBlobsFromList(c, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generatePageBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // Set up interceptor mockedRPC := interceptor{} @@ -1104,52 +1127,53 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromPageToAppendBlob(c *chk.C) { mockedRPC.init() // Prepare copy command - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") - rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") + rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a,
dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "AppendBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) // Prepare copy command - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") - rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") + rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "AppendBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[1].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[1].Destination) }) } // Attempt to copy from an append blob to a page blob -func (s *cmdIntegrationSuite) TestS2SCopyFromAppendToPageBlob(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestS2SCopyFromAppendToPageBlob(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() // Generate source container and blobs - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateAppendBlobsFromList(c, srcContainerURL, objectList, pageBlobDefaultData) + scenarioHelper{}.generateAppendBlobsFromList(a, srcContainerURL, objectList, pageBlobDefaultData) // Create destination container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // Set up interceptor mockedRPC := interceptor{} @@ -1157,49 +1181,50 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromAppendToPageBlob(c *chk.C) { mockedRPC.init() // Prepare copy command - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") - rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") + rawDstContainerUrlWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "PageBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination,
chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) // Prepare copy command - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") - rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") + rawDstContainerUrlWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerUrlWithSAS.String()) raw.blobType = "PageBlob" // Run copy command - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[1].Destination, chk.Equals, "/file2") + a.Equal("file2/", mockedRPC.transfers[1].Destination) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromSingleBlobToBlobContainer(c *chk.C) { +func TestS2SCopyFromSingleBlobToBlobContainer(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - c.Assert(srcContainerURL, chk.NotNil) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + a.NotNil(srcContainerURL) objectList := []string{"file", "sub/file2"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, objectList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, objectList, blockBlobDefaultData) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := interceptor{} @@ -1207,50 +1232,51 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromSingleBlobToBlobContainer(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "file") // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "file") // Use default region + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) mockedRPC.reset() - rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, "sub/file2") // Use default region - rawDstContainerURLWithSAS = scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcBlobURL = scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, "sub/file2") // Use default region + rawDstContainerURLWithSAS = 
scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw = getDefaultRawCopyInput(rawSrcBlobURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file2") + a.Equal("/file2", mockedRPC.transfers[0].Destination) }) } -func (s *cmdIntegrationSuite) TestS2SCopyFromSingleAzureFileToBlobContainer(c *chk.C) { +func TestS2SCopyFromSingleAzureFileToBlobContainer(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - c.Assert(srcShareURL, chk.NotNil) + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + a.NotNil(srcShareURL) - scenarioHelper{}.generateFlatFiles(c, srcShareURL, []string{"file"}) + scenarioHelper{}.generateFlatFiles(a, srcShareURL, []string{"file"}) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) - c.Assert(dstContainerURL, chk.NotNil) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) + a.NotNil(dstContainerURL) // set up interceptor mockedRPC := interceptor{} @@ -1258,59 +1284,60 @@ func (s *cmdIntegrationSuite) TestS2SCopyFromSingleAzureFileToBlobContainer(c *c mockedRPC.init() // construct the raw input to simulate user input - rawSrcFileURL := scenarioHelper{}.getRawFileURLWithSAS(c, srcShareName, "file") // Use default region - rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + rawSrcFileURL := scenarioHelper{}.getRawFileURLWithSAS(a, srcShareName, "file") // Use default region + rawDstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultRawCopyInput(rawSrcFileURL.String(), rawDstContainerURLWithSAS.String()) // bucket should be resolved, and objects should be scheduled for transfer - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 1) + a.Equal(1, len(mockedRPC.transfers)) - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + a.Equal("/file", mockedRPC.transfers[0].Destination) }) } -func (s *cmdIntegrationSuite) TestCopyWithDFSResource(c *chk.C) { +func TestCopyWithDFSResource(t *testing.T) { + a := assert.New(t) // invoke the interceptor so lifecycle manager does not shut down the tests ctx := context.Background() // get service SAS for raw input - serviceURLWithSAS := scenarioHelper{}.getRawAdlsServiceURLWithSAS(c) + serviceURLWithSAS := scenarioHelper{}.getRawAdlsServiceURLWithSAS(a) // Set up source // set up the file system bfsServiceURLSource := GetBFSSU() - fsURLSource, fsNameSource := createNewFilesystem(c, bfsServiceURLSource) - defer deleteFilesystem(c, fsURLSource) + fsURLSource, fsNameSource := createNewFilesystem(a, bfsServiceURLSource) + defer deleteFilesystem(a, fsURLSource) // set up the parent parentDirNameSource := generateName("dir", 0) parentDirURLSource := 
fsURLSource.NewDirectoryURL(parentDirNameSource) _, err := parentDirURLSource.Create(ctx, true) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up the file fileNameSource := generateName("file", 0) fileURLSource := parentDirURLSource.NewFileURL(fileNameSource) _, err = fileURLSource.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) + a.Nil(err) dirURLWithSASSource := serviceURLWithSAS.NewFileSystemURL(fsNameSource).NewDirectoryURL(parentDirNameSource) // Set up destination // set up the file system bfsServiceURL := GetBFSSU() - fsURL, fsName := createNewFilesystem(c, bfsServiceURL) - defer deleteFilesystem(c, fsURL) + fsURL, fsName := createNewFilesystem(a, bfsServiceURL) + defer deleteFilesystem(a, fsURL) // set up the parent parentDirName := generateName("dir", 0) parentDirURL := fsURL.NewDirectoryURL(parentDirName) _, err = parentDirURL.Create(ctx, true) - c.Assert(err, chk.IsNil) + a.Nil(err) dirURLWithSAS := serviceURLWithSAS.NewFileSystemURL(fsName).NewDirectoryURL(parentDirName) // ===================================== @@ -1325,13 +1352,13 @@ func (s *cmdIntegrationSuite) TestCopyWithDFSResource(c *chk.C) { Rpc = mockedRPC.intercept mockedRPC.init() - runCopyAndVerify(c, rawCopy, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, rawCopy, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) - // c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file") + // a.Equal("/file", mockedRPC.transfers[0].Destination) }) // 2. Verify Sync between dfs and dfs works. @@ -1340,16 +1367,16 @@ func (s *cmdIntegrationSuite) TestCopyWithDFSResource(c *chk.C) { fileNameSource = generateName("file2", 0) fileURLSource = parentDirURLSource.NewFileURL(fileNameSource) _, err = fileURLSource.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) + a.Nil(err) rawSync := getDefaultSyncRawInput(dirURLWithSASSource.String(), dirURLWithSAS.String()) - runSyncAndVerify(c, rawSync, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, rawSync, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 2) + a.Equal(2, len(mockedRPC.transfers)) // c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "/file2") }) -} +} \ No newline at end of file diff --git a/cmd/zt_credentialUtil_test.go b/cmd/zt_credentialUtil_test.go index 83aedf95e..fb2699c42 100644 --- a/cmd/zt_credentialUtil_test.go +++ b/cmd/zt_credentialUtil_test.go @@ -22,7 +22,9 @@ package cmd import ( "context" + "github.com/stretchr/testify/assert" "strings" + "testing" "github.com/Azure/azure-storage-azcopy/v10/common" chk "gopkg.in/check.v1" @@ -32,7 +34,8 @@ type credentialUtilSuite struct{} var _ = chk.Suite(&credentialUtilSuite{}) -func (s *credentialUtilSuite) TestCheckAuthSafeForTarget(c *chk.C) { +func TestCheckAuthSafeForTarget(t *testing.T) { + a := assert.New(t) tests := []struct { ct common.CredentialType resourceType common.Location @@ -84,11 +87,12 @@ func (s *credentialUtilSuite) TestCheckAuthSafeForTarget(c *chk.C) { for i, t := range tests { err := checkAuthSafeForTarget(t.ct, t.resource, t.extraSuffixesAAD, t.resourceType) - c.Assert(err == nil, chk.Equals, t.expectedOK, chk.Commentf("Failed on test %d for resource %s", i, t.resource)) + a.Equal(t.expectedOK, err == nil, chk.Commentf("Failed on 
test %d for resource %s", i, t.resource)) } } -func (s *credentialUtilSuite) TestCheckAuthSafeForTargetIsCalledWhenGettingAuthType(c *chk.C) { +func TestCheckAuthSafeForTargetIsCalledWhenGettingAuthType(t *testing.T) { + a := assert.New(t) mockGetCredTypeFromEnvVar := func() common.CredentialType { return common.ECredentialType.OAuthToken() // force it to OAuth, which is the case we want to test } @@ -97,12 +101,12 @@ func (s *credentialUtilSuite) TestCheckAuthSafeForTargetIsCalledWhenGettingAuthT // that it really does fail. // This checks that our safety check is hooked into the main logic _, _, err := doGetCredentialTypeForLocation(context.Background(), common.ELocation.Blob(), "http://notblob.example.com", "", true, mockGetCredTypeFromEnvVar, common.CpkOptions{}) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), "If this URL is in fact an Azure service, you can enable Azure authentication to notblob.example.com."), - chk.Equals, true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), "If this URL is in fact an Azure service, you can enable Azure authentication to notblob.example.com.")) } -func (s *credentialUtilSuite) TestCheckAuthSafeForTargetIsCalledWhenGettingAuthTypeMDOAuth(c *chk.C) { +func TestCheckAuthSafeForTargetIsCalledWhenGettingAuthTypeMDOAuth(t *testing.T) { + a := assert.New(t) mockGetCredTypeFromEnvVar := func() common.CredentialType { return common.ECredentialType.MDOAuthToken() // force it to OAuth, which is the case we want to test } @@ -111,7 +115,6 @@ func (s *credentialUtilSuite) TestCheckAuthSafeForTargetIsCalledWhenGettingAuthT // that it really does fail. // This checks that our safety check is hooked into the main logic _, _, err := doGetCredentialTypeForLocation(context.Background(), common.ELocation.Blob(), "http://notblob.example.com", "", true, mockGetCredTypeFromEnvVar, common.CpkOptions{}) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), "If this URL is in fact an Azure service, you can enable Azure authentication to notblob.example.com."), - chk.Equals, true) -} + a.NotNil(err) + a.True(strings.Contains(err.Error(), "If this URL is in fact an Azure service, you can enable Azure authentication to notblob.example.com.")) +} \ No newline at end of file diff --git a/cmd/zt_generic_filter_test.go b/cmd/zt_generic_filter_test.go index 7d8d0a36c..300cbcc8a 100644 --- a/cmd/zt_generic_filter_test.go +++ b/cmd/zt_generic_filter_test.go @@ -23,7 +23,9 @@ package cmd import ( "errors" "fmt" + "github.com/stretchr/testify/assert" "strings" + "testing" "time" chk "gopkg.in/check.v1" @@ -33,7 +35,8 @@ type genericFilterSuite struct{} var _ = chk.Suite(&genericFilterSuite{}) -func (s *genericFilterSuite) TestIncludeFilter(c *chk.C) { +func TestIncludeFilter(t *testing.T) { + a := assert.New(t) // set up the filters raw := rawSyncCmdArgs{} includePatternList := raw.parsePatterns("*.pdf;*.jpeg;exactName") @@ -43,18 +46,19 @@ func (s *genericFilterSuite) TestIncludeFilter(c *chk.C) { filesToPass := []string{"bla.pdf", "fancy.jpeg", "socool.jpeg.pdf", "exactName"} for _, file := range filesToPass { passed := includeFilter.DoesPass(StoredObject{name: file}) - c.Assert(passed, chk.Equals, true) + a.True(passed) } // test the negative cases filesNotToPass := []string{"bla.pdff", "fancyjpeg", "socool.jpeg.pdf.wut", "eexactName"} for _, file := range filesNotToPass { passed := includeFilter.DoesPass(StoredObject{name: file}) - c.Assert(passed, chk.Equals, false) + a.False(passed) } } -func (s *genericFilterSuite) 
TestExcludeFilter(c *chk.C) { +func TestExcludeFilter(t *testing.T) { + a := assert.New(t) // set up the filters raw := rawSyncCmdArgs{} excludePatternList := raw.parsePatterns("*.pdf;*.jpeg;exactName") @@ -65,8 +69,8 @@ func (s *genericFilterSuite) TestExcludeFilter(c *chk.C) { for _, file := range filesToPass { dummyProcessor := &dummyProcessor{} err := processIfPassedFilters(excludeFilterList, StoredObject{name: file}, dummyProcessor.process) - c.Assert(err, chk.IsNil) - c.Assert(len(dummyProcessor.record), chk.Equals, 1) + a.Nil(err) + a.Equal(1, len(dummyProcessor.record)) } // test the negative cases @@ -74,12 +78,13 @@ func (s *genericFilterSuite) TestExcludeFilter(c *chk.C) { for _, file := range filesToNotPass { dummyProcessor := &dummyProcessor{} err := processIfPassedFilters(excludeFilterList, StoredObject{name: file}, dummyProcessor.process) - c.Assert(err, chk.Equals, ignoredError) - c.Assert(len(dummyProcessor.record), chk.Equals, 0) + a.Equal(ignoredError, err) + a.Zero(len(dummyProcessor.record)) } } -func (s *genericFilterSuite) TestDateParsingForIncludeAfter(c *chk.C) { +func TestDateParsingForIncludeAfter(t *testing.T) { + a := assert.New(t) examples := []struct { input string // ISO 8601 expectedValue string // RFC822Z (with seconds and X for placeholder for local timezone offset) @@ -110,7 +115,7 @@ func (s *genericFilterSuite) TestDateParsingForIncludeAfter(c *chk.C) { for _, x := range examples { t, err := IncludeAfterDateFilter{}.ParseISO8601(x.input, true) if x.expectedErrorContents == "" { - c.Assert(err, chk.IsNil, chk.Commentf(x.input)) + a.Nil(err, x.input) //fmt.Printf("%v -> %v\n", x.input, t) expString := x.expectedValue expectedTime, expErr := time.Parse(expectedFormatWithTz, expString) @@ -119,14 +124,14 @@ func (s *genericFilterSuite) TestDateParsingForIncludeAfter(c *chk.C) { expString = strings.Replace(x.expectedValue, " X", "", -1) expectedTime, expErr = time.ParseInLocation(expectedFormatShort, expString, loc) } - c.Assert(expErr, chk.IsNil) + a.Nil(expErr) foo := expectedTime.String() if foo == "" { } - c.Check(t.Equal(expectedTime), chk.Equals, true, chk.Commentf(x.input)) + a.True(t.Equal(expectedTime), x.input) } else { - c.Assert(err, chk.Not(chk.IsNil)) - c.Assert(strings.Contains(err.Error(), x.expectedErrorContents), chk.Equals, true, chk.Commentf(x.input)) + a.NotNil(err) + a.True(strings.Contains(err.Error(), x.expectedErrorContents), x.input) } } } @@ -137,29 +142,30 @@ func (s *genericFilterSuite) TestDateParsingForIncludeAfter(c *chk.C) { // with include-after failing to pick up changes, if the include-after date fails within the ambiguous hour and files have been // changed in that hour. Using the earliest possible interpretation of the date avoids that problem). // There's no similar ambiguity in spring, because there an hour is just skipped. -func (s *genericFilterSuite) TestDateParsingForIncludeAfter_IsSafeAtDaylightSavingsTransition(c *chk.C) { +func TestDateParsingForIncludeAfter_IsSafeAtDaylightSavingsTransition(t *testing.T) { + a := assert.New(t) - dateString, utcEarlyVersion, utcLateVersion, err := s.findAmbiguousTime() + dateString, utcEarlyVersion, utcLateVersion, err := findAmbiguousTime() if err == noAmbiguousHourError { - c.Skip(fmt.Sprintf("Cannot run daylight savings test, because local timezone does not appear to have daylight savings time. Local time is %v", time.Now())) + t.Skip(fmt.Sprintf("Cannot run daylight savings test, because local timezone does not appear to have daylight savings time. 
Local time is %v", time.Now())) } - c.Assert(err, chk.IsNil) + a.Nil(err) fmt.Println("Testing end of daylight saving at " + dateString + " local time") // ask for the earliest of the two ambiguous times parsed, err := IncludeAfterDateFilter{}.ParseISO8601(dateString, true) // we use chooseEarliest=true for includeAfter - c.Assert(err, chk.IsNil) + a.Nil(err) fmt.Printf("For chooseEarliest = true, the times are parsed %v, utcEarly %v, utcLate %v \n", parsed, utcEarlyVersion, utcLateVersion) - c.Assert(parsed.Equal(utcEarlyVersion), chk.Equals, true) - c.Assert(parsed.Equal(utcLateVersion), chk.Equals, false) + a.True(parsed.Equal(utcEarlyVersion)) + a.False(parsed.Equal(utcLateVersion)) // ask for the latest of the two ambiguous times parsed, err = IncludeAfterDateFilter{}.ParseISO8601(dateString, false) // we test the false case in this test too, just for completeness - c.Assert(err, chk.IsNil) + a.Nil(err) fmt.Printf("For chooseEarliest = false, the times are parsed %v, utcEarly %v, utcLate %v \n", parsed, utcEarlyVersion, utcLateVersion) - c.Assert(parsed.UTC().Equal(utcEarlyVersion), chk.Equals, false) - c.Assert(parsed.UTC().Equal(utcLateVersion), chk.Equals, true) + a.False(parsed.UTC().Equal(utcEarlyVersion)) + a.True(parsed.UTC().Equal(utcLateVersion)) } @@ -167,7 +173,7 @@ var noAmbiguousHourError = errors.New("could not find hour for end of daylight s // Go's Location object is opaque to us, so we can't directly use it to see when daylight savings ends. // So we'll just test all the hours in the year, and see! -func (_ *genericFilterSuite) findAmbiguousTime() (string, time.Time, time.Time, error) { +func findAmbiguousTime() (string, time.Time, time.Time, error) { const localTimeFormat = "2006-01-02T15:04:05" start := time.Now().UTC() end := start.AddDate(1, 0, 0) @@ -181,4 +187,4 @@ func (_ *genericFilterSuite) findAmbiguousTime() (string, time.Time, time.Time, } return "", time.Time{}, time.Time{}, noAmbiguousHourError -} +} \ No newline at end of file diff --git a/cmd/zt_generic_processor_test.go b/cmd/zt_generic_processor_test.go index 48d590987..9a545062c 100644 --- a/cmd/zt_generic_processor_test.go +++ b/cmd/zt_generic_processor_test.go @@ -22,9 +22,11 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/stretchr/testify/assert" chk "gopkg.in/check.v1" "os" "path/filepath" + "testing" "time" ) @@ -60,12 +62,13 @@ func (processorTestSuiteHelper) getCopyJobTemplate() *common.CopyJobPartOrderReq return &common.CopyJobPartOrderRequest{Fpo: common.EFolderPropertiesOption.NoFolders(), SymlinkHandlingType: common.ESymlinkHandlingType.Skip()} } -func (s *genericProcessorSuite) TestCopyTransferProcessorMultipleFiles(c *chk.C) { +func TestCopyTransferProcessorMultipleFiles(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up source and destination - containerURL, _ := getContainerURL(c, bsu) - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + containerURL, _ := getContainerURL(a, bsu) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -83,39 +86,40 @@ func (s *genericProcessorSuite) TestCopyTransferProcessorMultipleFiles(c *chk.C) // go through the objects and make sure they are processed without error for _, storedObject := range sampleObjects { err := copyProcessor.scheduleCopyTransfer(storedObject) - c.Assert(err, chk.IsNil) + a.Nil(err) } // make sure everything has been dispatched apart from the final one - c.Assert(copyProcessor.copyJobTemplate.PartNum, 
chk.Equals, common.PartNumber(numOfParts-1)) + a.Equal(common.PartNumber(numOfParts-1), copyProcessor.copyJobTemplate.PartNum) // dispatch final part jobInitiated, err := copyProcessor.dispatchFinalPart() - c.Assert(jobInitiated, chk.Equals, true) - c.Assert(err, chk.IsNil) + a.True(jobInitiated) + a.Nil(err) // assert the right transfers were scheduled - validateCopyTransfersAreScheduled(c, false, false, "", "", processorTestSuiteHelper{}.getExpectedTransferFromStoredObjectList(sampleObjects), mockedRPC) + validateCopyTransfersAreScheduled(a, false, false, "", "", processorTestSuiteHelper{}.getExpectedTransferFromStoredObjectList(sampleObjects), mockedRPC) mockedRPC.reset() } } -func (s *genericProcessorSuite) TestCopyTransferProcessorSingleFile(c *chk.C) { +func TestCopyTransferProcessorSingleFile(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, _ := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, _ := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up the container with a single blob blobList := []string{"singlefile101"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + a.NotNil(containerURL) // set up the directory with a single file - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := blobList[0] - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) // set up interceptor mockedRPC := interceptor{} @@ -130,20 +134,20 @@ func (s *genericProcessorSuite) TestCopyTransferProcessorSingleFile(c *chk.C) { // exercise the copy transfer processor storedObject := newStoredObject(noPreProccessor, blobList[0], "", common.EEntityType.File(), time.Now(), 0, noContentProps, noBlobProps, noMetdata, "") err := copyProcessor.scheduleCopyTransfer(storedObject) - c.Assert(err, chk.IsNil) + a.Nil(err) // no part should have been dispatched - c.Assert(copyProcessor.copyJobTemplate.PartNum, chk.Equals, common.PartNumber(0)) + a.Equal(common.PartNumber(0), copyProcessor.copyJobTemplate.PartNum) // dispatch final part jobInitiated, err := copyProcessor.dispatchFinalPart() - c.Assert(jobInitiated, chk.Equals, true) + a.True(jobInitiated) // In cases of syncing file to file, the source and destination are empty because this info is already in the root // path. 
- c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "") - c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, "") + a.Equal("", mockedRPC.transfers[0].Source) + a.Equal("", mockedRPC.transfers[0].Destination) // assert the right transfers were scheduled - validateCopyTransfersAreScheduled(c, false, false, "", "", []string{""}, mockedRPC) -} + validateCopyTransfersAreScheduled(a, false, false, "", "", []string{""}, mockedRPC) +} \ No newline at end of file diff --git a/cmd/zt_generic_service_traverser_test.go b/cmd/zt_generic_service_traverser_test.go index 716f7c11c..cdb64f3cc 100644 --- a/cmd/zt_generic_service_traverser_test.go +++ b/cmd/zt_generic_service_traverser_test.go @@ -2,15 +2,16 @@ package cmd import ( "context" + "github.com/stretchr/testify/assert" + "testing" + "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-file-go/azfile" - chk "gopkg.in/check.v1" - - "github.com/Azure/azure-storage-azcopy/v10/common" ) -func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { +func TestServiceTraverserWithManyObjects(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() testS3 := false // Only test S3 if credentials are present. @@ -20,26 +21,26 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { if err == nil && !isS3Disabled() { testS3 = true } else { - c.Log("WARNING: Service level traverser is NOT testing S3") + t.Log("WARNING: Service level traverser is NOT testing S3") } gcpClient, err := createGCPClientWithGCSSDK() if err == nil && !gcpTestsDisabled() { testGCP = true } else { - c.Log("WARNING: Service level traverser is NOT testing GCP") + t.Log("WARNING: Service level traverser is NOT testing GCP") } // Clean the accounts to ensure that only the containers we create exist if testS3 { - cleanS3Account(c, s3Client) + cleanS3Account(s3Client) } if testGCP { - cleanGCPAccount(c, gcpClient) + cleanGCPAccount(gcpClient) } // BlobFS is tested on the same account, therefore this is safe to clean up this way - cleanBlobAccount(c, bsu) - cleanFileAccount(c, fsu) + cleanBlobAccount(a, bsu) + cleanFileAccount(a, fsu) containerList := []string{ generateName("suchcontainermanystorage", 63), @@ -64,13 +65,13 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { objectData := "Hello world!" 
// Generate remote scenarios - scenarioHelper{}.generateBlobContainersAndBlobsFromLists(c, bsu, containerList, objectList, objectData) - scenarioHelper{}.generateFileSharesAndFilesFromLists(c, fsu, containerList, objectList, objectData) + scenarioHelper{}.generateBlobContainersAndBlobsFromLists(a, bsu, containerList, objectList, objectData) + scenarioHelper{}.generateFileSharesAndFilesFromLists(a, fsu, containerList, objectList, objectData) if testS3 { - scenarioHelper{}.generateS3BucketsAndObjectsFromLists(c, s3Client, containerList, objectList, objectData) + scenarioHelper{}.generateS3BucketsAndObjectsFromLists(a, s3Client, containerList, objectList, objectData) } if testGCP { - scenarioHelper{}.generateGCPBucketsAndObjectsFromLists(c, gcpClient, containerList, objectList) + scenarioHelper{}.generateGCPBucketsAndObjectsFromLists(a, gcpClient, containerList, objectList) } // deferred container cleanup @@ -85,7 +86,7 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { _ = s3Client.RemoveBucket(v) } if testGCP { - deleteGCPBucket(c, gcpClient, v, true) + deleteGCPBucket(gcpClient, v, true) } _, _ = blobContainer.Delete(ctx, azblob.ContainerAccessConditions{}) _, _ = fileShare.Delete(ctx, azfile.DeleteSnapshotsOptionNone) @@ -93,8 +94,8 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { }() // Generate local files to ensure behavior conforms to other traversers - dstDirName := scenarioHelper{}.generateLocalDirectory(c) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, objectList) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, objectList) // Create a local traversal localTraverser, _ := newLocalTraverser(context.TODO(), dstDirName, true, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) @@ -102,51 +103,51 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { // Invoke the traversal with an indexer so the results are indexed for easy validation localIndexer := newObjectIndexer() err = localTraverser.Traverse(noPreProccessor, localIndexer.store, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct a blob account traverser blobPipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None()) // invoke the blob account traversal with a dummy processor blobDummyProcessor := dummyProcessor{} err = blobAccountTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct a file account traverser filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFSU := scenarioHelper{}.getRawFileServiceURLWithSAS(c) + rawFSU := scenarioHelper{}.getRawFileServiceURLWithSAS(a) fileAccountTraverser := newFileAccountTraverser(&rawFSU, filePipeline, ctx, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable()) // invoke the file account traversal with a dummy processor fileDummyProcessor := dummyProcessor{} err = fileAccountTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) var 
s3DummyProcessor dummyProcessor var gcpDummyProcessor dummyProcessor if testS3 { // construct a s3 service traverser - accountURL := scenarioHelper{}.getRawS3AccountURL(c, "") + accountURL := scenarioHelper{}.getRawS3AccountURL(a, "") s3ServiceTraverser, err := newS3ServiceTraverser(&accountURL, ctx, false, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) // invoke the s3 service traversal with a dummy processor s3DummyProcessor = dummyProcessor{} err = s3ServiceTraverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) } if testGCP { - gcpAccountURL := scenarioHelper{}.getRawGCPAccountURL(c) + gcpAccountURL := scenarioHelper{}.getRawGCPAccountURL(a) gcpServiceTraverser, err := newGCPServiceTraverser(&gcpAccountURL, ctx, false, func(entityType common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) gcpDummyProcessor = dummyProcessor{} err = gcpServiceTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) } records := append(blobDummyProcessor.record, fileDummyProcessor.record...) @@ -158,14 +159,14 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { localFileOnlyCount++ } } - c.Assert(len(blobDummyProcessor.record), chk.Equals, localFileOnlyCount*len(containerList)) - c.Assert(len(fileDummyProcessor.record), chk.Equals, localTotalCount*len(containerList)) + a.Equal(localFileOnlyCount*len(containerList), len(blobDummyProcessor.record)) + a.Equal(localTotalCount*len(containerList), len(fileDummyProcessor.record)) if testS3 { - c.Assert(len(s3DummyProcessor.record), chk.Equals, localFileOnlyCount*len(containerList)) + a.Equal(localFileOnlyCount*len(containerList), len(s3DummyProcessor.record)) records = append(records, s3DummyProcessor.record...) } if testGCP { - c.Assert(len(gcpDummyProcessor.record), chk.Equals, localFileOnlyCount*len(containerList)) + a.Equal(localFileOnlyCount*len(containerList), len(gcpDummyProcessor.record)) records = append(records, gcpDummyProcessor.record...) } @@ -173,13 +174,14 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { correspondingLocalFile, present := localIndexer.indexMap[storedObject.relativePath] _, cnamePresent := cnames[storedObject.ContainerName] - c.Assert(present, chk.Equals, true) - c.Assert(cnamePresent, chk.Equals, true) - c.Assert(correspondingLocalFile.name, chk.Equals, storedObject.name) + a.True(present) + a.True(cnamePresent) + a.Equal(storedObject.name, correspondingLocalFile.name) } } -func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { +func TestServiceTraverserWithWildcards(t *testing.T) { + a := assert.New(t) bsu := getBSU() fsu := getFSU() testS3 := false // Only test S3 if credentials are present. 
@@ -189,25 +191,25 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { if !isS3Disabled() && err == nil { testS3 = true } else { - c.Log("WARNING: Service level traverser is NOT testing S3") + t.Log("WARNING: Service level traverser is NOT testing S3") } gcpClient, err := createGCPClientWithGCSSDK() if !gcpTestsDisabled() && err == nil { testGCP = true } else { - c.Log("WARNING: Service level traverser is NOT testing GCP") + t.Log("WARNING: Service level traverser is NOT testing GCP") } // Clean the accounts to ensure that only the containers we create exist if testS3 { - cleanS3Account(c, s3Client) + cleanS3Account(s3Client) } if testGCP { - cleanGCPAccount(c, gcpClient) + cleanGCPAccount(gcpClient) } - cleanBlobAccount(c, bsu) - cleanFileAccount(c, fsu) + cleanBlobAccount(a, bsu) + cleanFileAccount(a, fsu) containerList := []string{ generateName("objectmatchone", 63), @@ -232,13 +234,13 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { objectData := "Hello world!" // Generate remote scenarios - scenarioHelper{}.generateBlobContainersAndBlobsFromLists(c, bsu, containerList, objectList, objectData) - scenarioHelper{}.generateFileSharesAndFilesFromLists(c, fsu, containerList, objectList, objectData) + scenarioHelper{}.generateBlobContainersAndBlobsFromLists(a, bsu, containerList, objectList, objectData) + scenarioHelper{}.generateFileSharesAndFilesFromLists(a, fsu, containerList, objectList, objectData) if testS3 { - scenarioHelper{}.generateS3BucketsAndObjectsFromLists(c, s3Client, containerList, objectList, objectData) + scenarioHelper{}.generateS3BucketsAndObjectsFromLists(a, s3Client, containerList, objectList, objectData) } if testGCP { - scenarioHelper{}.generateGCPBucketsAndObjectsFromLists(c, gcpClient, containerList, objectList) + scenarioHelper{}.generateGCPBucketsAndObjectsFromLists(a, gcpClient, containerList, objectList) } // deferred container cleanup @@ -253,7 +255,7 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { _ = s3Client.RemoveBucket(v) } if testGCP { - deleteGCPBucket(c, gcpClient, v, true) + deleteGCPBucket(gcpClient, v, true) } _, _ = blobContainer.Delete(ctx, azblob.ContainerAccessConditions{}) _, _ = fileShare.Delete(ctx, azfile.DeleteSnapshotsOptionNone) @@ -261,8 +263,8 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { }() // Generate local files to ensure behavior conforms to other traversers - dstDirName := scenarioHelper{}.generateLocalDirectory(c) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, objectList) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, objectList) // Create a local traversal localTraverser, _ := newLocalTraverser(context.TODO(), dstDirName, true, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) @@ -270,58 +272,58 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { // Invoke the traversal with an indexer so the results are indexed for easy validation localIndexer := newObjectIndexer() err = localTraverser.Traverse(noPreProccessor, localIndexer.store, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct a blob account traverser blobPipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) + rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(a) 
rawBSU.Path = "/objectmatch*" // set the container name to contain a wildcard blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None()) // invoke the blob account traversal with a dummy processor blobDummyProcessor := dummyProcessor{} err = blobAccountTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct a file account traverser filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFSU := scenarioHelper{}.getRawFileServiceURLWithSAS(c) + rawFSU := scenarioHelper{}.getRawFileServiceURLWithSAS(a) rawFSU.Path = "/objectmatch*" // set the container name to contain a wildcard fileAccountTraverser := newFileAccountTraverser(&rawFSU, filePipeline, ctx, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable()) // invoke the file account traversal with a dummy processor fileDummyProcessor := dummyProcessor{} err = fileAccountTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) var s3DummyProcessor dummyProcessor var gcpDummyProcessor dummyProcessor if testS3 { // construct a s3 service traverser - accountURL, err := common.NewS3URLParts(scenarioHelper{}.getRawS3AccountURL(c, "")) - c.Assert(err, chk.IsNil) + accountURL, err := common.NewS3URLParts(scenarioHelper{}.getRawS3AccountURL(a, "")) + a.Nil(err) accountURL.BucketName = "objectmatch*" // set the container name to contain a wildcard urlOut := accountURL.URL() s3ServiceTraverser, err := newS3ServiceTraverser(&urlOut, ctx, false, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) // invoke the s3 service traversal with a dummy processor s3DummyProcessor = dummyProcessor{} err = s3ServiceTraverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) } if testGCP { - gcpAccountURL, err := common.NewGCPURLParts(scenarioHelper{}.getRawGCPAccountURL(c)) - c.Assert(err, chk.IsNil) + gcpAccountURL, err := common.NewGCPURLParts(scenarioHelper{}.getRawGCPAccountURL(a)) + a.Nil(err) gcpAccountURL.BucketName = "objectmatch*" urlStr := gcpAccountURL.URL() gcpServiceTraverser, err := newGCPServiceTraverser(&urlStr, ctx, false, func(entityType common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) gcpDummyProcessor = dummyProcessor{} err = gcpServiceTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) } records := append(blobDummyProcessor.record, fileDummyProcessor.record...) @@ -335,14 +337,14 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { } // Only two containers should match. - c.Assert(len(blobDummyProcessor.record), chk.Equals, localFileOnlyCount*2) - c.Assert(len(fileDummyProcessor.record), chk.Equals, localTotalCount*2) + a.Equal(localFileOnlyCount*2, len(blobDummyProcessor.record)) + a.Equal(localTotalCount*2, len(fileDummyProcessor.record)) if testS3 { - c.Assert(len(s3DummyProcessor.record), chk.Equals, localFileOnlyCount*2) + a.Equal(localFileOnlyCount*2, len(s3DummyProcessor.record)) records = append(records, s3DummyProcessor.record...) } if testGCP { - c.Assert(len(gcpDummyProcessor.record), chk.Equals, localFileOnlyCount*2) + a.Equal(localFileOnlyCount*2, len(gcpDummyProcessor.record)) records = append(records, gcpDummyProcessor.record...) 
} @@ -350,8 +352,8 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { correspondingLocalFile, present := localIndexer.indexMap[storedObject.relativePath] _, cnamePresent := cnames[storedObject.ContainerName] - c.Assert(present, chk.Equals, true) - c.Assert(cnamePresent, chk.Equals, true) - c.Assert(correspondingLocalFile.name, chk.Equals, storedObject.name) + a.True(present) + a.True(cnamePresent) + a.Equal(storedObject.name, correspondingLocalFile.name) } -} +} \ No newline at end of file diff --git a/cmd/zt_generic_traverser_test.go b/cmd/zt_generic_traverser_test.go index 70369281a..ee3b06942 100644 --- a/cmd/zt_generic_traverser_test.go +++ b/cmd/zt_generic_traverser_test.go @@ -23,11 +23,13 @@ package cmd import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/stretchr/testify/assert" "io" "os" "path/filepath" "runtime" "strings" + "testing" "time" gcpUtils "cloud.google.com/go/storage" @@ -47,40 +49,63 @@ var _ = chk.Suite(&genericTraverserSuite{}) // On Windows, if you don't hold adequate permissions to create a symlink, tests regarding symlinks will fail. // This is arguably annoying to dig through, therefore, we cleanly skip the test. -func trySymlink(src, dst string, c *chk.C) { +func trySymlink(src, dst string, t *testing.T) { if err := os.Symlink(src, dst); err != nil { if strings.Contains(err.Error(), "A required privilege is not held by the client") { - c.Skip("client lacks required privilege to create symlinks; symlinks will not be tested") + t.Skip("client lacks required privilege to create symlinks; symlinks will not be tested") } - c.Error(err) + t.Error(err) } } -func (s *genericTraverserSuite) TestLocalWildcardOverlap(c *chk.C) { +func TestLocalWildcardOverlap(t *testing.T) { + a := assert.New(t) if runtime.GOOS == "windows" { - c.Skip("invalid filename used") + t.Skip("invalid filename used") return } /* - Wildcard support is not actually a part of the local traverser, believe it or not. - It's instead implemented in InitResourceTraverser as a short-circuit to a list traverser - utilizing the filepath.Glob function, which then initializes local traversers to achieve the same effect. - */ - tmpDir := scenarioHelper{}.generateLocalDirectory(c) + Wildcard support is not actually a part of the local traverser, believe it or not. + It's instead implemented in InitResourceTraverser as a short-circuit to a list traverser + utilizing the filepath.Glob function, which then initializes local traversers to achieve the same effect. 
+ */ + tmpDir := scenarioHelper{}.generateLocalDirectory(a) defer func(path string) { _ = os.RemoveAll(path) }(tmpDir) - scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, []string{ + scenarioHelper{}.generateLocalFilesFromList(a, tmpDir, []string{ "test.txt", "tes*t.txt", "foobarbaz/test.txt", }) resource, err := SplitResourceString(filepath.Join(tmpDir, "tes*t.txt"), common.ELocation.Local()) - c.Assert(err, chk.IsNil) - - traverser, err := InitResourceTraverser(resource, common.ELocation.Local(), nil, nil, common.ESymlinkHandlingType.Follow(), nil, true, false, false, common.EPermanentDeleteOption.None(), nil, nil, false, common.ESyncHashType.None(), common.EPreservePermissionsOption.None(), pipeline.LogInfo, common.CpkOptions{}, nil, true, common.ETrailingDotOption.Enable(), nil) - c.Assert(err, chk.IsNil) + a.Nil(err) + + traverser, err := InitResourceTraverser( + resource, + common.ELocation.Local(), + nil, + nil, + common.ESymlinkHandlingType.Follow(), + nil, + true, + false, + false, + common.EPermanentDeleteOption.None(), + nil, + nil, + false, + common.ESyncHashType.None(), + common.EPreservePermissionsOption.None(), + pipeline.LogInfo, + common.CpkOptions{}, + nil, + true, + common.ETrailingDotOption.Enable(), + nil, + ) + a.Nil(err) seenFiles := make(map[string]bool) @@ -88,20 +113,21 @@ func (s *genericTraverserSuite) TestLocalWildcardOverlap(c *chk.C) { seenFiles[storedObject.relativePath] = true return nil }, []ObjectFilter{}) - c.Assert(err, chk.IsNil) + a.Nil(err) - c.Assert(seenFiles, chk.DeepEquals, map[string]bool{ + a.Equal(map[string]bool{ "test.txt": true, "tes*t.txt": true, - }) + }, seenFiles) } // GetProperties tests. // GetProperties does not exist on Blob, as the properties come in the list call. // While BlobFS could get properties in the future, it's currently disabled as BFS source S2S isn't set up right now, and likely won't be. 
-func (s *genericTraverserSuite) TestFilesGetProperties(c *chk.C) { +func TestFilesGetProperties(t *testing.T) { + a := assert.New(t) fsu := getFSU() - share, shareName := createNewAzureShare(c, fsu) + share, shareName := createNewAzureShare(a, fsu) fileName := generateAzureFileName() headers := azfile.FileHTTPHeaders{ @@ -112,10 +138,10 @@ func (s *genericTraverserSuite) TestFilesGetProperties(c *chk.C) { CacheControl: "testCacheControl", } - scenarioHelper{}.generateAzureFilesFromList(c, share, []string{fileName}) + scenarioHelper{}.generateAzureFilesFromList(a, share, []string{fileName}) _, err := share.NewRootDirectoryURL().NewFileURL(fileName).SetHTTPHeaders(ctx, headers) - c.Assert(err, chk.IsNil) - shareURL := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + a.Nil(err) + shareURL := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) pipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) // first test reading from the share itself @@ -126,39 +152,40 @@ func (s *genericTraverserSuite) TestFilesGetProperties(c *chk.C) { processor := func(object StoredObject) error { if object.entityType == common.EEntityType.File() { // test all attributes (but only for files, since folders don't have them) - c.Assert(object.contentType, chk.Equals, headers.ContentType) - c.Assert(object.contentEncoding, chk.Equals, headers.ContentEncoding) - c.Assert(object.contentLanguage, chk.Equals, headers.ContentLanguage) - c.Assert(object.contentDisposition, chk.Equals, headers.ContentDisposition) - c.Assert(object.cacheControl, chk.Equals, headers.CacheControl) + a.Equal(headers.ContentType, object.contentType) + a.Equal(headers.ContentEncoding, object.contentEncoding) + a.Equal(headers.ContentLanguage, object.contentLanguage) + a.Equal(headers.ContentDisposition, object.contentDisposition) + a.Equal(headers.CacheControl, object.cacheControl) seenContentType = true } return nil } err = traverser.Traverse(noPreProccessor, processor, nil) - c.Assert(err, chk.IsNil) - c.Assert(seenContentType, chk.Equals, true) + a.Nil(err) + a.True(seenContentType) // then test reading from the filename exactly, because that's a different codepath. seenContentType = false - fileURL := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, fileName) + fileURL := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileName) traverser = newFileTraverser(&fileURL, pipeline, ctx, false, true, func(common.EntityType) {}, common.ETrailingDotOption.Enable()) err = traverser.Traverse(noPreProccessor, processor, nil) - c.Assert(err, chk.IsNil) - c.Assert(seenContentType, chk.Equals, true) + a.Nil(err) + a.True(seenContentType) } -func (s *genericTraverserSuite) TestS3GetProperties(c *chk.C) { - skipIfS3Disabled(c) +func TestS3GetProperties(t *testing.T) { + a := assert.New(t) + skipIfS3Disabled(t) client, err := createS3ClientWithMinio(createS3ResOptions{}) if err != nil { // TODO: Alter all tests that use S3 credentials to just skip instead of failing // This is useful for local testing, when we don't want to have to sift through errors related to S3 clients not being created // Just so that we can test locally without interrupting CI. 
- c.Skip("S3-based tests will not be ran as no credentials were supplied.") + t.Skip("S3-based tests will not be ran as no credentials were supplied.") return // make syntax highlighting happy } @@ -173,54 +200,55 @@ func (s *genericTraverserSuite) TestS3GetProperties(c *chk.C) { bucketName := generateBucketName() objectName := generateObjectName() err = client.MakeBucket(bucketName, "") - defer deleteBucket(c, client, bucketName, false) - c.Assert(err, chk.IsNil) + defer deleteBucket(client, bucketName, false) + a.Nil(err) _, err = client.PutObjectWithContext(ctx, bucketName, objectName, strings.NewReader(objectDefaultData), int64(len(objectDefaultData)), headers) - c.Assert(err, chk.IsNil) + a.Nil(err) // First test against the bucket - s3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) + s3BucketURL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()} traverser, err := newS3Traverser(credentialInfo.CredentialType, &s3BucketURL, ctx, false, true, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) // Embed the check into the processor for ease of use seenContentType := false processor := func(object StoredObject) error { // test all attributes - c.Assert(object.contentType, chk.Equals, headers.ContentType) - c.Assert(object.contentEncoding, chk.Equals, headers.ContentEncoding) - c.Assert(object.contentLanguage, chk.Equals, headers.ContentLanguage) - c.Assert(object.contentDisposition, chk.Equals, headers.ContentDisposition) - c.Assert(object.cacheControl, chk.Equals, headers.CacheControl) + a.Equal(headers.ContentType, object.contentType) + a.Equal(headers.ContentEncoding, object.contentEncoding) + a.Equal(headers.ContentLanguage, object.contentLanguage) + a.Equal(headers.ContentDisposition, object.contentDisposition) + a.Equal(headers.CacheControl, object.cacheControl) seenContentType = true return nil } err = traverser.Traverse(noPreProccessor, processor, nil) - c.Assert(err, chk.IsNil) - c.Assert(seenContentType, chk.Equals, true) + a.Nil(err) + a.True(seenContentType) // Then, test against the object itself because that's a different codepath. 
seenContentType = false - s3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, objectName) + s3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(a, "", bucketName, objectName) credentialInfo = common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()} traverser, err = newS3Traverser(credentialInfo.CredentialType, &s3ObjectURL, ctx, false, true, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = traverser.Traverse(noPreProccessor, processor, nil) - c.Assert(err, chk.IsNil) - c.Assert(seenContentType, chk.Equals, true) + a.Nil(err) + a.True(seenContentType) } -func (s *genericTraverserSuite) TestGCPGetProperties(c *chk.C) { - skipIfGCPDisabled(c) +func TestGCPGetProperties(t *testing.T) { + a := assert.New(t) + skipIfGCPDisabled(t) client, err := createGCPClientWithGCSSDK() if err != nil { - c.Skip("GCP-based tests will not be run as no credentials were supplied.") + t.Skip("GCP-based tests will not be run as no credentials were supplied.") return } @@ -236,79 +264,80 @@ func (s *genericTraverserSuite) TestGCPGetProperties(c *chk.C) { objectName := generateObjectName() bkt := client.Bucket(bucketName) err = bkt.Create(context.Background(), os.Getenv("GOOGLE_CLOUD_PROJECT"), &gcpUtils.BucketAttrs{}) - defer deleteGCPBucket(c, client, bucketName, false) - c.Assert(err, chk.IsNil) + defer deleteGCPBucket(client, bucketName, false) + a.Nil(err) reader := strings.NewReader(objectDefaultData) obj := bkt.Object(objectName) wc := obj.NewWriter(ctx) n, err := io.Copy(wc, reader) - c.Assert(err, chk.IsNil) - c.Assert(n, chk.Equals, int64(len(objectDefaultData))) + a.Nil(err) + a.Equal(int64(len(objectDefaultData)), n) err = wc.Close() - c.Assert(err, chk.IsNil) + a.Nil(err) _, err = obj.Update(ctx, headers) - c.Assert(err, chk.IsNil) + a.Nil(err) // First test against the bucket - gcpBucketURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketName) + gcpBucketURL := scenarioHelper{}.getRawGCPBucketURL(a, bucketName) traverser, err := newGCPTraverser(&gcpBucketURL, ctx, false, true, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) // Embed the check into the processor for ease of use seenContentType := false processor := func(object StoredObject) error { // test all attributes - c.Assert(object.contentType, chk.Equals, headers.ContentType) - c.Assert(object.contentEncoding, chk.Equals, headers.ContentEncoding) - c.Assert(object.contentLanguage, chk.Equals, headers.ContentLanguage) - c.Assert(object.contentDisposition, chk.Equals, headers.ContentDisposition) - c.Assert(object.cacheControl, chk.Equals, headers.CacheControl) + a.Equal(headers.ContentType, object.contentType) + a.Equal(headers.ContentEncoding, object.contentEncoding) + a.Equal(headers.ContentLanguage, object.contentLanguage) + a.Equal(headers.ContentDisposition, object.contentDisposition) + a.Equal(headers.CacheControl, object.cacheControl) seenContentType = true return nil } err = traverser.Traverse(noPreProccessor, processor, nil) - c.Assert(err, chk.IsNil) - c.Assert(seenContentType, chk.Equals, true) + a.Nil(err) + a.True(seenContentType) // Then, test against the object itself because that's a different codepath. 
seenContentType = false - gcpObjectURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketName, objectName) + gcpObjectURL := scenarioHelper{}.getRawGCPObjectURL(a, bucketName, objectName) traverser, err = newGCPTraverser(&gcpObjectURL, ctx, false, true, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = traverser.Traverse(noPreProccessor, processor, nil) - c.Assert(err, chk.IsNil) - c.Assert(seenContentType, chk.Equals, true) + a.Nil(err) + a.True(seenContentType) } // Test follow symlink functionality -func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFolder(c *chk.C) { +func TestWalkWithSymlinks_ToFolder(t *testing.T) { + a := assert.New(t) fileNames := []string{"March 20th is international happiness day.txt", "wonderwall but it goes on and on and on.mp3", "bonzi buddy.exe"} - tmpDir := scenarioHelper{}.generateLocalDirectory(c) + tmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(tmpDir) - symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(c) + symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(symlinkTmpDir) - c.Assert(tmpDir, chk.Not(chk.Equals), symlinkTmpDir) + a.NotEqual(symlinkTmpDir, tmpDir) - scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames) - scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, fileNames) + scenarioHelper{}.generateLocalFilesFromList(a, tmpDir, fileNames) + scenarioHelper{}.generateLocalFilesFromList(a, symlinkTmpDir, fileNames) dirLinkName := "so long and thanks for all the fish" time.Sleep(2 * time.Second) // to be sure to get different LMT for link, compared to root, so we can make assertions later about whose fileInfo we get - trySymlink(symlinkTmpDir, filepath.Join(tmpDir, dirLinkName), c) + trySymlink(symlinkTmpDir, filepath.Join(tmpDir, dirLinkName), t) fileCount := 0 sawLinkTargetDir := false - c.Assert(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { - c.Assert(err, chk.IsNil) + a.Nil(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { + a.Nil(err) if fi.IsDir() { if fi.Name() == dirLinkName { sawLinkTargetDir = true s, _ := os.Stat(symlinkTmpDir) - c.Assert(fi.ModTime().UTC(), chk.Equals, s.ModTime().UTC()) + a.Equal(s.ModTime().UTC(), fi.ModTime().UTC()) } return nil } @@ -316,31 +345,32 @@ func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFolder(c *chk.C) { fileCount++ return nil }, - common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil)) // 3 files live in base, 3 files live in symlink - c.Assert(fileCount, chk.Equals, 6) - c.Assert(sawLinkTargetDir, chk.Equals, true) + a.Equal(6, fileCount) + a.True(sawLinkTargetDir) } // Next test is temporarily disabled, to avoid changing functionality near 10.4 release date /* // symlinks are not just to folders. 
They may be to individual files -func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFile(c *chk.C) { +func TestWalkWithSymlinks_ToFile(t *testing.T) { + a := assert.New(t) mainDirFilenames := []string{"iAmANormalFile.txt"} symlinkTargetFilenames := []string{"iAmASymlinkTargetFile.txt"} - tmpDir := scenarioHelper{}.generateLocalDirectory(c) + tmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(tmpDir) - symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(c) + symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(symlinkTmpDir) c.Assert(tmpDir, chk.Not(chk.Equals), symlinkTmpDir) - scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, mainDirFilenames) - scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, symlinkTargetFilenames) + scenarioHelper{}.generateLocalFilesFromList(a, tmpDir, mainDirFilenames) + scenarioHelper{}.generateLocalFilesFromList(a, symlinkTmpDir, symlinkTargetFilenames) trySymlink(filepath.Join(symlinkTmpDir, symlinkTargetFilenames[0]), filepath.Join(tmpDir, "iPointToTheSymlink"), c) trySymlink(filepath.Join(symlinkTmpDir, symlinkTargetFilenames[0]), filepath.Join(tmpDir, "iPointToTheSameSymlink"), c) fileCount := 0 c.Assert(WalkWithSymlinks(tmpDir, func(path string, fi os.FileInfo, err error) error { - c.Assert(err, chk.IsNil) + a.Nil(err) if fi.IsDir() { return nil } @@ -360,19 +390,20 @@ func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFile(c *chk.C) { */ // Test cancel symlink loop functionality -func (s *genericTraverserSuite) TestWalkWithSymlinksBreakLoop(c *chk.C) { +func TestWalkWithSymlinksBreakLoop(t *testing.T) { + a := assert.New(t) fileNames := []string{"stonks.txt", "jaws but its a baby shark.mp3", "my crow soft.txt"} - tmpDir := scenarioHelper{}.generateLocalDirectory(c) + tmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(tmpDir) - scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames) - trySymlink(tmpDir, filepath.Join(tmpDir, "spinloop"), c) + scenarioHelper{}.generateLocalFilesFromList(a, tmpDir, fileNames) + trySymlink(tmpDir, filepath.Join(tmpDir, "spinloop"), t) // Only 3 files should ever be found. 
// This is because the symlink links back to the root dir fileCount := 0 - c.Assert(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { - c.Assert(err, chk.IsNil) + a.Nil(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { + a.Nil(err) if fi.IsDir() { return nil @@ -381,28 +412,29 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksBreakLoop(c *chk.C) { fileCount++ return nil }, - common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil)) - c.Assert(fileCount, chk.Equals, 3) + a.Equal(3, fileCount) } // Test ability to dedupe within the same directory -func (s *genericTraverserSuite) TestWalkWithSymlinksDedupe(c *chk.C) { +func TestWalkWithSymlinksDedupe(t *testing.T) { + a := assert.New(t) fileNames := []string{"stonks.txt", "jaws but its a baby shark.mp3", "my crow soft.txt"} - tmpDir := scenarioHelper{}.generateLocalDirectory(c) + tmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(tmpDir) symlinkTmpDir, err := os.MkdirTemp(tmpDir, "subdir") - c.Assert(err, chk.IsNil) + a.Nil(err) - scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames) - scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, fileNames) - trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "symlinkdir"), c) + scenarioHelper{}.generateLocalFilesFromList(a, tmpDir, fileNames) + scenarioHelper{}.generateLocalFilesFromList(a, symlinkTmpDir, fileNames) + trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "symlinkdir"), t) // Only 6 files should ever be found. // 3 in the root dir, 3 in subdir, then symlinkdir should be ignored because it's been seen. fileCount := 0 - c.Assert(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { - c.Assert(err, chk.IsNil) + a.Nil(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { + a.Nil(err) if fi.IsDir() { return nil @@ -411,29 +443,30 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksDedupe(c *chk.C) { fileCount++ return nil }, - common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil)) - c.Assert(fileCount, chk.Equals, 6) + a.Equal(6, fileCount) } // Test ability to only get the output of one symlink when two point to the same place -func (s *genericTraverserSuite) TestWalkWithSymlinksMultitarget(c *chk.C) { +func TestWalkWithSymlinksMultitarget(t *testing.T) { + a := assert.New(t) fileNames := []string{"March 20th is international happiness day.txt", "wonderwall but it goes on and on and on.mp3", "bonzi buddy.exe"} - tmpDir := scenarioHelper{}.generateLocalDirectory(c) + tmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(tmpDir) - symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(c) + symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(symlinkTmpDir) - c.Assert(tmpDir, chk.Not(chk.Equals), symlinkTmpDir) + a.NotEqual(symlinkTmpDir, tmpDir) - scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames) - scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, fileNames) - trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "so long and thanks for all the fish"), c) - trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "extradir"), c) - trySymlink(filepath.Join(tmpDir, "extradir"), filepath.Join(tmpDir, "linktolink"), c) + scenarioHelper{}.generateLocalFilesFromList(a, tmpDir, fileNames) + scenarioHelper{}.generateLocalFilesFromList(a, 
symlinkTmpDir, fileNames) + trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "so long and thanks for all the fish"), t) + trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "extradir"), t) + trySymlink(filepath.Join(tmpDir, "extradir"), filepath.Join(tmpDir, "linktolink"), t) fileCount := 0 - c.Assert(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { - c.Assert(err, chk.IsNil) + a.Nil(WalkWithSymlinks(context.TODO(), tmpDir, func(path string, fi os.FileInfo, err error) error { + a.Nil(err) if fi.IsDir() { return nil @@ -442,31 +475,32 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksMultitarget(c *chk.C) { fileCount++ return nil }, - common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil)) // 3 files live in base, 3 files live in first symlink, second & third symlink is ignored. - c.Assert(fileCount, chk.Equals, 6) + a.Equal(6, fileCount) } -func (s *genericTraverserSuite) TestWalkWithSymlinksToParentAndChild(c *chk.C) { +func TestWalkWithSymlinksToParentAndChild(t *testing.T) { + a := assert.New(t) fileNames := []string{"file1.txt", "file2.txt", "file3.txt"} - root1 := scenarioHelper{}.generateLocalDirectory(c) + root1 := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(root1) - root2 := scenarioHelper{}.generateLocalDirectory(c) + root2 := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(root2) child, err := os.MkdirTemp(root2, "childdir") - c.Assert(err, chk.IsNil) + a.Nil(err) - scenarioHelper{}.generateLocalFilesFromList(c, root2, fileNames) - scenarioHelper{}.generateLocalFilesFromList(c, child, fileNames) - trySymlink(root2, filepath.Join(root1, "toroot"), c) - trySymlink(child, filepath.Join(root1, "tochild"), c) + scenarioHelper{}.generateLocalFilesFromList(a, root2, fileNames) + scenarioHelper{}.generateLocalFilesFromList(a, child, fileNames) + trySymlink(root2, filepath.Join(root1, "toroot"), t) + trySymlink(child, filepath.Join(root1, "tochild"), t) fileCount := 0 - c.Assert(WalkWithSymlinks(context.TODO(), root1, func(path string, fi os.FileInfo, err error) error { - c.Assert(err, chk.IsNil) + a.Nil(WalkWithSymlinks(context.TODO(), root1, func(path string, fi os.FileInfo, err error) error { + a.Nil(err) if fi.IsDir() { return nil @@ -475,22 +509,27 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksToParentAndChild(c *chk.C) { fileCount++ return nil }, - common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil)) // 6 files total live under toroot. tochild should be ignored (or if tochild was traversed first, child will be ignored on toroot). 
- c.Assert(fileCount, chk.Equals, 6) + a.Equal(6, fileCount) } // validate traversing a single Blob, a single Azure File, and a single local file // compare that the traversers get consistent results -func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) { +func TestTraverserWithSingleObject(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) fsu := getFSU() - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) + + bfsu := GetBFSSU() + filesystemURL, _ := createNewFilesystem(a, bfsu) + defer deleteFilesystem(a, filesystemURL) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) s3Enabled := err == nil && !isS3Disabled() @@ -499,25 +538,25 @@ func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) { var bucketName string var bucketNameGCP string if s3Enabled { - bucketName = createNewBucket(c, s3Client, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + bucketName = createNewBucket(a, s3Client, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) } if gcpEnabled { - bucketNameGCP = createNewGCPBucket(c, gcpClient) - defer deleteGCPBucket(c, gcpClient, bucketNameGCP, true) + bucketNameGCP = createNewGCPBucket(a, gcpClient) + defer deleteGCPBucket(gcpClient, bucketNameGCP, true) } // test two scenarios, either blob is at the root virtual dir, or inside sub virtual dirs for _, storedObjectName := range []string{"sub1/sub2/singleblobisbest", "nosubsingleblob", "满汉全席.txt"} { // set up the container with a single blob blobList := []string{storedObjectName} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) // set up the directory as a single file - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := storedObjectName - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) // construct a local traverser localTraverser, _ := newLocalTraverser(context.TODO(), filepath.Join(dstDirName, dstFileName), false, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) @@ -525,24 +564,24 @@ func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) { // invoke the local traversal with a dummy processor localDummyProcessor := dummyProcessor{} err := localTraverser.Traverse(noPreProccessor, localDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) - c.Assert(len(localDummyProcessor.record), chk.Equals, 1) + a.Nil(err) + a.Equal(1, len(localDummyProcessor.record)) // construct a blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, false, 
false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // invoke the blob traversal with a dummy processor blobDummyProcessor := dummyProcessor{} err = blobTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) - c.Assert(len(blobDummyProcessor.record), chk.Equals, 1) + a.Nil(err) + a.Equal(1, len(blobDummyProcessor.record)) // assert the important info are correct - c.Assert(localDummyProcessor.record[0].name, chk.Equals, blobDummyProcessor.record[0].name) - c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, blobDummyProcessor.record[0].relativePath) + a.Equal(localDummyProcessor.record[0].name, blobDummyProcessor.record[0].name) + a.Equal(localDummyProcessor.record[0].relativePath, blobDummyProcessor.record[0].relativePath) // Azure File cannot handle names with '/' in them // TODO: Construct a directory URL and then build a file URL atop it in order to solve this portion of the test. @@ -551,75 +590,76 @@ func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) { if !strings.Contains(storedObjectName, "/") { // set up the Azure Share with a single file fileList := []string{storedObjectName} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) // construct an Azure file traverser filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, fileList[0]) + rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileList[0]) azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, false, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable()) // invoke the file traversal with a dummy processor fileDummyProcessor := dummyProcessor{} err = azureFileTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) - c.Assert(len(fileDummyProcessor.record), chk.Equals, 1) + a.Nil(err) + a.Equal(1, len(fileDummyProcessor.record)) - c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, fileDummyProcessor.record[0].relativePath) - c.Assert(localDummyProcessor.record[0].name, chk.Equals, fileDummyProcessor.record[0].name) + a.Equal(localDummyProcessor.record[0].relativePath, fileDummyProcessor.record[0].relativePath) + a.Equal(localDummyProcessor.record[0].name, fileDummyProcessor.record[0].name) } if s3Enabled { // set up the bucket with a single file s3List := []string{storedObjectName} - scenarioHelper{}.generateObjects(c, s3Client, bucketName, s3List) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, s3List) // construct a s3 traverser s3DummyProcessor := dummyProcessor{} - url := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, storedObjectName) + url := scenarioHelper{}.getRawS3ObjectURL(a, "", bucketName, storedObjectName) credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()} S3Traverser, err := newS3Traverser(credentialInfo.CredentialType, &url, ctx, false, false, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = S3Traverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil) - c.Assert(err, chk.IsNil) - c.Assert(len(s3DummyProcessor.record), chk.Equals, 1) + a.Nil(err) + a.Equal(1, len(s3DummyProcessor.record)) - c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, 
s3DummyProcessor.record[0].relativePath) - c.Assert(localDummyProcessor.record[0].name, chk.Equals, s3DummyProcessor.record[0].name) + a.Equal(localDummyProcessor.record[0].relativePath, s3DummyProcessor.record[0].relativePath) + a.Equal(localDummyProcessor.record[0].name, s3DummyProcessor.record[0].name) } if gcpEnabled { gcpList := []string{storedObjectName} - scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketNameGCP, gcpList) + scenarioHelper{}.generateGCPObjects(a, gcpClient, bucketNameGCP, gcpList) gcpDummyProcessor := dummyProcessor{} - gcpURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketNameGCP, storedObjectName) + gcpURL := scenarioHelper{}.getRawGCPObjectURL(a, bucketNameGCP, storedObjectName) GCPTraverser, err := newGCPTraverser(&gcpURL, ctx, false, false, func(entityType common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = GCPTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) - c.Assert(len(gcpDummyProcessor.record), chk.Equals, 1) + a.Nil(err) + a.Equal(1, len(gcpDummyProcessor.record)) - c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, gcpDummyProcessor.record[0].relativePath) - c.Assert(localDummyProcessor.record[0].name, chk.Equals, gcpDummyProcessor.record[0].name) + a.Equal(localDummyProcessor.record[0].relativePath, gcpDummyProcessor.record[0].relativePath) + a.Equal(localDummyProcessor.record[0].name, gcpDummyProcessor.record[0].name) } } } // validate traversing a container, a share, and a local directory containing the same objects // compare that traversers get consistent results -func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C) { +func TestTraverserContainerAndLocalDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) fsu := getFSU() - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) bfsu := GetBFSSU() - filesystemURL, _ := createNewFilesystem(c, bfsu) - defer deleteFilesystem(c, filesystemURL) + filesystemURL, _ := createNewFilesystem(a, bfsu) + defer deleteFilesystem(a, filesystemURL) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) s3Enabled := err == nil && !isS3Disabled() // are creds supplied, and is S3 enabled @@ -628,35 +668,35 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C var bucketName string var bucketNameGCP string if s3Enabled { - bucketName = createNewBucket(c, s3Client, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + bucketName = createNewBucket(a, s3Client, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) } if gcpEnabled { - bucketNameGCP = createNewGCPBucket(c, gcpClient) - defer deleteGCPBucket(c, gcpClient, bucketNameGCP, true) + bucketNameGCP = createNewGCPBucket(a, gcpClient) + defer deleteGCPBucket(gcpClient, bucketNameGCP, true) } // set up the container with numerous blobs - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - c.Assert(containerURL, chk.NotNil) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + a.NotNil(containerURL) // set up an Azure File Share with the same files - 
scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) // set up a filesystem with the same files - scenarioHelper{}.generateBFSPathsFromList(c, filesystemURL, fileList) + scenarioHelper{}.generateBFSPathsFromList(a, filesystemURL, fileList) if s3Enabled { // set up a bucket with the same files - scenarioHelper{}.generateObjects(c, s3Client, bucketName, fileList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, fileList) } if gcpEnabled { - scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketNameGCP, fileList) + scenarioHelper{}.generateGCPObjects(a, gcpClient, bucketNameGCP, fileList) } - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, fileList) // test two scenarios, either recursive or not for _, isRecursiveOn := range []bool{true, false} { @@ -667,46 +707,46 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C // so that the results are indexed for easy validation localIndexer := newObjectIndexer() err := localTraverser.Traverse(noPreProccessor, localIndexer.store, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct a blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) blobTraverser := newBlobTraverser(&rawContainerURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // invoke the local traversal with a dummy processor blobDummyProcessor := dummyProcessor{} err = blobTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct an Azure File traverser filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFileURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawFileURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, isRecursiveOn, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable()) // invoke the file traversal with a dummy processor fileDummyProcessor := dummyProcessor{} err = azureFileTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) s3DummyProcessor := dummyProcessor{} gcpDummyProcessor := dummyProcessor{} if s3Enabled { // construct and run a S3 traverser - rawS3URL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName) + rawS3URL := scenarioHelper{}.getRawS3BucketURL(a, "", bucketName) credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()} S3Traverser, err := newS3Traverser(credentialInfo.CredentialType, &rawS3URL, ctx, isRecursiveOn, false, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = S3Traverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) } if gcpEnabled { - rawGCPURL := 
scenarioHelper{}.getRawGCPBucketURL(c, bucketNameGCP) + rawGCPURL := scenarioHelper{}.getRawGCPBucketURL(a, bucketNameGCP) GCPTraverser, err := newGCPTraverser(&rawGCPURL, ctx, isRecursiveOn, false, func(entityType common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = GCPTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) } // make sure the results are as expected @@ -718,21 +758,21 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C } } - c.Assert(len(blobDummyProcessor.record), chk.Equals, localFileOnlyCount) + a.Equal(localFileOnlyCount, len(blobDummyProcessor.record)) if isRecursiveOn { - c.Assert(len(fileDummyProcessor.record), chk.Equals, localTotalCount) + a.Equal(localTotalCount, len(fileDummyProcessor.record)) } else { // in real usage, folders get stripped out in ToNewCopyTransfer when non-recursive, // but that doesn't run here in this test, // so we have to count files only on the processor - c.Assert(fileDummyProcessor.countFilesOnly(), chk.Equals, localTotalCount) + a.Equal(localTotalCount, fileDummyProcessor.countFilesOnly()) } if s3Enabled { - c.Assert(len(s3DummyProcessor.record), chk.Equals, localFileOnlyCount) + a.Equal(localFileOnlyCount, len(s3DummyProcessor.record)) } if gcpEnabled { - c.Assert(len(gcpDummyProcessor.record), chk.Equals, localFileOnlyCount) + a.Equal(localFileOnlyCount, len(gcpDummyProcessor.record)) } // if s3dummyprocessor is empty, it's A-OK because no records will be tested @@ -740,11 +780,11 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C if isRecursiveOn || storedObject.entityType == common.EEntityType.File() { // folder enumeration knowingly NOT consistent when non-recursive (since the folders get stripped out by ToNewCopyTransfer when non-recursive anyway) correspondingLocalFile, present := localIndexer.indexMap[storedObject.relativePath] - c.Assert(present, chk.Equals, true) - c.Assert(correspondingLocalFile.name, chk.Equals, storedObject.name) + a.True(present) + a.Equal(storedObject.name, correspondingLocalFile.name) if !isRecursiveOn { - c.Assert(strings.Contains(storedObject.relativePath, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(storedObject.relativePath, common.AZCOPY_PATH_SEPARATOR_STRING)) } } } @@ -753,14 +793,19 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C // validate traversing a virtual and a local directory containing the same objects // compare that blob and local traversers get consistent results -func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk.C) { +func TestTraverserWithVirtualAndLocalDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) fsu := getFSU() - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) + + bfsu := GetBFSSU() + filesystemURL, _ := createNewFilesystem(a, bfsu) + defer deleteFilesystem(a, filesystemURL) s3Client, err := createS3ClientWithMinio(createS3ResOptions{}) s3Enabled := err == nil && !isS3Disabled() @@ -768,36 +813,39 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk gcpEnabled := err == nil && 
!gcpTestsDisabled() var bucketName, bucketNameGCP string if s3Enabled { - bucketName = createNewBucket(c, s3Client, createS3ResOptions{}) - defer deleteBucket(c, s3Client, bucketName, true) + bucketName = createNewBucket(a, s3Client, createS3ResOptions{}) + defer deleteBucket(s3Client, bucketName, true) } if gcpEnabled { - bucketNameGCP = createNewGCPBucket(c, gcpClient) - defer deleteGCPBucket(c, gcpClient, bucketNameGCP, true) + bucketNameGCP = createNewGCPBucket(a, gcpClient) + defer deleteGCPBucket(gcpClient, bucketNameGCP, true) } // set up the container with numerous blobs virDirName := "virdir" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, virDirName+"/") - c.Assert(containerURL, chk.NotNil) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, virDirName+"/") + a.NotNil(containerURL) // set up an Azure File Share with the same files - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) + + // set up the filesystem with the same files + scenarioHelper{}.generateBFSPathsFromList(a, filesystemURL, fileList) if s3Enabled { // Set up the bucket with the same files - scenarioHelper{}.generateObjects(c, s3Client, bucketName, fileList) + scenarioHelper{}.generateObjects(a, s3Client, bucketName, fileList) } if gcpEnabled { - scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketNameGCP, fileList) + scenarioHelper{}.generateGCPObjects(a, gcpClient, bucketNameGCP, fileList) } time.Sleep(time.Second * 2) // Ensure the objects' LMTs are in the past // set up the destination with a folder that have the exact same files - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, fileList) // test two scenarios, either recursive or not for _, isRecursiveOn := range []bool{true, false} { @@ -808,28 +856,28 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk // so that the results are indexed for easy validation localIndexer := newObjectIndexer() err := localTraverser.Traverse(noPreProccessor, localIndexer.store, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct a blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, virDirName) + rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, virDirName) blobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // invoke the local traversal with a dummy processor blobDummyProcessor := dummyProcessor{} err = blobTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct an Azure File traverser filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{}) - rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, virDirName) + rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, virDirName) azureFileTraverser := 
newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, isRecursiveOn, false, func(common.EntityType) {}, common.ETrailingDotOption.Enable()) // invoke the file traversal with a dummy processor fileDummyProcessor := dummyProcessor{} err = azureFileTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) localTotalCount := len(localIndexer.indexMap) localFileOnlyCount := 0 @@ -844,33 +892,33 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk if s3Enabled { // construct and run a S3 traverser // directory object keys always end with / in S3 - rawS3URL := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, virDirName+"/") + rawS3URL := scenarioHelper{}.getRawS3ObjectURL(a, "", bucketName, virDirName+"/") credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()} S3Traverser, err := newS3Traverser(credentialInfo.CredentialType, &rawS3URL, ctx, isRecursiveOn, false, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = S3Traverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // check that the results are the same length - c.Assert(len(s3DummyProcessor.record), chk.Equals, localFileOnlyCount) + a.Equal(localFileOnlyCount, len(s3DummyProcessor.record)) } if gcpEnabled { - rawGCPURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketNameGCP, virDirName+"/") + rawGCPURL := scenarioHelper{}.getRawGCPObjectURL(a, bucketNameGCP, virDirName+"/") GCPTraverser, err := newGCPTraverser(&rawGCPURL, ctx, isRecursiveOn, false, func(common.EntityType) {}) - c.Assert(err, chk.IsNil) + a.Nil(err) err = GCPTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) - c.Assert(len(gcpDummyProcessor.record), chk.Equals, localFileOnlyCount) + a.Equal(localFileOnlyCount, len(gcpDummyProcessor.record)) } // make sure the results are as expected - c.Assert(len(blobDummyProcessor.record), chk.Equals, localFileOnlyCount) + a.Equal(localFileOnlyCount, len(blobDummyProcessor.record)) if isRecursiveOn { - c.Assert(len(fileDummyProcessor.record), chk.Equals, localTotalCount) + a.Equal(localTotalCount, len(fileDummyProcessor.record)) } else { // only files matter when not recursive (since ToNewCopyTransfer strips out everything else when non-recursive) - c.Assert(fileDummyProcessor.countFilesOnly(), chk.Equals, localTotalCount) + a.Equal(localTotalCount, fileDummyProcessor.countFilesOnly()) } // if s3 testing is disabled the s3 dummy processors' records will be empty. This is OK for appending. Nothing will happen. for _, storedObject := range append(append(append(blobDummyProcessor.record, fileDummyProcessor.record...), s3DummyProcessor.record...), gcpDummyProcessor.record...) { @@ -878,14 +926,14 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk correspondingLocalFile, present := localIndexer.indexMap[storedObject.relativePath] - c.Assert(present, chk.Equals, true) - c.Assert(correspondingLocalFile.name, chk.Equals, storedObject.name) + a.True(present) + a.Equal(storedObject.name, correspondingLocalFile.name) // Say, here's a good question, why do we have this last check? // None of the other tests have it. 
- c.Assert(correspondingLocalFile.isMoreRecentThan(storedObject, false), chk.Equals, true) + a.True(correspondingLocalFile.isMoreRecentThan(storedObject, false)) if !isRecursiveOn { - c.Assert(strings.Contains(storedObject.relativePath, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(storedObject.relativePath, common.AZCOPY_PATH_SEPARATOR_STRING)) } } } @@ -894,22 +942,23 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk // validate traversing a virtual directory containing the same objects // compare that the serial and parallel blob traversers get consistent results -func (s *genericTraverserSuite) TestSerialAndParallelBlobTraverser(c *chk.C) { +func TestSerialAndParallelBlobTraverser(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up the container with numerous blobs virDirName := "virdir" - scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, virDirName+"/") - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, virDirName+"/") + a.NotNil(containerURL) // test two scenarios, either recursive or not for _, isRecursiveOn := range []bool{true, false} { // construct a parallel blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) - rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, virDirName) + rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, virDirName) parallelBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // construct a serial blob traverser @@ -919,15 +968,15 @@ func (s *genericTraverserSuite) TestSerialAndParallelBlobTraverser(c *chk.C) { // invoke the parallel traversal with a dummy processor parallelDummyProcessor := dummyProcessor{} err := parallelBlobTraverser.Traverse(noPreProccessor, parallelDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // invoke the serial traversal with a dummy processor serialDummyProcessor := dummyProcessor{} err = parallelBlobTraverser.Traverse(noPreProccessor, serialDummyProcessor.process, nil) - c.Assert(err, chk.IsNil) + a.Nil(err) // make sure the results are as expected - c.Assert(len(parallelDummyProcessor.record), chk.Equals, len(serialDummyProcessor.record)) + a.Equal(len(serialDummyProcessor.record), len(parallelDummyProcessor.record)) // compare the entries one by one lookupMap := make(map[string]StoredObject) @@ -937,9 +986,9 @@ func (s *genericTraverserSuite) TestSerialAndParallelBlobTraverser(c *chk.C) { for _, storedObject := range serialDummyProcessor.record { correspondingFile, present := lookupMap[storedObject.relativePath] - c.Assert(present, chk.Equals, true) - c.Assert(storedObject.lastModifiedTime, chk.DeepEquals, correspondingFile.lastModifiedTime) - c.Assert(storedObject.md5, chk.DeepEquals, correspondingFile.md5) + a.True(present) + a.Equal(correspondingFile.lastModifiedTime, storedObject.lastModifiedTime) + a.Equal(correspondingFile.md5, storedObject.md5) } } -} +} \ No newline at end of file diff --git 
a/cmd/zt_interceptors_for_test.go b/cmd/zt_interceptors_for_test.go index 35eb16331..3370f8f7f 100644 --- a/cmd/zt_interceptors_for_test.go +++ b/cmd/zt_interceptors_for_test.go @@ -198,4 +198,4 @@ func (d *dummyProcessor) countFilesOnly() int { } } return n -} +} \ No newline at end of file diff --git a/cmd/zt_overwrite_posix_properties_test.go b/cmd/zt_overwrite_posix_properties_test.go index e97b5c55f..79fb94c60 100644 --- a/cmd/zt_overwrite_posix_properties_test.go +++ b/cmd/zt_overwrite_posix_properties_test.go @@ -22,25 +22,27 @@ package cmd import ( "context" + "github.com/stretchr/testify/assert" "os" "path/filepath" "runtime" "strconv" + "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) -func (s *cmdIntegrationSuite) TestOverwritePosixProperties(c *chk.C) { +func TestOverwritePosixProperties(t *testing.T) { + a := assert.New(t) if runtime.GOOS != "linux" { - c.Skip("This test will run only on linux") + t.Skip("This test will run only on linux") } - + bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) files := []string{ "filea", @@ -48,24 +50,24 @@ func (s *cmdIntegrationSuite) TestOverwritePosixProperties(c *chk.C) { "filec", } - dirPath := scenarioHelper{}.generateLocalDirectory(c) + dirPath := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dirPath) - scenarioHelper{}.generateLocalFilesFromList(c, dirPath, files) + scenarioHelper{}.generateLocalFilesFromList(a, dirPath, files) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRawCopyInput(dirPath, rawBlobURLWithSAS.String()) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) // trim / and /folder/ off - validateDownloadTransfersAreScheduled(c, "/", "/"+filepath.Base(dirPath)+"/", files[:], mockedRPC) + validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[:], mockedRPC) }) time.Sleep(10 * time.Second) @@ -79,21 +81,21 @@ func (s *cmdIntegrationSuite) TestOverwritePosixProperties(c *chk.C) { mockedRPC.reset() raw.forceWrite = "posixproperties" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) // trim / and /folder/ off - validateDownloadTransfersAreScheduled(c, "/", "/"+filepath.Base(dirPath)+"/", files[:], mockedRPC) + validateDownloadTransfersAreScheduled(a, "/", "/"+filepath.Base(dirPath)+"/", files[:], mockedRPC) }) listBlob, err := containerURL.ListBlobsFlatSegment(context.TODO(), azblob.Marker{}, - azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Metadata: true, Tags: true}, Prefix: filepath.Base(dirPath)}) + azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Metadata: true, Tags: true}, Prefix: filepath.Base(dirPath)}) - c.Assert(err, chk.Equals, nil) + a.Nil(err) for _, blob := range listBlob.Segment.BlobItems { - 
c.Assert(blob.Metadata[common.POSIXCTimeMeta], chk.Equals, strconv.FormatInt(newTimeStamp.UnixNano(), 10)) - c.Assert(blob.Metadata[common.POSIXATimeMeta], chk.Equals, strconv.FormatInt(newTimeStamp.UnixNano(), 10)) + a.Equal(strconv.FormatInt(newTimeStamp.UnixNano(), 10), blob.Metadata[common.POSIXCTimeMeta]) + a.Equal(strconv.FormatInt(newTimeStamp.UnixNano(), 10), blob.Metadata[common.POSIXATimeMeta]) } -} +} \ No newline at end of file diff --git a/cmd/zt_parseSize_test.go b/cmd/zt_parseSize_test.go index 1c28c5ffa..c0f0b7ca6 100644 --- a/cmd/zt_parseSize_test.go +++ b/cmd/zt_parseSize_test.go @@ -21,38 +21,41 @@ package cmd import ( + "github.com/stretchr/testify/assert" chk "gopkg.in/check.v1" + "testing" ) type parseSizeSuite struct{} var _ = chk.Suite(&parseSizeSuite{}) -func (s *parseSizeSuite) TestParseSize(c *chk.C) { +func TestParseSize(t *testing.T) { + a := assert.New(t) b, _ := ParseSizeString("123K", "x") - c.Assert(b, chk.Equals, int64(123*1024)) + a.Equal(int64(123*1024), b) b, _ = ParseSizeString("456m", "x") - c.Assert(b, chk.Equals, int64(456*1024*1024)) + a.Equal(int64(456*1024*1024), b) b, _ = ParseSizeString("789G", "x") - c.Assert(b, chk.Equals, int64(789*1024*1024*1024)) + a.Equal(int64(789*1024*1024*1024), b) expectedError := "foo-bar must be a number immediately followed by K, M or G. E.g. 12k or 200G" _, err := ParseSizeString("123", "foo-bar") - c.Assert(err.Error(), chk.Equals, expectedError) + a.Equal(expectedError, err.Error()) _, err = ParseSizeString("123 K", "foo-bar") - c.Assert(err.Error(), chk.Equals, expectedError) + a.Equal(expectedError, err.Error()) _, err = ParseSizeString("123KB", "foo-bar") - c.Assert(err.Error(), chk.Equals, expectedError) + a.Equal(expectedError, err.Error()) _, err = ParseSizeString("123T", "foo-bar") // we don't support terabytes - c.Assert(err.Error(), chk.Equals, expectedError) + a.Equal(expectedError, err.Error()) _, err = ParseSizeString("abcK", "foo-bar") - c.Assert(err.Error(), chk.Equals, expectedError) + a.Equal(expectedError, err.Error()) -} +} \ No newline at end of file diff --git a/cmd/zt_pathUtils_test.go b/cmd/zt_pathUtils_test.go index 859d06c91..de9b12a59 100644 --- a/cmd/zt_pathUtils_test.go +++ b/cmd/zt_pathUtils_test.go @@ -22,14 +22,17 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/stretchr/testify/assert" chk "gopkg.in/check.v1" + "testing" ) type pathUtilsSuite struct{} var _ = chk.Suite(&pathUtilsSuite{}) -func (s *pathUtilsSuite) TestStripQueryFromSaslessUrl(c *chk.C) { +func TestStripQueryFromSaslessUrl(t *testing.T) { + a := assert.New(t) tests := []struct { full string isRemote bool @@ -53,14 +56,15 @@ func (s *pathUtilsSuite) TestStripQueryFromSaslessUrl(c *chk.C) { loc = common.ELocation.File() } m, q := splitQueryFromSaslessResource(t.full, loc) - c.Assert(m, chk.Equals, t.expectedMain) - c.Assert(q, chk.Equals, t.expectedQuery) + a.Equal(t.expectedMain, m) + a.Equal(t.expectedQuery, q) } } -func (s *pathUtilsSuite) TestToReversedString(c *chk.C) { - t := &benchmarkTraverser{} - c.Assert("1", chk.Equals, t.toReversedString(1)) - c.Assert("01", chk.Equals, t.toReversedString(10)) - c.Assert("54321", chk.Equals, t.toReversedString(12345)) -} +func TestToReversedString(t *testing.T) { + a := assert.New(t) + traverser := &benchmarkTraverser{} + a.Equal("1", traverser.toReversedString(1)) + a.Equal("01", traverser.toReversedString(10)) + a.Equal("54321", traverser.toReversedString(12345)) +} \ No newline at end of file diff --git 
a/cmd/zt_remove_blob_test.go b/cmd/zt_remove_blob_test.go index 212e8f1aa..0a15b6cdc 100644 --- a/cmd/zt_remove_blob_test.go +++ b/cmd/zt_remove_blob_test.go @@ -25,24 +25,26 @@ import ( "fmt" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "log" "net/url" "os" "strings" + "testing" "time" ) -func (s *cmdIntegrationSuite) TestRemoveSingleBlob(c *chk.C) { +func TestRemoveSingleBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -50,27 +52,28 @@ func (s *cmdIntegrationSuite) TestRemoveSingleBlob(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) raw := getDefaultRemoveRawInput(rawBlobURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateRemoveTransfersAreScheduled(c, true, []string{""}, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, []string{""}, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestRemoveBlobsUnderContainer(c *chk.C) { +func TestRemoveBlobsUnderContainer(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -78,46 +81,47 @@ func (s *cmdIntegrationSuite) TestRemoveBlobsUnderContainer(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. 
- runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, blobList, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, blobList, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestRemoveBlobsUnderVirtualDir(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestRemoveBlobsUnderVirtualDir(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, vdirName) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -125,49 +129,50 @@ func (s *cmdIntegrationSuite) TestRemoveBlobsUnderVirtualDir(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) raw := getDefaultRemoveRawInput(rawVirtualDirectoryURLWithSAS.String()) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName) - validateRemoveTransfersAreScheduled(c, true, expectedTransfers, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedTransfers, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), 
chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } // include flag limits the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveWithIncludeFlag(c *chk.C) { +func TestRemoveWithIncludeFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -176,31 +181,32 @@ func (s *cmdIntegrationSuite) TestRemoveWithIncludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.include = includeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // exclude flag limits the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveWithExcludeFlag(c *chk.C) { +func TestRemoveWithExcludeFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -209,38 +215,39 @@ func (s *cmdIntegrationSuite) TestRemoveWithExcludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.exclude = 
excludeString raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) } // include and exclude flag can work together to limit the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveWithIncludeAndExcludeFlag(c *chk.C) { +func TestRemoveWithIncludeAndExcludeFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up interceptor @@ -249,32 +256,33 @@ func (s *cmdIntegrationSuite) TestRemoveWithIncludeAndExcludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.include = includeString raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // note: list-of-files flag is used -func (s *cmdIntegrationSuite) TestRemoveListOfBlobsAndVirtualDirs(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestRemoveListOfBlobsAndVirtualDirs(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - blobListPart2 := 
scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, vdirName+"/") + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName+"/") blobList := append(blobListPart1, blobListPart2...) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -282,7 +290,7 @@ func (s *cmdIntegrationSuite) TestRemoveListOfBlobsAndVirtualDirs(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.recursive = true @@ -292,60 +300,61 @@ func (s *cmdIntegrationSuite) TestRemoveListOfBlobsAndVirtualDirs(c *chk.C) { // add some random files that don't actually exist listOfFiles = append(listOfFiles, "WUTAMIDOING") listOfFiles = append(listOfFiles, "DONTKNOW") - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, blobList, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, blobList, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { source, err := url.PathUnescape(transfer.Source) - c.Assert(err, chk.IsNil) + a.Nil(err) // if the transfer is under the given dir, make sure only the top level files were scheduled if strings.HasPrefix(source, vdirName) { trimmedSource := strings.TrimPrefix(source, vdirName+"/") - c.Assert(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING)) } } }) } // note: list-of-files flag is used -func (s *cmdIntegrationSuite) TestRemoveListOfBlobsWithIncludeAndExclude(c *chk.C) { +func TestRemoveListOfBlobsWithIncludeAndExclude(t *testing.T) { + a := assert.New(t) bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, vdirName+"/") + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := 
scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName+"/") // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up interceptor @@ -354,7 +363,7 @@ func (s *cmdIntegrationSuite) TestRemoveListOfBlobsWithIncludeAndExclude(c *chk. mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.recursive = true raw.include = includeString @@ -370,30 +379,31 @@ func (s *cmdIntegrationSuite) TestRemoveListOfBlobsWithIncludeAndExclude(c *chk. // add files to both include and exclude listOfFiles = append(listOfFiles, blobsToInclude...) listOfFiles = append(listOfFiles, blobsToExclude...) - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, blobsToInclude, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, blobsToInclude, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestRemoveBlobsWithDirectoryStubs(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestRemoveBlobsWithDirectoryStubs(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(c, containerURL, vdirName) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobAndDirStubsList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, containerURL, vdirName) + a.NotNil(containerURL) + a.NotZero(len(blobAndDirStubsList)) // set up interceptor mockedRPC := interceptor{} @@ -401,53 +411,54 @@ func (s *cmdIntegrationSuite) TestRemoveBlobsWithDirectoryStubs(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, 
containerName, vdirName) + rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) raw := getDefaultRemoveRawInput(rawVirtualDirectoryURLWithSAS.String()) raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobAndDirStubsList)) + a.Equal(len(blobAndDirStubsList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobAndDirStubsList, strings.TrimSuffix(vdirName, "/")) expectedTransfers = scenarioHelper{}.shaveOffPrefix(expectedTransfers, "/") - validateRemoveTransfersAreScheduled(c, true, expectedTransfers, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedTransfers, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // there should be exactly 20 top files, no directory stubs should be included - c.Assert(len(mockedRPC.transfers), chk.Equals, 20) + a.Equal(20, len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestRemoveBlobsWithDirectoryStubsWithListOfFiles(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestRemoveBlobsWithDirectoryStubsWithListOfFiles(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(c, containerURL, vdirName) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobAndDirStubsList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, containerURL, vdirName) + a.NotNil(containerURL) + a.NotZero(len(blobAndDirStubsList)) // set up another empty dir vdirName2 := "emptydir" - createNewDirectoryStub(c, containerURL, vdirName2) + createNewDirectoryStub(a, containerURL, vdirName2) blobAndDirStubsList = append(blobAndDirStubsList, vdirName2) // set up interceptor @@ -456,43 +467,44 @@ func (s *cmdIntegrationSuite) TestRemoveBlobsWithDirectoryStubsWithListOfFiles(c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.recursive = true // make the input for list-of-files listOfFiles := []string{vdirName, vdirName2} - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) 
{ + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobAndDirStubsList)) + a.Equal(len(blobAndDirStubsList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, blobAndDirStubsList, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, blobAndDirStubsList, mockedRPC) }) // turn off recursive, this time an error should be thrown raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Zero(len(mockedRPC.transfers)) }) } -func (s *cmdIntegrationSuite) TestDryrunRemoveSingleBlob(c *chk.C) { +func TestDryrunRemoveSingleBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up the container with a single blob blobName := []string{"sub1/test/testing.txt"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobName, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobName, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -502,32 +514,33 @@ func (s *cmdIntegrationSuite) TestDryrunRemoveSingleBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobName[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobName[0]) raw := getDefaultRemoveRawInput(rawBlobURLWithSAS.String()) raw.dryrun = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := <-mockedLcm.dryrunLog // comparing message printed for dry run - c.Check(strings.Contains(msg, "DRYRUN: remove"), chk.Equals, true) - c.Check(strings.Contains(msg, containerURL.String()), chk.Equals, true) - c.Check(strings.Contains(msg, blobName[0]), chk.Equals, true) + a.True(strings.Contains(msg, "DRYRUN: remove")) + a.True(strings.Contains(msg, containerURL.String())) + a.True(strings.Contains(msg, blobName[0])) }) } -func (s *cmdIntegrationSuite) TestDryrunRemoveBlobsUnderContainer(c *chk.C) { +func TestDryrunRemoveBlobsUnderContainer(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up the container with a single blob blobList := []string{"AzURE2021.jpeg", "sub1/dir2/HELLO-4.txt", "sub1/test/testing.txt"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -537,35 +550,36 @@ func (s *cmdIntegrationSuite) TestDryrunRemoveBlobsUnderContainer(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate 
user input - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawBlobURLWithSAS.String()) raw.dryrun = true raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) for i := 0; i < len(blobList); i++ { - c.Check(strings.Contains(msg[i], "DRYRUN: remove"), chk.Equals, true) - c.Check(strings.Contains(msg[i], containerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], "DRYRUN: remove")) + a.True(strings.Contains(msg[i], containerURL.String())) } - c.Check(testDryrunStatements(blobList, msg), chk.Equals, true) + a.True(testDryrunStatements(blobList, msg)) }) } -func (s *cmdIntegrationSuite) TestDryrunRemoveBlobsUnderContainerJson(c *chk.C) { +func TestDryrunRemoveBlobsUnderContainerJson(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up the container with a single blob blobName := []string{"tech.txt"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobName, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobName, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -575,37 +589,38 @@ func (s *cmdIntegrationSuite) TestDryrunRemoveBlobsUnderContainerJson(c *chk.C) glcm = &mockedLcm // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawBlobURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawBlobURLWithSAS.String()) raw.dryrun = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that none where transferred - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) msg := <-mockedLcm.dryrunLog deleteTransfer := common.CopyTransfer{} errMarshal := json.Unmarshal([]byte(msg), &deleteTransfer) - c.Assert(errMarshal, chk.IsNil) + a.Nil(errMarshal) // comparing some values of deleteTransfer - c.Check(strings.Compare(deleteTransfer.Source, blobName[0]), chk.Equals, 0) - c.Check(strings.Compare(deleteTransfer.Destination, blobName[0]), chk.Equals, 0) - c.Check(strings.Compare(deleteTransfer.EntityType.String(), common.EEntityType.File().String()), chk.Equals, 0) - c.Check(strings.Compare(string(deleteTransfer.BlobType), "BlockBlob"), chk.Equals, 0) + a.Equal(deleteTransfer.Source, blobName[0]) + a.Equal(deleteTransfer.Destination, blobName[0]) + a.Equal("File", deleteTransfer.EntityType.String()) + a.Equal("BlockBlob", string(deleteTransfer.BlobType)) }) } -func (s *cmdIntegrationSuite) TestRemoveSingleBlobWithFromTo(c *chk.C) { +func TestRemoveSingleBlobWithFromTo(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + 
defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -613,28 +628,29 @@ func (s *cmdIntegrationSuite) TestRemoveSingleBlobWithFromTo(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) raw := getDefaultRemoveRawInput(rawBlobURLWithSAS.String()) raw.fromTo = "BlobTrash" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateRemoveTransfersAreScheduled(c, true, []string{""}, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, []string{""}, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestRemoveBlobsUnderContainerWithFromTo(c *chk.C) { +func TestRemoveBlobsUnderContainerWithFromTo(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -642,47 +658,48 @@ func (s *cmdIntegrationSuite) TestRemoveBlobsUnderContainerWithFromTo(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.fromTo = "BlobTrash" raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. 
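As a quick orientation to the declaration rewrite repeated in every hunk, here is a hedged before/after sketch; TestExampleRemove is an invented name standing in for any of the suite tests in these files, and the skip message is the one the HNS-dependent tests above use.

package cmd

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Before (gocheck suite method, removed by this change):
//
//	func (s *cmdIntegrationSuite) TestExampleRemove(c *chk.C) {
//	    c.Skip("Enable after setting Account to non-HNS")
//	    bsu := getBSU()
//	    ...
//	}
//
// After (standard "testing" func plus a local *assert.Assertions):
func TestExampleRemove(t *testing.T) {
	a := assert.New(t) // every former c.Assert/c.Check call goes through a
	_ = a              // (the real tests use a throughout their bodies)
	t.Skip("Enable after setting Account to non-HNS") // c.Skip(...) becomes t.Skip(...)
}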
- runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, blobList, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, blobList, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestRemoveBlobsUnderVirtualDirWithFromTo(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestRemoveBlobsUnderVirtualDirWithFromTo(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, vdirName) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, vdirName) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -690,52 +707,53 @@ func (s *cmdIntegrationSuite) TestRemoveBlobsUnderVirtualDirWithFromTo(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) raw := getDefaultRemoveRawInput(rawVirtualDirectoryURLWithSAS.String()) raw.fromTo = "BlobTrash" raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName) - validateRemoveTransfersAreScheduled(c, true, expectedTransfers, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedTransfers, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - 
c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestPermDeleteSnapshotsVersionsUnderSingleBlob(c *chk.C) { - serviceURL := setUpAccountPermDelete(c) +func TestPermDeleteSnapshotsVersionsUnderSingleBlob(t *testing.T) { + a := assert.New(t) + serviceURL := setUpAccountPermDelete(a) os.Setenv("AZCOPY_DISABLE_HIERARCHICAL_SCAN", "true") time.Sleep(time.Second * 10) // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, serviceURL) - defer deleteContainer(c, containerURL) - blobName, blobList, _ := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(c, containerURL, "") - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Equals, 3) + containerURL, containerName := createNewContainer(a, serviceURL) + defer deleteContainer(a, containerURL) + blobName, blobList, _ := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(a, containerURL, "") + a.NotNil(containerURL) + a.Equal(3, len(blobList)) list, _ := containerURL.ListBlobsFlatSegment(ctx, azblob.Marker{}, azblob.ListBlobsSegmentOptions{Details: azblob.BlobListingDetails{Deleted: true, Snapshots: true}, Prefix: blobName}) - c.Assert(list.Segment.BlobItems, chk.NotNil) - c.Assert(len(list.Segment.BlobItems), chk.Equals, 4) + a.NotNil(list.Segment.BlobItems) + a.Equal(4, len(list.Segment.BlobItems)) // set up interceptor mockedRPC := interceptor{} @@ -743,30 +761,31 @@ func (s *cmdIntegrationSuite) TestPermDeleteSnapshotsVersionsUnderSingleBlob(c * mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobName) raw := getDefaultRemoveRawInput(rawBlobURLWithSAS.String()) raw.recursive = true raw.permanentDeleteOption = "snapshotsandversions" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 3) + a.Equal(3, len(mockedRPC.transfers)) }) } -func (s *cmdIntegrationSuite) TestPermDeleteSnapshotsVersionsUnderContainer(c *chk.C) { - serviceURL := setUpAccountPermDelete(c) +func TestPermDeleteSnapshotsVersionsUnderContainer(t *testing.T) { + a := assert.New(t) + serviceURL := setUpAccountPermDelete(a) os.Setenv("AZCOPY_DISABLE_HIERARCHICAL_SCAN", "true") time.Sleep(time.Second * 10) // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, serviceURL) - defer deleteContainer(c, containerURL) - _, blobList, listOfTransfers := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(c, containerURL, "") - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Equals, 3) + containerURL, containerName := createNewContainer(a, serviceURL) + defer deleteContainer(a, containerURL) + _, blobList, listOfTransfers := scenarioHelper{}.generateCommonRemoteScenarioForSoftDelete(a, containerURL, "") + a.NotNil(containerURL) + a.Equal(3, len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -774,19 +793,19 @@ func (s *cmdIntegrationSuite) TestPermDeleteSnapshotsVersionsUnderContainer(c *c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := 
scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultRemoveRawInput(rawContainerURLWithSAS.String()) raw.recursive = true raw.permanentDeleteOption = "snapshotsandversions" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(listOfTransfers)) + a.Equal(len(listOfTransfers), len(mockedRPC.transfers)) }) } -func setUpAccountPermDelete(c *chk.C) azblob.ServiceURL { +func setUpAccountPermDelete(a *assert.Assertions) azblob.ServiceURL { accountName, accountKey := getAccountAndKey() u, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)) @@ -813,7 +832,7 @@ func setUpAccountPermDelete(c *chk.C) azblob.ServiceURL { days := int32(5) allowDelete := true _, err = serviceURL.SetProperties(ctx, azblob.StorageServiceProperties{DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: true, Days: &days, AllowPermanentDelete: &allowDelete}}) - c.Assert(err, chk.IsNil) + a.Nil(err) return serviceURL } diff --git a/cmd/zt_remove_copy_test.go b/cmd/zt_remove_copy_test.go index cd1c0adea..ce47be8bd 100644 --- a/cmd/zt_remove_copy_test.go +++ b/cmd/zt_remove_copy_test.go @@ -1,25 +1,27 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "strings" + "testing" ) -func (s *cmdIntegrationSuite) TestCopyBlobsWithDirectoryStubsS2S(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestCopyBlobsWithDirectoryStubsS2S(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/" // create container and dest container - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) dstBlobName := "testcopyblobswithdirectorystubs" + generateBlobName() - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) - blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(c, srcContainerURL, vdirName) - c.Assert(srcContainerURL, chk.NotNil) - c.Assert(len(blobAndDirStubsList), chk.Not(chk.Equals), 0) + blobAndDirStubsList := scenarioHelper{}.generateCommonRemoteScenarioForWASB(a, srcContainerURL, vdirName) + a.NotNil(srcContainerURL) + a.NotZero(len(blobAndDirStubsList)) // set up interceptor mockedRPC := interceptor{} @@ -27,20 +29,20 @@ func (s *cmdIntegrationSuite) TestCopyBlobsWithDirectoryStubsS2S(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawSrcBlobWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, vdirName) - rawDstBlobWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, dstContainerName, dstBlobName) + rawSrcBlobWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, vdirName) + rawDstBlobWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, dstContainerName, dstBlobName) raw := getDefaultCopyRawInput(rawSrcBlobWithSAS.String(), rawDstBlobWithSAS.String()) raw.recursive = true raw.includeDirectoryStubs = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, 
chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobAndDirStubsList)) + a.Equal(len(blobAndDirStubsList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobAndDirStubsList, strings.TrimSuffix(vdirName, "/")) - validateCopyTransfersAreScheduled(c, true, true, vdirName, "/vdir1", expectedTransfers, mockedRPC) + validateCopyTransfersAreScheduled(a, true, true, vdirName, "/vdir1", expectedTransfers, mockedRPC) }) -} +} \ No newline at end of file diff --git a/cmd/zt_remove_file_test.go b/cmd/zt_remove_file_test.go index 645b8bc98..542644627 100644 --- a/cmd/zt_remove_file_test.go +++ b/cmd/zt_remove_file_test.go @@ -22,21 +22,23 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/url" "strings" + "testing" ) -func (s *cmdIntegrationSuite) TestRemoveSingleFile(c *chk.C) { +func TestRemoveSingleFile(t *testing.T) { + a := assert.New(t) fsu := getFSU() - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) for _, fileName := range []string{"top/mid/low/singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList) - c.Assert(shareURL, chk.NotNil) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) + a.NotNil(shareURL) // set up interceptor mockedRPC := interceptor{} @@ -44,27 +46,28 @@ func (s *cmdIntegrationSuite) TestRemoveSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, fileList[0]) + rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileList[0]) raw := getDefaultRemoveRawInput(rawFileURLWithSAS.String()) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single files, the relative path is empty ("") since the root path already points to the file - validateRemoveTransfersAreScheduled(c, true, []string{""}, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, []string{""}, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestRemoveFilesUnderShare(c *chk.C) { +func TestRemoveFilesUnderShare(t *testing.T) { + a := assert.New(t) fsu := getFSU() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + a.NotNil(shareURL) + a.NotZero(len(fileList)) // set up interceptor mockedRPC := interceptor{} @@ -72,7 +75,7 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderShare(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := 
scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.recursive = true @@ -83,40 +86,41 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderShare(c *chk.C) { expectedRemovals := scenarioHelper{}.addFoldersToList(fileList, includeRootInTransfers) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedRemovals)) + a.Equal(len(expectedRemovals), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, expectedRemovals, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedRemovals, mockedRPC) }) // turn off recursive, this time only top files should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(expectedRemovals)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(expectedRemovals), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestRemoveFilesUnderDirectory(c *chk.C) { +func TestRemoveFilesUnderDirectory(t *testing.T) { + a := assert.New(t) fsu := getFSU() dirName := "dir1/dir2/dir3/" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, dirName) - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName) + a.NotNil(shareURL) + a.NotZero(len(fileList)) // set up interceptor mockedRPC := interceptor{} @@ -124,7 +128,7 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderDirectory(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawDirectoryURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, dirName) + rawDirectoryURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, dirName) raw := getDefaultRemoveRawInput(rawDirectoryURLWithSAS.String()) raw.recursive = true @@ -137,45 +141,46 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderDirectory(c *chk.C) { expectedDeletionMap[""] = 0 // add this one, because that's how dir1/dir2/dir3 appears, relative to the root (which itself) expectedDeletions := scenarioHelper{}.convertMapKeysToList(expectedDeletionMap) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedDeletions)) + a.Equal(len(expectedDeletions), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(expectedDeletions, dirName) - validateRemoveTransfersAreScheduled(c, true, expectedTransfers, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedTransfers, mockedRPC) }) 
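The same conversion reaches shared helpers: as with setUpAccountPermDelete in the blob file above, helpers that took a *chk.C now take a *assert.Assertions so they can assert on the caller's behalf. A hypothetical helper in that style, with an invented name and environment variable, might look like this:

package cmd

import (
	"os"

	"github.com/stretchr/testify/assert"
)

// requireTestEnv is illustrative only; it follows the converted-helper pattern by
// accepting the caller's *assert.Assertions rather than a *testing.T or *chk.C.
func requireTestEnv(a *assert.Assertions, name string) string {
	value := os.Getenv(name)
	a.NotEmpty(value, "environment variable %s must be set for this test", name)
	return value
}

// Typical call site inside a converted test:
//
//	a := assert.New(t)
//	accountName := requireTestEnv(a, "SOME_TEST_ACCOUNT") // hypothetical variable name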
// turn off recursive, this time only top files should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(expectedDeletions)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(expectedDeletions), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } // include flag limits the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveFilesWithIncludeFlag(c *chk.C) { +func TestRemoveFilesWithIncludeFlag(t *testing.T) { + a := assert.New(t) fsu := getFSU() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - defer deleteShare(c, shareURL) - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + defer deleteShare(a, shareURL) + a.NotNil(shareURL) + a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, filesToInclude) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -184,31 +189,32 @@ func (s *cmdIntegrationSuite) TestRemoveFilesWithIncludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.include = includeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) }) } // exclude flag limits the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveFilesWithExcludeFlag(c *chk.C) { +func TestRemoveFilesWithExcludeFlag(t *testing.T) { + a := assert.New(t) fsu := getFSU() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - defer deleteShare(c, shareURL) - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + defer deleteShare(a, shareURL) + a.NotNil(shareURL) + a.NotZero(len(fileList)) // add special files that we wish to exclude filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, filesToExclude) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToExclude) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -217,37 +223,38 @@ func (s *cmdIntegrationSuite) 
TestRemoveFilesWithExcludeFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", fileList, mockedRPC) }) } // include and exclude flag can work together to limit the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveFilesWithIncludeAndExcludeFlag(c *chk.C) { +func TestRemoveFilesWithIncludeAndExcludeFlag(t *testing.T) { + a := assert.New(t) fsu := getFSU() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - defer deleteShare(c, shareURL) - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + defer deleteShare(a, shareURL) + a.NotNil(shareURL) + a.NotZero(len(fileList)) // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, filesToInclude) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, filesToExclude) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -256,31 +263,32 @@ func (s *cmdIntegrationSuite) TestRemoveFilesWithIncludeAndExcludeFlag(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.include = includeString raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) }) } // note: list-of-files flag is used -func (s *cmdIntegrationSuite) TestRemoveListOfFilesAndDirectories(c *chk.C) { +func TestRemoveListOfFilesAndDirectories(t *testing.T) { + a := assert.New(t) fsu := getFSU() dirName := "megadir" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - c.Assert(shareURL, chk.NotNil) - defer deleteShare(c, shareURL) - individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - filesUnderTopDir := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, dirName+"/") + shareURL, shareName := createNewAzureShare(a, 
fsu) + a.NotNil(shareURL) + defer deleteShare(a, shareURL) + individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + filesUnderTopDir := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName+"/") combined := append(individualFilesList, filesUnderTopDir...) - c.Assert(len(combined), chk.Not(chk.Equals), 0) + a.NotZero(len(combined)) // set up interceptor mockedRPC := interceptor{} @@ -288,7 +296,7 @@ func (s *cmdIntegrationSuite) TestRemoveListOfFilesAndDirectories(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.recursive = true @@ -298,64 +306,65 @@ func (s *cmdIntegrationSuite) TestRemoveListOfFilesAndDirectories(c *chk.C) { // add some random files that don't actually exist listOfFiles = append(listOfFiles, "WUTAMIDOING") listOfFiles = append(listOfFiles, "DONTKNOW") - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) expectedDeletions := append( scenarioHelper{}.addFoldersToList(filesUnderTopDir, false), // this is a directory in the list of files list, so it will be recursively processed. Don't include root of megadir itself individualFilesList..., // these are individual files in the files list (so not recursively processed) ) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedDeletions)) + a.Equal(len(expectedDeletions), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, expectedDeletions, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedDeletions, mockedRPC) }) // turn off recursive, this time only top files should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(expectedDeletions)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(expectedDeletions), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { source, err := url.PathUnescape(transfer.Source) - c.Assert(err, chk.IsNil) + a.Nil(err) // if the transfer is under the given dir, make sure only the top level files were scheduled if strings.HasPrefix(source, dirName) { trimmedSource := strings.TrimPrefix(source, dirName+"/") - c.Assert(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING)) } } }) } // include and exclude flag can work together to limit the scope of the delete -func (s *cmdIntegrationSuite) TestRemoveListOfFilesWithIncludeAndExclude(c *chk.C) { +func TestRemoveListOfFilesWithIncludeAndExclude(t *testing.T) { + a := assert.New(t) fsu := getFSU() dirName := "megadir" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - c.Assert(shareURL, chk.NotNil) - defer deleteShare(c, shareURL) - individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - 
scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, dirName+"/") + shareURL, shareName := createNewAzureShare(a, fsu) + a.NotNil(shareURL) + defer deleteShare(a, shareURL) + individualFilesList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName+"/") // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, filesToInclude) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, filesToExclude) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, filesToExclude) excludeString := "so*;not*;exactName" // set up interceptor @@ -364,7 +373,7 @@ func (s *cmdIntegrationSuite) TestRemoveListOfFilesWithIncludeAndExclude(c *chk. mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.recursive = true raw.include = includeString @@ -380,29 +389,30 @@ func (s *cmdIntegrationSuite) TestRemoveListOfFilesWithIncludeAndExclude(c *chk. // add files to both include and exclude listOfFiles = append(listOfFiles, filesToInclude...) listOfFiles = append(listOfFiles, filesToExclude...) - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(filesToInclude)) + a.Equal(len(filesToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, filesToInclude, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, filesToInclude, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestRemoveSingleFileWithFromTo(c *chk.C) { +func TestRemoveSingleFileWithFromTo(t *testing.T) { + a := assert.New(t) fsu := getFSU() - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) for _, fileName := range []string{"top/mid/low/singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the share with a single file fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList) - c.Assert(shareURL, chk.NotNil) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, fileList) + a.NotNil(shareURL) // set up interceptor mockedRPC := interceptor{} @@ -410,28 +420,29 @@ func (s *cmdIntegrationSuite) TestRemoveSingleFileWithFromTo(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, fileList[0]) + rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, fileList[0]) raw := 
getDefaultRemoveRawInput(rawFileURLWithSAS.String()) raw.fromTo = "FileTrash" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single files, the relative path is empty ("") since the root path already points to the file - validateRemoveTransfersAreScheduled(c, true, []string{""}, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, []string{""}, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestRemoveFilesUnderShareWithFromTo(c *chk.C) { +func TestRemoveFilesUnderShareWithFromTo(t *testing.T) { + a := assert.New(t) fsu := getFSU() // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, "") - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, "") + a.NotNil(shareURL) + a.NotZero(len(fileList)) // set up interceptor mockedRPC := interceptor{} @@ -439,7 +450,7 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderShareWithFromTo(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) raw := getDefaultRemoveRawInput(rawShareURLWithSAS.String()) raw.recursive = true raw.fromTo = "FileTrash" @@ -451,40 +462,41 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderShareWithFromTo(c *chk.C) { expectedRemovals := scenarioHelper{}.addFoldersToList(fileList, includeRootInTransfers) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedRemovals)) + a.Equal(len(expectedRemovals), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateRemoveTransfersAreScheduled(c, true, expectedRemovals, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedRemovals, mockedRPC) }) // turn off recursive, this time only top files should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(expectedRemovals)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(expectedRemovals), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestRemoveFilesUnderDirectoryWithFromTo(c *chk.C) { +func TestRemoveFilesUnderDirectoryWithFromTo(t *testing.T) { + a := assert.New(t) fsu := getFSU() dirName := "dir1/dir2/dir3/" // set up the share with numerous files - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, shareURL, dirName) - c.Assert(shareURL, chk.NotNil) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) + shareURL, shareName := createNewAzureShare(a, fsu) 
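The surrounding hunks apply one mechanical translation from gocheck to testify. Below is a minimal, self-contained sketch of those equivalences, assuming the canonical github.com import path; the test name and values are invented and it is not code from this change. The main trap is argument order: testify's Equal takes the expected value first, so c.Assert(got, chk.Equals, want) becomes a.Equal(want, got).

package cmd

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

// TestAssertionMappingSketch is a hypothetical test illustrating the
// gocheck-to-testify translations used throughout this change.
func TestAssertionMappingSketch(t *testing.T) {
    a := assert.New(t) // replaces the (s *cmdIntegrationSuite) receiver and *chk.C

    got, want := 3, 3
    a.Equal(want, got)   // c.Assert(got, chk.Equals, want) -- expected comes first
    a.NotEqual(0, got)   // c.Assert(got, chk.Not(chk.Equals), 0)
    a.NotZero(len("ab")) // also used where the old code compared a length against 0

    var err error
    a.Nil(err)     // c.Assert(err, chk.IsNil)
    a.NotNil(&got) // c.Assert(x, chk.NotNil)

    a.True(got == want)  // c.Assert(cond, chk.Equals, true)
    a.False(got != want) // c.Assert(cond, chk.Equals, false)
}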
+ defer deleteShare(a, shareURL) + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, shareURL, dirName) + a.NotNil(shareURL) + a.NotZero(len(fileList)) // set up interceptor mockedRPC := interceptor{} @@ -492,7 +504,7 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderDirectoryWithFromTo(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawDirectoryURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, dirName) + rawDirectoryURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, shareName, dirName) raw := getDefaultRemoveRawInput(rawDirectoryURLWithSAS.String()) raw.recursive = true raw.fromTo = "FileTrash" @@ -506,27 +518,27 @@ func (s *cmdIntegrationSuite) TestRemoveFilesUnderDirectoryWithFromTo(c *chk.C) expectedDeletionMap[""] = 0 // add this one, because that's how dir1/dir2/dir3 appears, relative to the root (which itself) expectedDeletions := scenarioHelper{}.convertMapKeysToList(expectedDeletionMap) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedDeletions)) + a.Equal(len(expectedDeletions), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(expectedDeletions, dirName) - validateRemoveTransfersAreScheduled(c, true, expectedTransfers, mockedRPC) + validateRemoveTransfersAreScheduled(a, true, expectedTransfers, mockedRPC) }) // turn off recursive, this time only top files should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(expectedDeletions)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(expectedDeletions), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) -} +} \ No newline at end of file diff --git a/cmd/zt_scenario_helpers_for_test.go b/cmd/zt_scenario_helpers_for_test.go index d6a63cd37..c0a853e14 100644 --- a/cmd/zt_scenario_helpers_for_test.go +++ b/cmd/zt_scenario_helpers_for_test.go @@ -23,6 +23,7 @@ package cmd import ( "context" "fmt" + "github.com/stretchr/testify/assert" "io" "net/url" "os" @@ -40,7 +41,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-file-go/azfile" - chk "gopkg.in/check.v1" ) const defaultFileSize = 1024 @@ -62,25 +62,25 @@ var specialNames = []string{ } // note: this is to emulate the list-of-files flag -func (scenarioHelper) generateListOfFiles(c *chk.C, fileList []string) (path string) { +func (scenarioHelper) generateListOfFiles(a *assert.Assertions, fileList []string) (path string) { parentDirName, err := os.MkdirTemp("", "AzCopyLocalTest") - c.Assert(err, chk.IsNil) + a.Nil(err) // create the file path = common.GenerateFullPath(parentDirName, generateName("listy", 0)) err = os.MkdirAll(filepath.Dir(path), os.ModePerm) - c.Assert(err, chk.IsNil) + a.Nil(err) // pipe content into it content := strings.Join(fileList, "\n") err = os.WriteFile(path, []byte(content), common.DEFAULT_FILE_PERM) - c.Assert(err, chk.IsNil) + a.Nil(err) 
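The scenario helpers in this file switch from *chk.C parameters to *assert.Assertions, so a single assert.New(t) created in the test body can be handed down. A minimal hypothetical sketch of that pattern follows (helper name and temp-dir prefix are invented, not from this change). One behavioural difference worth keeping in mind: testify's assert records a failure and keeps running, whereas gocheck's c.Assert aborts the test; require.New(t) is the testify way to get the old hard-stop behaviour.

package cmd

import (
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
)

// makeTempDirSketch follows the converted helper pattern: it reports failures
// through the *assert.Assertions it is given rather than through *chk.C.
func makeTempDirSketch(a *assert.Assertions) string {
    dir, err := os.MkdirTemp("", "AzCopyEditorSketch")
    a.Nil(err) // was c.Assert(err, chk.IsNil)
    return dir
}

func TestMakeTempDirSketch(t *testing.T) {
    a := assert.New(t)
    dir := makeTempDirSketch(a)
    defer os.RemoveAll(dir)
    a.NotEmpty(dir)
}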
return } -func (scenarioHelper) generateLocalDirectory(c *chk.C) (dstDirName string) { +func (scenarioHelper) generateLocalDirectory(a *assert.Assertions) (dstDirName string) { dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") - c.Assert(err, chk.IsNil) + a.Nil(err) return } @@ -100,17 +100,17 @@ func (scenarioHelper) generateLocalFile(filePath string, fileSize int) ([]byte, return bigBuff, err } -func (s scenarioHelper) generateLocalFilesFromList(c *chk.C, dirPath string, fileList []string) { +func (s scenarioHelper) generateLocalFilesFromList(a *assert.Assertions, dirPath string, fileList []string) { for _, fileName := range fileList { _, err := s.generateLocalFile(filepath.Join(dirPath, fileName), defaultFileSize) - c.Assert(err, chk.IsNil) + a.Nil(err) } // sleep a bit so that the files' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c *chk.C, dirPath string, prefix string) (fileList []string) { +func (s scenarioHelper) generateCommonRemoteScenarioForLocal(a *assert.Assertions, dirPath string, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { batch := []string{ @@ -124,7 +124,7 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c *chk.C, dirPath s for j, name := range batch { fileList[5*i+j] = name _, err := s.generateLocalFile(filepath.Join(dirPath, name), defaultFileSize) - c.Assert(err, chk.IsNil) + a.Nil(err) } } @@ -133,14 +133,14 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c *chk.C, dirPath s return } -func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(c *chk.C, containerURL azblob.ContainerURL, prefix string) (string, []azblob.BlockBlobURL, []string) { +func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string) (string, []azblob.BlockBlobURL, []string) { blobList := make([]azblob.BlockBlobURL, 3) blobNames := make([]string, 3) var listOfTransfers []string - blobURL1, blobName1 := createNewBlockBlob(c, containerURL, prefix+"top") - blobURL2, blobName2 := createNewBlockBlob(c, containerURL, prefix+"sub1/") - blobURL3, blobName3 := createNewBlockBlob(c, containerURL, prefix+"sub1/sub3/sub5/") + blobURL1, blobName1 := createNewBlockBlob(a, containerURL, prefix+"top") + blobURL2, blobName2 := createNewBlockBlob(a, containerURL, prefix+"sub1/") + blobURL3, blobName3 := createNewBlockBlob(a, containerURL, prefix+"sub1/sub3/sub5/") blobList[0] = blobURL1 blobNames[0] = blobName1 @@ -153,15 +153,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(c *chk.C, contai for j := 0; j < 3; j++ { // create 3 soft-deleted snapshots for each blob // Create snapshot for blob snapResp, err := blobList[i].CreateSnapshot(ctx, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) - c.Assert(snapResp, chk.NotNil) - c.Assert(err, chk.IsNil) + a.NotNil(snapResp) + a.Nil(err) time.Sleep(time.Millisecond * 30) // Soft delete snapshot snapshotBlob := blobList[i].WithSnapshot(snapResp.Snapshot()) _, err = snapshotBlob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) listOfTransfers = append(listOfTransfers, blobNames[i]) } @@ -172,15 +172,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForSoftDelete(c *chk.C, contai return blobName1, blobList, listOfTransfers } -func (scenarioHelper) generateCommonRemoteScenarioForBlob(c *chk.C, 
containerURL azblob.ContainerURL, prefix string) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForBlob(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string) (blobList []string) { blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlob(c, containerURL, prefix+"top") - _, blobName2 := createNewBlockBlob(c, containerURL, prefix+"sub1/") - _, blobName3 := createNewBlockBlob(c, containerURL, prefix+"sub2/") - _, blobName4 := createNewBlockBlob(c, containerURL, prefix+"sub1/sub3/sub5/") - _, blobName5 := createNewBlockBlob(c, containerURL, prefix+specialNames[i]) + _, blobName1 := createNewBlockBlob(a, containerURL, prefix+"top") + _, blobName2 := createNewBlockBlob(a, containerURL, prefix+"sub1/") + _, blobName3 := createNewBlockBlob(a, containerURL, prefix+"sub2/") + _, blobName4 := createNewBlockBlob(a, containerURL, prefix+"sub1/sub3/sub5/") + _, blobName5 := createNewBlockBlob(a, containerURL, prefix+specialNames[i]) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -195,15 +195,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlob(c *chk.C, containerURL } // same as blob, but for every virtual directory, a blob with the same name is created, and it has metadata 'hdi_isfolder = true' -func (scenarioHelper) generateCommonRemoteScenarioForWASB(c *chk.C, containerURL azblob.ContainerURL, prefix string) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForWASB(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string) (blobList []string) { blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlob(c, containerURL, prefix+"top") - _, blobName2 := createNewBlockBlob(c, containerURL, prefix+"sub1/") - _, blobName3 := createNewBlockBlob(c, containerURL, prefix+"sub2/") - _, blobName4 := createNewBlockBlob(c, containerURL, prefix+"sub1/sub3/sub5/") - _, blobName5 := createNewBlockBlob(c, containerURL, prefix+specialNames[i]) + _, blobName1 := createNewBlockBlob(a, containerURL, prefix+"top") + _, blobName2 := createNewBlockBlob(a, containerURL, prefix+"sub1/") + _, blobName3 := createNewBlockBlob(a, containerURL, prefix+"sub2/") + _, blobName4 := createNewBlockBlob(a, containerURL, prefix+"sub1/sub3/sub5/") + _, blobName5 := createNewBlockBlob(a, containerURL, prefix+specialNames[i]) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -214,14 +214,14 @@ func (scenarioHelper) generateCommonRemoteScenarioForWASB(c *chk.C, containerURL if prefix != "" { rootDir := strings.TrimSuffix(prefix, "/") - createNewDirectoryStub(c, containerURL, rootDir) + createNewDirectoryStub(a, containerURL, rootDir) blobList = append(blobList, rootDir) } - createNewDirectoryStub(c, containerURL, prefix+"sub1") - createNewDirectoryStub(c, containerURL, prefix+"sub1/sub3") - createNewDirectoryStub(c, containerURL, prefix+"sub1/sub3/sub5") - createNewDirectoryStub(c, containerURL, prefix+"sub2") + createNewDirectoryStub(a, containerURL, prefix+"sub1") + createNewDirectoryStub(a, containerURL, prefix+"sub1/sub3") + createNewDirectoryStub(a, containerURL, prefix+"sub1/sub3/sub5") + createNewDirectoryStub(a, containerURL, prefix+"sub2") for _, dirPath := range []string{prefix + "sub1", prefix + "sub1/sub3", prefix + "sub1/sub3/sub5", prefix + "sub2"} { blobList = append(blobList, dirPath) @@ -232,15 +232,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForWASB(c *chk.C, containerURL return } -func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c 
*chk.C, filesystemURL azbfs.FileSystemURL, prefix string) (pathList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(a *assert.Assertions, filesystemURL azbfs.FileSystemURL, prefix string) (pathList []string) { pathList = make([]string, 50) for i := 0; i < 10; i++ { - _, pathName1 := createNewBfsFile(c, filesystemURL, prefix+"top") - _, pathName2 := createNewBfsFile(c, filesystemURL, prefix+"sub1/") - _, pathName3 := createNewBfsFile(c, filesystemURL, prefix+"sub2/") - _, pathName4 := createNewBfsFile(c, filesystemURL, prefix+"sub1/sub3/sub5") - _, pathName5 := createNewBfsFile(c, filesystemURL, prefix+specialNames[i]) + _, pathName1 := createNewBfsFile(a, filesystemURL, prefix+"top") + _, pathName2 := createNewBfsFile(a, filesystemURL, prefix+"sub1/") + _, pathName3 := createNewBfsFile(a, filesystemURL, prefix+"sub2/") + _, pathName4 := createNewBfsFile(a, filesystemURL, prefix+"sub1/sub3/sub5") + _, pathName5 := createNewBfsFile(a, filesystemURL, prefix+specialNames[i]) pathList[5*i] = pathName1 pathList[5*i+1] = pathName2 @@ -254,15 +254,15 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c *chk.C, filesystem return } -func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c *chk.C, shareURL azfile.ShareURL, prefix string) (fileList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(a *assert.Assertions, shareURL azfile.ShareURL, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { - _, fileName1 := createNewAzureFile(c, shareURL, prefix+"top") - _, fileName2 := createNewAzureFile(c, shareURL, prefix+"sub1/") - _, fileName3 := createNewAzureFile(c, shareURL, prefix+"sub2/") - _, fileName4 := createNewAzureFile(c, shareURL, prefix+"sub1/sub3/sub5/") - _, fileName5 := createNewAzureFile(c, shareURL, prefix+specialNames[i]) + _, fileName1 := createNewAzureFile(a, shareURL, prefix+"top") + _, fileName2 := createNewAzureFile(a, shareURL, prefix+"sub1/") + _, fileName3 := createNewAzureFile(a, shareURL, prefix+"sub2/") + _, fileName4 := createNewAzureFile(a, shareURL, prefix+"sub1/sub3/sub5/") + _, fileName5 := createNewAzureFile(a, shareURL, prefix+specialNames[i]) fileList[5*i] = fileName1 fileList[5*i+1] = fileName2 @@ -276,73 +276,72 @@ func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c *chk.C, shareUR return } -func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c *chk.C, serviceURL azblob.ServiceURL, containerList []string, blobList []string, data string) { +func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(a *assert.Assertions, serviceURL azblob.ServiceURL, containerList []string, blobList []string, data string) { for _, containerName := range containerList { curl := serviceURL.NewContainerURL(containerName) _, err := curl.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) - c.Assert(err, chk.IsNil) + a.Nil(err) - s.generateBlobsFromList(c, curl, blobList, data) + s.generateBlobsFromList(a, curl, blobList, data) } } -func (s scenarioHelper) generateFileSharesAndFilesFromLists(c *chk.C, serviceURL azfile.ServiceURL, shareList []string, fileList []string, data string) { +func (s scenarioHelper) generateFileSharesAndFilesFromLists(a *assert.Assertions, serviceURL azfile.ServiceURL, shareList []string, fileList []string, data string) { for _, shareName := range shareList { surl := serviceURL.NewShareURL(shareName) _, err := surl.Create(ctx, azfile.Metadata{}, 0) - c.Assert(err, chk.IsNil) + a.Nil(err) - s.generateAzureFilesFromList(c, 
surl, fileList) + s.generateAzureFilesFromList(a, surl, fileList) } } -func (s scenarioHelper) generateFilesystemsAndFilesFromLists(c *chk.C, serviceURL azbfs.ServiceURL, fsList []string, fileList []string, data string) { +func (s scenarioHelper) generateFilesystemsAndFilesFromLists(a *assert.Assertions, serviceURL azbfs.ServiceURL, fsList []string, fileList []string, data string) { for _, filesystemName := range fsList { fsURL := serviceURL.NewFileSystemURL(filesystemName) _, err := fsURL.Create(ctx) - c.Assert(err, chk.IsNil) + a.Nil(err) - s.generateBFSPathsFromList(c, fsURL, fileList) + s.generateBFSPathsFromList(a, fsURL, fileList) } } -func (s scenarioHelper) generateS3BucketsAndObjectsFromLists(c *chk.C, s3Client *minio.Client, bucketList []string, objectList []string, data string) { +func (s scenarioHelper) generateS3BucketsAndObjectsFromLists(a *assert.Assertions, s3Client *minio.Client, bucketList []string, objectList []string, data string) { for _, bucketName := range bucketList { err := s3Client.MakeBucket(bucketName, "") - c.Assert(err, chk.IsNil) + a.Nil(err) - s.generateObjects(c, s3Client, bucketName, objectList) + s.generateObjects(a, s3Client, bucketName, objectList) } } -func (s scenarioHelper) generateGCPBucketsAndObjectsFromLists(c *chk.C, client *gcpUtils.Client, bucketList []string, objectList []string) { +func (s scenarioHelper) generateGCPBucketsAndObjectsFromLists(a *assert.Assertions, client *gcpUtils.Client, bucketList []string, objectList []string) { for _, bucketName := range bucketList { bkt := client.Bucket(bucketName) err := bkt.Create(context.Background(), os.Getenv("GOOGLE_CLOUD_PROJECT"), &gcpUtils.BucketAttrs{}) - c.Assert(err, chk.IsNil) - s.generateGCPObjects(c, client, bucketName, objectList) + a.Nil(err) + s.generateGCPObjects(a, client, bucketName, objectList) } } // create the demanded blobs -func (scenarioHelper) generateBlobsFromList(c *chk.C, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generateBlobsFromList(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { blob := containerURL.NewBlockBlobURL(blobName) - cResp, err := blob.Upload(ctx, strings.NewReader(data), azblob.BlobHTTPHeaders{}, + _, err := blob.Upload(ctx, strings.NewReader(data), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generatePageBlobsFromList(c *chk.C, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generatePageBlobsFromList(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) blob := containerURL.NewPageBlobURL(blobName) - cResp, err := blob.Create(ctx, + _, err := blob.Create(ctx, int64(len(data)), 0, azblob.BlobHTTPHeaders{ @@ -355,30 +354,28 @@ func (scenarioHelper) generatePageBlobsFromList(c *chk.C, containerURL azblob.Co azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}, ) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) // Create the page (PUT page) - uResp, err := blob.UploadPages(ctx, + _, err = 
blob.UploadPages(ctx, 0, strings.NewReader(data), azblob.PageBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}, ) - c.Assert(err, chk.IsNil) - c.Assert(uResp.StatusCode(), chk.Equals, 201) + a.Nil(err) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateAppendBlobsFromList(c *chk.C, containerURL azblob.ContainerURL, blobList []string, data string) { +func (scenarioHelper) generateAppendBlobsFromList(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) blob := containerURL.NewAppendBlobURL(blobName) - cResp, err := blob.Create(ctx, + _, err := blob.Create(ctx, azblob.BlobHTTPHeaders{ ContentType: "text/random", }, @@ -388,60 +385,57 @@ func (scenarioHelper) generateAppendBlobsFromList(c *chk.C, containerURL azblob. azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}, ) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) // Append a block (PUT block) - uResp, err := blob.AppendBlock(ctx, + _, err = blob.AppendBlock(ctx, strings.NewReader(data), azblob.AppendBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(uResp.StatusCode(), chk.Equals, 201) + a.Nil(err) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateBlockBlobWithAccessTier(c *chk.C, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { +func (scenarioHelper) generateBlockBlobWithAccessTier(a *assert.Assertions, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { blob := containerURL.NewBlockBlobURL(blobName) - cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, + _, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, accessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) } // create the demanded objects -func (scenarioHelper) generateObjects(c *chk.C, client *minio.Client, bucketName string, objectList []string) { +func (scenarioHelper) generateObjects(a *assert.Assertions, client *minio.Client, bucketName string, objectList []string) { size := int64(len(objectDefaultData)) for _, objectName := range objectList { n, err := client.PutObjectWithContext(ctx, bucketName, objectName, strings.NewReader(objectDefaultData), size, minio.PutObjectOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(n, chk.Equals, size) + a.Nil(err) + a.Equal(size, n) } } -func (scenarioHelper) generateGCPObjects(c *chk.C, client *gcpUtils.Client, bucketName string, objectList []string) { +func (scenarioHelper) generateGCPObjects(a *assert.Assertions, client *gcpUtils.Client, bucketName string, objectList []string) { size := int64(len(objectDefaultData)) for _, objectName := range objectList { wc := client.Bucket(bucketName).Object(objectName).NewWriter(context.Background()) reader := strings.NewReader(objectDefaultData) written, err := io.Copy(wc, reader) - c.Assert(err, chk.IsNil) - c.Assert(written, chk.Equals, size) + a.Nil(err) + a.Equal(size, written) err = wc.Close() - c.Assert(err, chk.IsNil) + a.Nil(err) } } // create the demanded files -func 
(scenarioHelper) generateFlatFiles(c *chk.C, shareURL azfile.ShareURL, fileList []string) { +func (scenarioHelper) generateFlatFiles(a *assert.Assertions, shareURL azfile.ShareURL, fileList []string) { for _, fileName := range fileList { file := shareURL.NewRootDirectoryURL().NewFileURL(fileName) err := azfile.UploadBufferToAzureFile(ctx, []byte(fileDefaultData), file, azfile.UploadToAzureFileOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past @@ -454,15 +448,15 @@ func (scenarioHelper) generateFlatFiles(c *chk.C, shareURL azfile.ShareURL, file // 10 of them in sub dir "sub2" // 10 of them in deeper sub dir "sub1/sub3/sub5" // 10 of them with special characters -func (scenarioHelper) generateCommonRemoteScenarioForS3(c *chk.C, client *minio.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) (objectList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForS3(a *assert.Assertions, client *minio.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) (objectList []string) { objectList = make([]string, 50) for i := 0; i < 10; i++ { - objectName1 := createNewObject(c, client, bucketName, prefix+"top") - objectName2 := createNewObject(c, client, bucketName, prefix+"sub1/") - objectName3 := createNewObject(c, client, bucketName, prefix+"sub2/") - objectName4 := createNewObject(c, client, bucketName, prefix+"sub1/sub3/sub5/") - objectName5 := createNewObject(c, client, bucketName, prefix+specialNames[i]) + objectName1 := createNewObject(a, client, bucketName, prefix+"top") + objectName2 := createNewObject(a, client, bucketName, prefix+"sub1/") + objectName3 := createNewObject(a, client, bucketName, prefix+"sub2/") + objectName4 := createNewObject(a, client, bucketName, prefix+"sub1/sub3/sub5/") + objectName5 := createNewObject(a, client, bucketName, prefix+specialNames[i]) // Note: common.AZCOPY_PATH_SEPARATOR_STRING is added before bucket or objectName, as in the change minimize JobPartPlan file size, // transfer.Source & transfer.Destination(after trimming the SourceRoot and DestinationRoot) are with AZCOPY_PATH_SEPARATOR_STRING suffix, @@ -485,14 +479,14 @@ func (scenarioHelper) generateCommonRemoteScenarioForS3(c *chk.C, client *minio. 
return } -func (scenarioHelper) generateCommonRemoteScenarioForGCP(c *chk.C, client *gcpUtils.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) []string { +func (scenarioHelper) generateCommonRemoteScenarioForGCP(a *assert.Assertions, client *gcpUtils.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) []string { objectList := make([]string, 50) for i := 0; i < 10; i++ { - objectName1 := createNewGCPObject(c, client, bucketName, prefix+"top") - objectName2 := createNewGCPObject(c, client, bucketName, prefix+"sub1/") - objectName3 := createNewGCPObject(c, client, bucketName, prefix+"sub2/") - objectName4 := createNewGCPObject(c, client, bucketName, prefix+"sub1/sub3/sub5/") - objectName5 := createNewGCPObject(c, client, bucketName, prefix+specialNames[i]) + objectName1 := createNewGCPObject(a, client, bucketName, prefix+"top") + objectName2 := createNewGCPObject(a, client, bucketName, prefix+"sub1/") + objectName3 := createNewGCPObject(a, client, bucketName, prefix+"sub2/") + objectName4 := createNewGCPObject(a, client, bucketName, prefix+"sub1/sub3/sub5/") + objectName5 := createNewGCPObject(a, client, bucketName, prefix+specialNames[i]) // Note: common.AZCOPY_PATH_SEPARATOR_STRING is added before bucket or objectName, as in the change minimize JobPartPlan file size, // transfer.Source & transfer.Destination(after trimming the SourceRoot and DestinationRoot) are with AZCOPY_PATH_SEPARATOR_STRING suffix, @@ -516,39 +510,39 @@ func (scenarioHelper) generateCommonRemoteScenarioForGCP(c *chk.C, client *gcpUt } // create the demanded azure files -func (scenarioHelper) generateAzureFilesFromList(c *chk.C, shareURL azfile.ShareURL, fileList []string) { +func (scenarioHelper) generateAzureFilesFromList(a *assert.Assertions, shareURL azfile.ShareURL, fileList []string) { for _, filePath := range fileList { file := shareURL.NewRootDirectoryURL().NewFileURL(filePath) // create parents first - generateParentsForAzureFile(c, file) + generateParentsForAzureFile(a, file) // create the file itself cResp, err := file.Create(ctx, defaultAzureFileSizeInBytes, azfile.FileHTTPHeaders{}, azfile.Metadata{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) + a.Equal(201, cResp.StatusCode()) } // sleep a bit so that the files' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func (scenarioHelper) generateBFSPathsFromList(c *chk.C, filesystemURL azbfs.FileSystemURL, fileList []string) { - for _, path := range fileList { - file := filesystemURL.NewRootDirectoryURL().NewFileURL(path) +func (scenarioHelper) generateBFSPathsFromList(a *assert.Assertions, filesystemURL azbfs.FileSystemURL, fileList []string) { + for _, p := range fileList { + file := filesystemURL.NewRootDirectoryURL().NewFileURL(p) // Create the file cResp, err := file.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) + a.Equal(201, cResp.StatusCode()) aResp, err := file.AppendData(ctx, 0, strings.NewReader(string(make([]byte, defaultBlobFSFileSizeInBytes)))) - c.Assert(err, chk.IsNil) - c.Assert(aResp.StatusCode(), chk.Equals, 202) + a.Nil(err) + a.Equal(202, aResp.StatusCode()) fResp, err := file.FlushData(ctx, defaultBlobFSFileSizeInBytes, nil, azbfs.BlobFSHTTPHeaders{}, false, true) - c.Assert(err, chk.IsNil) - c.Assert(fResp.StatusCode(), chk.Equals, 200) + a.Nil(err) + a.Equal(200, fResp.StatusCode()) } } @@ -610,129 
+604,129 @@ func (scenarioHelper) addPrefix(list []string, prefix string) []string { return modifiedList } -func (scenarioHelper) getRawContainerURLWithSAS(c *chk.C, containerName string) url.URL { +func (scenarioHelper) getRawContainerURLWithSAS(a *assert.Assertions, containerName string) url.URL { accountName, accountKey := getAccountAndKey() credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) - c.Assert(err, chk.IsNil) - containerURLWithSAS := getContainerURLWithSAS(c, *credential, containerName) + a.Nil(err) + containerURLWithSAS := getContainerURLWithSAS(a, *credential, containerName) return containerURLWithSAS.URL() } -func (scenarioHelper) getRawBlobURLWithSAS(c *chk.C, containerName string, blobName string) url.URL { +func (scenarioHelper) getRawBlobURLWithSAS(a *assert.Assertions, containerName string, blobName string) url.URL { accountName, accountKey := getAccountAndKey() credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) - c.Assert(err, chk.IsNil) - containerURLWithSAS := getContainerURLWithSAS(c, *credential, containerName) + a.Nil(err) + containerURLWithSAS := getContainerURLWithSAS(a, *credential, containerName) blobURLWithSAS := containerURLWithSAS.NewBlockBlobURL(blobName) return blobURLWithSAS.URL() } -func (scenarioHelper) getRawBlobServiceURLWithSAS(c *chk.C) url.URL { +func (scenarioHelper) getRawBlobServiceURLWithSAS(a *assert.Assertions) url.URL { accountName, accountKey := getAccountAndKey() credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) - c.Assert(err, chk.IsNil) + a.Nil(err) - return getBlobServiceURLWithSAS(c, *credential).URL() + return getBlobServiceURLWithSAS(a, *credential).URL() } -func (scenarioHelper) getRawFileServiceURLWithSAS(c *chk.C) url.URL { +func (scenarioHelper) getRawFileServiceURLWithSAS(a *assert.Assertions) url.URL { accountName, accountKey := getAccountAndKey() credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) - c.Assert(err, chk.IsNil) + a.Nil(err) - return getFileServiceURLWithSAS(c, *credential).URL() + return getFileServiceURLWithSAS(a, *credential).URL() } -func (scenarioHelper) getRawAdlsServiceURLWithSAS(c *chk.C) azbfs.ServiceURL { +func (scenarioHelper) getRawAdlsServiceURLWithSAS(a *assert.Assertions) azbfs.ServiceURL { accountName, accountKey := getAccountAndKey() credential := azbfs.NewSharedKeyCredential(accountName, accountKey) - return getAdlsServiceURLWithSAS(c, *credential) + return getAdlsServiceURLWithSAS(a, *credential) } -func (scenarioHelper) getBlobServiceURL(c *chk.C) azblob.ServiceURL { +func (scenarioHelper) getBlobServiceURL(a *assert.Assertions) azblob.ServiceURL { accountName, accountKey := getAccountAndKey() credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) - c.Assert(err, chk.IsNil) + a.Nil(err) rawURL := fmt.Sprintf("https://%s.blob.core.windows.net", credential.AccountName()) // convert the raw url and validate it was parsed successfully fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})) } -func (s scenarioHelper) getContainerURL(c *chk.C, containerName string) azblob.ContainerURL { - serviceURL := s.getBlobServiceURL(c) +func (s scenarioHelper) getContainerURL(a *assert.Assertions, containerName string) azblob.ContainerURL { + serviceURL := s.getBlobServiceURL(a) containerURL := serviceURL.NewContainerURL(containerName) return containerURL } -func (scenarioHelper) 
getRawS3AccountURL(c *chk.C, region string) url.URL { +func (scenarioHelper) getRawS3AccountURL(a *assert.Assertions, region string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.IffString(region == "", "", "-"+region)) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return *fullURL } -func (scenarioHelper) getRawGCPAccountURL(c *chk.C) url.URL { +func (scenarioHelper) getRawGCPAccountURL(a *assert.Assertions) url.URL { rawURL := "https://storage.cloud.google.com/" fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return *fullURL } // TODO: Possibly add virtual-hosted-style and dual stack support. Currently use path style for testing. -func (scenarioHelper) getRawS3BucketURL(c *chk.C, region string, bucketName string) url.URL { +func (scenarioHelper) getRawS3BucketURL(a *assert.Assertions, region string, bucketName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.IffString(region == "", "", "-"+region), bucketName) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return *fullURL } -func (scenarioHelper) getRawGCPBucketURL(c *chk.C, bucketName string) url.URL { +func (scenarioHelper) getRawGCPBucketURL(a *assert.Assertions, bucketName string) url.URL { rawURL := fmt.Sprintf("https://storage.cloud.google.com/%s", bucketName) fmt.Println(rawURL) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return *fullURL } -func (scenarioHelper) getRawS3ObjectURL(c *chk.C, region string, bucketName string, objectName string) url.URL { +func (scenarioHelper) getRawS3ObjectURL(a *assert.Assertions, region string, bucketName string, objectName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.IffString(region == "", "", "-"+region), bucketName, objectName) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return *fullURL } -func (scenarioHelper) getRawGCPObjectURL(c *chk.C, bucketName string, objectName string) url.URL { +func (scenarioHelper) getRawGCPObjectURL(a *assert.Assertions, bucketName string, objectName string) url.URL { rawURL := fmt.Sprintf("https://storage.cloud.google.com/%s/%s", bucketName, objectName) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return *fullURL } -func (scenarioHelper) getRawFileURLWithSAS(c *chk.C, shareName string, fileName string) url.URL { +func (scenarioHelper) getRawFileURLWithSAS(a *assert.Assertions, shareName string, fileName string) url.URL { credential, err := getGenericCredentialForFile("") - c.Assert(err, chk.IsNil) - shareURLWithSAS := getShareURLWithSAS(c, *credential, shareName) + a.Nil(err) + shareURLWithSAS := getShareURLWithSAS(a, *credential, shareName) fileURLWithSAS := shareURLWithSAS.NewRootDirectoryURL().NewFileURL(fileName) return fileURLWithSAS.URL() } -func (scenarioHelper) getRawShareURLWithSAS(c *chk.C, shareName string) url.URL { +func (scenarioHelper) getRawShareURLWithSAS(a *assert.Assertions, shareName string) url.URL { accountName, accountKey := getAccountAndKey() credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) - c.Assert(err, chk.IsNil) - shareURLWithSAS := getShareURLWithSAS(c, *credential, shareName) + a.Nil(err) + shareURLWithSAS := getShareURLWithSAS(a, *credential, shareName) return shareURLWithSAS.URL() } @@ -752,10 +746,10 @@ func (scenarioHelper) containerExists(containerURL azblob.ContainerURL) bool { return false } -func runSyncAndVerify(c *chk.C, raw rawSyncCmdArgs, 
verifier func(err error)) { +func runSyncAndVerify(a *assert.Assertions, raw rawSyncCmdArgs, verifier func(err error)) { // the simulated user input should parse properly cooked, err := raw.cook() - c.Assert(err, chk.IsNil) + a.Nil(err) // the enumeration ends when process() returns err = cooked.process() @@ -764,7 +758,7 @@ func runSyncAndVerify(c *chk.C, raw rawSyncCmdArgs, verifier func(err error)) { verifier(err) } -func runCopyAndVerify(c *chk.C, raw rawCopyCmdArgs, verifier func(err error)) { +func runCopyAndVerify(a *assert.Assertions, raw rawCopyCmdArgs, verifier func(err error)) { // the simulated user input should parse properly cooked, err := raw.cook() if err == nil { @@ -782,21 +776,21 @@ func runCopyAndVerify(c *chk.C, raw rawCopyCmdArgs, verifier func(err error)) { verifier(err) } -func validateUploadTransfersAreScheduled(c *chk.C, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { - validateCopyTransfersAreScheduled(c, false, true, sourcePrefix, destinationPrefix, expectedTransfers, mockedRPC) +func validateUploadTransfersAreScheduled(a *assert.Assertions, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { + validateCopyTransfersAreScheduled(a, false, true, sourcePrefix, destinationPrefix, expectedTransfers, mockedRPC) } -func validateDownloadTransfersAreScheduled(c *chk.C, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { - validateCopyTransfersAreScheduled(c, true, false, sourcePrefix, destinationPrefix, expectedTransfers, mockedRPC) +func validateDownloadTransfersAreScheduled(a *assert.Assertions, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { + validateCopyTransfersAreScheduled(a, true, false, sourcePrefix, destinationPrefix, expectedTransfers, mockedRPC) } -func validateS2SSyncTransfersAreScheduled(c *chk.C, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { - validateCopyTransfersAreScheduled(c, true, true, sourcePrefix, destinationPrefix, expectedTransfers, mockedRPC) +func validateS2SSyncTransfersAreScheduled(a *assert.Assertions, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { + validateCopyTransfersAreScheduled(a, true, true, sourcePrefix, destinationPrefix, expectedTransfers, mockedRPC) } -func validateCopyTransfersAreScheduled(c *chk.C, isSrcEncoded bool, isDstEncoded bool, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { +func validateCopyTransfersAreScheduled(a *assert.Assertions, isSrcEncoded bool, isDstEncoded bool, sourcePrefix string, destinationPrefix string, expectedTransfers []string, mockedRPC interceptor) { // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedTransfers)) + a.Equal(len(expectedTransfers), len(mockedRPC.transfers)) // validate that the right transfers were sent lookupMap := scenarioHelper{}.convertListToMap(expectedTransfers) @@ -827,18 +821,18 @@ func validateCopyTransfersAreScheduled(c *chk.C, isSrcEncoded bool, isDstEncoded } // the relative paths should be equal - c.Assert(srcRelativeFilePath, chk.Equals, dstRelativeFilePath) + a.Equal(dstRelativeFilePath, srcRelativeFilePath) // look up the path from the expected transfers, make sure it exists _, transferExist := lookupMap[srcRelativeFilePath] - 
c.Assert(transferExist, chk.Equals, true) + a.True(transferExist) } } -func validateRemoveTransfersAreScheduled(c *chk.C, isSrcEncoded bool, expectedTransfers []string, mockedRPC interceptor) { +func validateRemoveTransfersAreScheduled(a *assert.Assertions, isSrcEncoded bool, expectedTransfers []string, mockedRPC interceptor) { // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedTransfers)) + a.Equal(len(expectedTransfers), len(mockedRPC.transfers)) // validate that the right transfers were sent lookupMap := scenarioHelper{}.convertListToMap(expectedTransfers) @@ -851,7 +845,7 @@ func validateRemoveTransfersAreScheduled(c *chk.C, isSrcEncoded bool, expectedTr // look up the source from the expected transfers, make sure it exists _, srcExist := lookupMap[srcRelativeFilePath] - c.Assert(srcExist, chk.Equals, true) + a.True(srcExist) delete(lookupMap, srcRelativeFilePath) } @@ -860,11 +854,11 @@ func validateRemoveTransfersAreScheduled(c *chk.C, isSrcEncoded bool, expectedTr // } } -func getDefaultSyncRawInput(src, dst string) rawSyncCmdArgs { +func getDefaultSyncRawInput(sra, dst string) rawSyncCmdArgs { deleteDestination := common.EDeleteDestination.True() return rawSyncCmdArgs{ - src: src, + src: sra, dst: dst, recursive: true, deleteDestination: deleteDestination.String(), @@ -963,4 +957,4 @@ func getDefaultSetPropertiesRawInput(src string, params transferParams) rawCopyC } return rawArgs -} +} \ No newline at end of file diff --git a/cmd/zt_scenario_helpers_for_windows_test.go b/cmd/zt_scenario_helpers_for_windows_test.go index ef90c7607..8fd613742 100644 --- a/cmd/zt_scenario_helpers_for_windows_test.go +++ b/cmd/zt_scenario_helpers_for_windows_test.go @@ -21,7 +21,7 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "path/filepath" "strings" "syscall" @@ -54,9 +54,9 @@ func (scenarioHelper) setAttributesForLocalFile(filePath string, attrList []stri return err } -func (s scenarioHelper) setAttributesForLocalFiles(c *chk.C, dirPath string, fileList []string, attrList []string) { +func (s scenarioHelper) setAttributesForLocalFiles(a *assert.Assertions, dirPath string, fileList []string, attrList []string) { for _, fileName := range fileList { err := s.setAttributesForLocalFile(filepath.Join(dirPath, fileName), attrList) - c.Assert(err, chk.IsNil) + a.Nil(err) } -} +} \ No newline at end of file diff --git a/cmd/zt_set_properties_test.go b/cmd/zt_set_properties_test.go index 37c944d6a..9844c2c3a 100644 --- a/cmd/zt_set_properties_test.go +++ b/cmd/zt_set_properties_test.go @@ -23,9 +23,10 @@ package cmd import ( "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/url" "strings" + "testing" "time" ) @@ -46,40 +47,40 @@ func (tp transferParams) getMetadata() common.Metadata { return metadataMap } -func (scenarioHelper) generateBlobsFromListWithAccessTier(c *chk.C, containerURL azblob.ContainerURL, blobList []string, data string, accessTier azblob.AccessTierType) { +func (scenarioHelper) generateBlobsFromListWithAccessTier(a *assert.Assertions, containerURL azblob.ContainerURL, blobList []string, data string, accessTier azblob.AccessTierType) { for _, blobName := range blobList { blob := containerURL.NewBlockBlobURL(blobName) cResp, err := blob.Upload(ctx, strings.NewReader(data), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, accessTier, nil, 
azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) + a.Equal(201, cResp.StatusCode()) } // sleep a bit so that the blobs' lmts are guaranteed to be in the past time.Sleep(time.Millisecond * 1050) } -func createNewBlockBlobWithAccessTier(c *chk.C, container azblob.ContainerURL, prefix string, accessTier azblob.AccessTierType) (blob azblob.BlockBlobURL, name string) { - blob, name = getBlockBlobURL(c, container, prefix) +func createNewBlockBlobWithAccessTier(a *assert.Assertions, container azblob.ContainerURL, prefix string, accessTier azblob.AccessTierType) (blob azblob.BlockBlobURL, name string) { + blob, name = getBlockBlobURL(a, container, prefix) cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, accessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) + a.Equal(201, cResp.StatusCode()) return } -func (scenarioHelper) generateCommonRemoteScenarioForBlobWithAccessTier(c *chk.C, containerURL azblob.ContainerURL, prefix string, accessTier azblob.AccessTierType) (blobList []string) { +func (scenarioHelper) generateCommonRemoteScenarioForBlobWithAccessTier(a *assert.Assertions, containerURL azblob.ContainerURL, prefix string, accessTier azblob.AccessTierType) (blobList []string) { blobList = make([]string, 50) for i := 0; i < 10; i++ { - _, blobName1 := createNewBlockBlobWithAccessTier(c, containerURL, prefix+"top", accessTier) - _, blobName2 := createNewBlockBlobWithAccessTier(c, containerURL, prefix+"sub1/", accessTier) - _, blobName3 := createNewBlockBlobWithAccessTier(c, containerURL, prefix+"sub2/", accessTier) - _, blobName4 := createNewBlockBlobWithAccessTier(c, containerURL, prefix+"sub1/sub3/sub5/", accessTier) - _, blobName5 := createNewBlockBlobWithAccessTier(c, containerURL, prefix+specialNames[i], accessTier) + _, blobName1 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"top", accessTier) + _, blobName2 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"sub1/", accessTier) + _, blobName3 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"sub2/", accessTier) + _, blobName4 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+"sub1/sub3/sub5/", accessTier) + _, blobName5 := createNewBlockBlobWithAccessTier(a, containerURL, prefix+specialNames[i], accessTier) blobList[5*i] = blobName1 blobList[5*i+1] = blobName2 @@ -93,25 +94,25 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobWithAccessTier(c *chk.C return } -func checkMapsEqual(c *chk.C, mapA map[string]string, mapB map[string]string) { - c.Assert(len(mapA), chk.Equals, len(mapB)) +func checkMapsEqual(a *assert.Assertions, mapA map[string]string, mapB map[string]string) { + a.Equal(len(mapB), len(mapA)) for k, v := range mapA { - c.Assert(mapB[k], chk.Equals, v) + a.Equal(v, mapB[k]) } } -func validateSetPropertiesTransfersAreScheduled(c *chk.C, isSrcEncoded bool, expectedTransfers []string, transferParams transferParams, mockedRPC interceptor) { +func validateSetPropertiesTransfersAreScheduled(a *assert.Assertions, isSrcEncoded bool, expectedTransfers []string, transferParams transferParams, mockedRPC interceptor) { // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedTransfers)) + 
a.Equal(len(expectedTransfers), len(mockedRPC.transfers)) // validate that the right transfers were sent lookupMap := scenarioHelper{}.convertListToMap(expectedTransfers) for _, transfer := range mockedRPC.transfers { srcRelativeFilePath := transfer.Source - c.Assert(transfer.BlobTier, chk.Equals, transferParams.blockBlobTier.ToAccessTierType()) - checkMapsEqual(c, transfer.Metadata, transferParams.getMetadata()) - checkMapsEqual(c, transfer.BlobTags, transferParams.blobTags) + a.Equal(transferParams.blockBlobTier.ToAccessTierType(), transfer.BlobTier) + checkMapsEqual(a, transfer.Metadata, transferParams.getMetadata()) + checkMapsEqual(a, transfer.BlobTags, transferParams.blobTags) if isSrcEncoded { srcRelativeFilePath, _ = url.PathUnescape(srcRelativeFilePath) @@ -119,24 +120,25 @@ func validateSetPropertiesTransfersAreScheduled(c *chk.C, isSrcEncoded bool, exp // look up the source from the expected transfers, make sure it exists _, srcExist := lookupMap[srcRelativeFilePath] - c.Assert(srcExist, chk.Equals, true) + a.True(srcExist) delete(lookupMap, srcRelativeFilePath) } } -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForBlobTier(c *chk.C) { +func TestSetPropertiesSingleBlobForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -144,7 +146,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForBlobTier(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -153,24 +155,25 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForBlobTier(c *chk.C) { } raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForBlobTier(c *chk.C) { +func TestSetPropertiesBlobsUnderContainerForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := 
scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -178,7 +181,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForBlobTier(c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -189,14 +192,14 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForBlobTier(c raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) //TODO: I don't think we need to change ^ this function from remove, do we? 
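The non-recursive re-run that follows keeps the existing verification idea and only swaps the assertion calls: every scheduled source must be a top-level name, i.e. contain no path separator. A small illustrative sketch of that check, with an invented sources slice standing in for mockedRPC.transfers and a literal "/" standing in for common.AZCOPY_PATH_SEPARATOR_STRING:

package cmd

import (
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
)

// TestTopLevelOnlySketch mirrors the non-recursive verification pattern used in
// these tests; the data here is invented for illustration.
func TestTopLevelOnlySketch(t *testing.T) {
    a := assert.New(t)
    const pathSeparator = "/" // stands in for common.AZCOPY_PATH_SEPARATOR_STRING

    sources := []string{"top", "打麻将.txt", "%4509%4254$85140&"}
    for _, src := range sources {
        // was: c.Assert(strings.Contains(src, pathSeparator), chk.Equals, false)
        a.False(strings.Contains(src, pathSeparator))
    }
}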
}) @@ -204,31 +207,32 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForBlobTier(c raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -// TODO: func (s *cmdIntegrationSuite) TestRemoveBlobsUnderVirtualDir(c *chk.C) +// TODO: func TestRemoveBlobsUnderVirtualDir(a *assert.Assertions) -func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForBlobTier(c *chk.C) { +func TestSetPropertiesWithIncludeFlagForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -237,7 +241,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForBlobTier(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -248,26 +252,27 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForBlobTier(c *chk raw.include = includeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForBlobTier(c *chk.C) { +func TestSetPropertiesWithExcludeFlagForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, 
containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -276,7 +281,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForBlobTier(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -289,32 +294,33 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForBlobTier(c *chk raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForBlobTier(c *chk.C) { +func TestSetPropertiesWithIncludeAndExcludeFlagForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", 
"exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "so*;not*;exactName" // set up interceptor @@ -323,7 +329,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForBlobT mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -336,28 +342,29 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForBlobT raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } // note: list-of-files flag is used -func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTier(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTier(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName+"/", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) blobList := append(blobListPart1, blobListPart2...) 
- c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -365,7 +372,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForBlobT mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -382,60 +389,61 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForBlobT // add some random files that don't actually exist listOfFiles = append(listOfFiles, "WUTAMIDOING") listOfFiles = append(listOfFiles, "DONTKNOW") - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { source, err := url.PathUnescape(transfer.Source) - c.Assert(err, chk.IsNil) + a.Nil(err) // if the transfer is under the given dir, make sure only the top level files were scheduled if strings.HasPrefix(source, vdirName) { trimmedSource := strings.TrimPrefix(source, vdirName+"/") - c.Assert(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING)) } } }) } -func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTier(c *chk.C) { +func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName+"/", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) // add special blobs that we wish to include blobsToInclude := 
[]string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "so*;not*;exactName" // set up interceptor @@ -444,7 +452,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeF mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -468,29 +476,30 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeF // add files to both include and exclude listOfFiles = append(listOfFiles, blobsToInclude...) listOfFiles = append(listOfFiles, blobsToExclude...) - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForBlobTier(c *chk.C) { +func TestSetPropertiesSingleBlobWithFromToForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -498,7 +507,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForBlobTier(c mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := 
scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -509,25 +518,26 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForBlobTier(c raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) raw.fromTo = "BlobNone" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForBlobTier(c *chk.C) { +func TestSetPropertiesBlobsUnderContainerWithFromToForBlobTier(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -535,7 +545,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForB mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -548,42 +558,43 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForB raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. 
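The shared helpers (createNewContainer, deleteContainer, the scenarioHelper methods) now take *assert.Assertions instead of *chk.C. A hedged sketch of what one such helper might look like after the change; the real helper lives elsewhere in the suite and its body here is an assumption:

package cmd // assumed package

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob" // assumed import path, mirroring the repo's other imports
	"github.com/stretchr/testify/assert"
)

// Assumed sketch: failures are reported through the shared Assertions value,
// so the helper no longer depends on gocheck at all.
func deleteContainer(a *assert.Assertions, container azblob.ContainerURL) {
	_, err := container.Delete(context.Background(), azblob.ContainerAccessConditions{})
	a.Nil(err)
}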
- runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTier(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTier(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName, azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -591,7 +602,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToFor mockedRPC.init() // construct the raw input to simulate user input - rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.Cool(), pageBlobTier: common.EPageBlobTier.None(), @@ -603,45 +614,46 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToFor raw.fromTo = "BlobNone" raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName) - validateSetPropertiesTransfersAreScheduled(c, true, expectedTransfers, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, expectedTransfers, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false 
mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } ///////////////////////////////// METADATA ///////////////////////////////// -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForMetadata(c *chk.C) { +func TestSetPropertiesSingleBlobForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -649,7 +661,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForMetadata(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -658,27 +670,28 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForMetadata(c *chk.C) { } raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForEmptyMetadata(c *chk.C) { +func TestSetPropertiesSingleBlobForEmptyMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, 
azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -686,7 +699,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForEmptyMetadata(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -695,24 +708,25 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForEmptyMetadata(c *chk } raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForMetadata(c *chk.C) { +func TestSetPropertiesBlobsUnderContainerForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -720,7 +734,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForMetadata(c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -731,43 +745,44 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForMetadata(c raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. 
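Every non-recursive branch below verifies top-level-only scheduling by asserting that a transfer's source contains no path separator. A tiny illustration with assumed blob names, relying on common.AZCOPY_PATH_SEPARATOR_STRING being "/":

package cmd // assumed package

import (
	"strings"
	"testing"

	"github.com/Azure/azure-storage-azcopy/v10/common"
	"github.com/stretchr/testify/assert"
)

// Illustration only: a root-level blob has no "/" in its relative path,
// while anything under a virtual directory still does, which is what the
// non-recursive assertions in this file rely on.
func TestTopLevelCheckSketch(t *testing.T) {
	a := assert.New(t)
	a.False(strings.Contains("rootblob.txt", common.AZCOPY_PATH_SEPARATOR_STRING))
	a.True(strings.Contains("megadir/nested.txt", common.AZCOPY_PATH_SEPARATOR_STRING))
}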
- runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be changed raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForMetadata(c *chk.C) { +func TestSetPropertiesWithIncludeFlagForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -776,7 +791,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForMetadata(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -787,26 +802,27 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForMetadata(c *chk raw.include = includeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } -func (s 
*cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForMetadata(c *chk.C) { +func TestSetPropertiesWithExcludeFlagForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -815,7 +831,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForMetadata(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -828,32 +844,33 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForMetadata(c *chk raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. 
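The include and exclude strings in these tests are ';'-separated name patterns. The sketch below is not azcopy's real filter implementation, just a rough model of the semantics the expected results depend on: a blob is kept only if its base name matches some include pattern and no exclude pattern.

package cmd // assumed package

import "path"

// Rough, assumed model of the filter behaviour exercised above.
func wouldBeScheduled(name string, includes, excludes []string) bool {
	matches := func(patterns []string) bool {
		for _, p := range patterns {
			if ok, _ := path.Match(p, path.Base(name)); ok {
				return true
			}
		}
		return false
	}
	return matches(includes) && !matches(excludes)
}

// Example (values taken from the tests above):
//   wouldBeScheduled("includeSub/amazing.jpeg", []string{"*.pdf", "*.jpeg", "exactName"}, []string{"so*", "not*", "exactName"}) == true
//   wouldBeScheduled("sorry.pdf",               []string{"*.pdf", "*.jpeg", "exactName"}, []string{"so*", "not*", "exactName"}) == false, since "sorry.pdf" matches "so*"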
- runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForMetadata(c *chk.C) { +func TestSetPropertiesWithIncludeAndExcludeFlagForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "so*;not*;exactName" // set up interceptor @@ -862,7 +879,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForMetad mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -875,28 +892,29 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForMetad raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } // note: list-of-files flag is used -func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForMetadata(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func 
TestSetPropertiesListOfBlobsAndVirtualDirsForMetadata(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName+"/", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) blobList := append(blobListPart1, blobListPart2...) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -904,7 +922,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForMetad mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -921,60 +939,61 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForMetad // add some random files that don't actually exist listOfFiles = append(listOfFiles, "WUTAMIDOING") listOfFiles = append(listOfFiles, "DONTKNOW") - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { source, err := url.PathUnescape(transfer.Source) - c.Assert(err, chk.IsNil) + a.Nil(err) // if the transfer is under the given dir, make sure only the top level files were scheduled if strings.HasPrefix(source, vdirName) { trimmedSource := strings.TrimPrefix(source, vdirName+"/") - c.Assert(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING)) } } }) } -func (s *cmdIntegrationSuite) 
TestSetPropertiesListOfBlobsWithIncludeAndExcludeForMetadata(c *chk.C) { +func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName+"/", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "so*;not*;exactName" // set up interceptor @@ -983,7 +1002,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeF mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1007,29 +1026,30 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeF // add files to both include and exclude listOfFiles = append(listOfFiles, blobsToInclude...) listOfFiles = append(listOfFiles, blobsToExclude...) 
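raw.listOfFilesToCopy in these list-of-files tests points at a temp file produced by the scenario helper. A hypothetical stand-in is sketched below, assuming the --list-of-files format of a plain-text file with one relative path per line; generateListOfFiles itself is defined elsewhere in the suite.

package cmd // assumed package

import (
	"os"
	"strings"

	"github.com/stretchr/testify/assert"
)

// Hypothetical stand-in for the suite's generateListOfFiles helper.
func writeListOfFiles(a *assert.Assertions, entries []string) string {
	f, err := os.CreateTemp("", "list-of-files-*.txt")
	a.Nil(err)
	defer f.Close()
	_, err = f.WriteString(strings.Join(entries, "\n")) // one entry per line
	a.Nil(err)
	return f.Name()
}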
- raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForMetadata(c *chk.C) { +func TestSetPropertiesSingleBlobWithFromToForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -1037,7 +1057,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForMetadata(c mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1048,25 +1068,26 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForMetadata(c raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) raw.fromTo = "BlobNone" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForMetadata(c *chk.C) { +func TestSetPropertiesBlobsUnderContainerWithFromToForMetadata(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) 
- c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -1074,7 +1095,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForM mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1087,42 +1108,43 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForM raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToForMetadata(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestSetPropertiesBlobsUnderVirtualDirWithFromToForMetadata(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName, azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -1130,7 +1152,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToFor mockedRPC.init() // construct the raw input to simulate user input - rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) transferParams := transferParams{ blockBlobTier: 
common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1142,45 +1164,46 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToFor raw.fromTo = "BlobNone" raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName) - validateSetPropertiesTransfersAreScheduled(c, true, expectedTransfers, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, expectedTransfers, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } ///////////////////////////////// TAGS ///////////////////////////////// -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForBlobTags(c *chk.C) { +func TestSetPropertiesSingleBlobForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -1188,7 +1211,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForBlobTags(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1197,27 +1220,28 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForBlobTags(c *chk.C) { } raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, 
true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForEmptyBlobTags(c *chk.C) { +func TestSetPropertiesSingleBlobForEmptyBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} // upload the data with given accessTier - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -1225,7 +1249,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForEmptyBlobTags(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1234,24 +1258,25 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobForEmptyBlobTags(c *chk } raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForBlobTags(c *chk.C) { +func TestSetPropertiesBlobsUnderContainerForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -1259,7 +1284,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerForBlobTags(c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1270,43 +1295,44 @@ func (s *cmdIntegrationSuite) 
TestSetPropertiesBlobsUnderContainerForBlobTags(c raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be changed raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForBlobTags(c *chk.C) { +func TestSetPropertiesWithIncludeFlagForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -1315,7 +1341,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForBlobTags(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1326,26 +1352,27 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeFlagForBlobTags(c *chk raw.include = includeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + 
runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForBlobTags(c *chk.C) { +func TestSetPropertiesWithExcludeFlagForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -1354,7 +1381,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForBlobTags(c *chk mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1367,32 +1394,33 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithExcludeFlagForBlobTags(c *chk raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. 
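+ // descriptive note (grounded in the verification below): the exclude patterns ("*.pdf;*.jpeg;exactName") should filter out blobsToExclude,
+ // so only the blobs in the original blobList are expected to be scheduled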
- runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForBlobTags(c *chk.C) { +func TestSetPropertiesWithIncludeAndExcludeFlagForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "so*;not*;exactName" // set up interceptor @@ -1401,7 +1429,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForBlobT mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1414,28 +1442,29 @@ func (s *cmdIntegrationSuite) TestSetPropertiesWithIncludeAndExcludeFlagForBlobT raw.exclude = excludeString raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } // note: list-of-files flag is used -func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTags(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func 
TestSetPropertiesListOfBlobsAndVirtualDirsForBlobTags(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName+"/", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + blobListPart2 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) blobList := append(blobListPart1, blobListPart2...) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -1443,7 +1472,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForBlobT mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1460,60 +1489,61 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsAndVirtualDirsForBlobT // add some random files that don't actually exist listOfFiles = append(listOfFiles, "WUTAMIDOING") listOfFiles = append(listOfFiles, "DONTKNOW") - raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { source, err := url.PathUnescape(transfer.Source) - c.Assert(err, chk.IsNil) + a.Nil(err) // if the transfer is under the given dir, make sure only the top level files were scheduled if strings.HasPrefix(source, vdirName) { trimmedSource := strings.TrimPrefix(source, vdirName+"/") - c.Assert(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(trimmedSource, common.AZCOPY_PATH_SEPARATOR_STRING)) } } }) } -func (s *cmdIntegrationSuite) 
TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTags(c *chk.C) { +func TestSetPropertiesListOfBlobsWithIncludeAndExcludeForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() vdirName := "megadir" // set up the container with numerous blobs and a vdir - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) - scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName+"/", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + blobListPart1 := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) + scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName+"/", azblob.AccessTierHot) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToInclude, blockBlobDefaultData, azblob.AccessTierHot) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobsToExclude, blockBlobDefaultData, azblob.AccessTierHot) excludeString := "so*;not*;exactName" // set up interceptor @@ -1522,7 +1552,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeF mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1546,29 +1576,30 @@ func (s *cmdIntegrationSuite) TestSetPropertiesListOfBlobsWithIncludeAndExcludeF // add files to both include and exclude listOfFiles = append(listOfFiles, blobsToInclude...) listOfFiles = append(listOfFiles, blobsToExclude...) 
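+ // descriptive note (grounded in the assertions that follow): although both sets are named in the list-of-files,
+ // the include/exclude patterns should leave only blobsToInclude in the scheduled transfers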
- raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(c, listOfFiles) + raw.listOfFilesToCopy = scenarioHelper{}.generateListOfFiles(a, listOfFiles) - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobsToInclude, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobsToInclude, transferParams, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForBlobTags(c *chk.C) { +func TestSetPropertiesSingleBlobWithFromToForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"top/mid/low/singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromListWithAccessTier(c, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromListWithAccessTier(a, containerURL, blobList, blockBlobDefaultData, azblob.AccessTierHot) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -1576,7 +1607,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForBlobTags(c mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1587,25 +1618,26 @@ func (s *cmdIntegrationSuite) TestSetPropertiesSingleBlobWithFromToForBlobTags(c raw := getDefaultSetPropertiesRawInput(rawBlobURLWithSAS.String(), transferParams) raw.fromTo = "BlobNone" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // note that when we are targeting single blobs, the relative path is empty ("") since the root path already points to the blob - validateSetPropertiesTransfersAreScheduled(c, true, []string{""}, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, []string{""}, transferParams, mockedRPC) }) } } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForBlobTags(c *chk.C) { +func TestSetPropertiesBlobsUnderContainerWithFromToForBlobTags(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, "", azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, "", azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) 
- c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -1613,7 +1645,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForB mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) transferParams := transferParams{ blockBlobTier: common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1626,42 +1658,43 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderContainerWithFromToForB raw.recursive = true raw.includeDirectoryStubs = false // The test target is a DFS account, which coincidentally created our directory stubs. Thus, we mustn't include them, since this is a test of blob. - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateSetPropertiesTransfersAreScheduled(c, true, blobList, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, blobList, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } -func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTags(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestSetPropertiesBlobsUnderVirtualDirWithFromToForBlobTags(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() vdirName := "vdir1/vdir2/vdir3/" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(c, containerURL, vdirName, azblob.AccessTierHot) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlobWithAccessTier(a, containerURL, vdirName, azblob.AccessTierHot) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -1669,7 +1702,7 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToFor mockedRPC.init() // construct the raw input to simulate user input - rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName) + rawVirtualDirectoryURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, vdirName) transferParams := transferParams{ blockBlobTier: 
common.EBlockBlobTier.None(), pageBlobTier: common.EPageBlobTier.None(), @@ -1681,27 +1714,27 @@ func (s *cmdIntegrationSuite) TestSetPropertiesBlobsUnderVirtualDirWithFromToFor raw.fromTo = "BlobNone" raw.recursive = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName) - validateSetPropertiesTransfersAreScheduled(c, true, expectedTransfers, transferParams, mockedRPC) + validateSetPropertiesTransfersAreScheduled(a, true, expectedTransfers, transferParams, mockedRPC) }) // turn off recursive, this time only top blobs should be deleted raw.recursive = false mockedRPC.reset() - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runCopyAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) -} +} \ No newline at end of file diff --git a/cmd/zt_sync_blob_blob_test.go b/cmd/zt_sync_blob_blob_test.go index 372403466..66e82812a 100644 --- a/cmd/zt_sync_blob_blob_test.go +++ b/cmd/zt_sync_blob_blob_test.go @@ -24,29 +24,31 @@ import ( "bytes" "context" "encoding/json" + "github.com/stretchr/testify/assert" "sort" "strings" + "testing" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) // regular blob->file sync -func (s *cmdIntegrationSuite) TestSyncS2SWithSingleBlob(c *chk.C) { +func TestSyncS2SWithSingleBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) for _, blobName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) // set up the destination container with the same single blob - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -54,40 +56,41 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithSingleBlob(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, srcContainerName, blobList[0]) - dstBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, dstContainerName, blobList[0]) + 
srcBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, srcContainerName, blobList[0]) + dstBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, dstContainerName, blobList[0]) raw := getDefaultSyncRawInput(srcBlobURLWithSAS.String(), dstBlobURLWithSAS.String()) // the destination was created after the source, so no sync should happen - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // recreate the source blob to have a later last modified time - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) } } // regular container->container sync but destination is empty, so everything has to be transferred -func (s *cmdIntegrationSuite) TestSyncS2SWithEmptyDestination(c *chk.C) { +func TestSyncS2SWithEmptyDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -95,48 +98,49 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithEmptyDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) // all blobs at source should be synced to destination - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) // turn off recursive, this time only top blobs should be transferred raw.recursive = false mockedRPC.reset() - 
runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } // regular container->container sync but destination is identical to the source, transfers are scheduled based on lmt -func (s *cmdIntegrationSuite) TestSyncS2SWithIdenticalDestination(c *chk.C) { +func TestSyncS2SWithIdenticalDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -144,46 +148,47 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIdenticalDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) // nothing should be sync since the source is older - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // refresh the source blobs' last modified time so that they get synced - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) } // regular container->container sync where destination is missing some files from source, and also has some extra files -func (s *cmdIntegrationSuite) TestSyncS2SWithMismatchedDestination(c *chk.C) { - 
c.Skip("Enable after setting Account to non-HNS") +func TestSyncS2SWithMismatchedDestination(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // set up the destination with half of the blobs from source - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobList[0:len(blobList)/2], blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList[0:len(blobList)/2], blockBlobDefaultData) expectedOutput := blobList[len(blobList)/2:] // the missing half of source blobs should be transferred // add some extra blobs that shouldn't be included - scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, dstContainerURL, "extra") + scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, dstContainerURL, "extra") // set up interceptor mockedRPC := interceptor{} @@ -191,19 +196,19 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithMismatchedDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", expectedOutput, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", expectedOutput, mockedRPC) // make sure the extra blobs were deleted extraFilesFound := false for marker := (azblob.Marker{}); marker.NotDone(); { listResponse, err := dstContainerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) marker = listResponse.NextMarker // if ever the extra blobs are found, note it down @@ -214,25 +219,26 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithMismatchedDestination(c *chk.C) { } } - c.Assert(extraFilesFound, chk.Equals, false) + a.False(extraFilesFound) }) } // include flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncS2SWithIncludePatternFlag(c *chk.C) { +func TestSyncS2SWithIncludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := 
createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -241,33 +247,34 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIncludePatternFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.include = includeString // verify that only the blobs specified by the include flag are synced - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // exclude flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncS2SWithExcludePatternFlag(c *chk.C) { +func TestSyncS2SWithExcludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) excludeString := "*.pdf;*.jpeg;exactName" // set up interceptor @@ -276,39 +283,40 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithExcludePatternFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS 
:= scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.exclude = excludeString // make sure the list doesn't include the blobs specified by the exclude flag - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) } // include and exclude flag can work together to limit the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeAndExcludePatternFlag(c *chk.C) { +func TestSyncS2SWithIncludeAndExcludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up interceptor @@ -317,34 +325,35 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeAndExcludePatternFlag(c *chk mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.include = includeString raw.exclude = excludeString // verify that only the blobs specified by the include flag are synced - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + 
validateS2SSyncTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // a specific path is avoided in the comparison -func (s *cmdIntegrationSuite) TestSyncS2SWithExcludePathFlag(c *chk.C) { +func TestSyncS2SWithExcludePathFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"excludeSub/notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) excludeString := "excludeSub;exactName" // set up interceptor @@ -353,49 +362,50 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithExcludePathFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.excludePath = excludeString // make sure the list doesn't include the blobs specified by the exclude flag - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) // now set up the destination with the blobs to be excluded, and make sure they are not touched - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToExclude, blockBlobDefaultData) // re-create the ones at the source so that their lmts are newer - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) // make sure the extra blobs were not touched for _, blobName := range blobsToExclude { exists := scenarioHelper{}.blobExists(dstContainerURL.NewBlobURL(blobName)) - c.Assert(exists, chk.Equals, true) + a.True(exists) } }) } // validate the bug fix for this 
scenario -func (s *cmdIntegrationSuite) TestSyncS2SWithMissingDestination(c *chk.C) { +func TestSyncS2SWithMissingDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) // delete the destination container to simulate non-existing destination, or recently removed destination - deleteContainer(c, dstContainerURL) + deleteContainer(a, dstContainerURL) // set up the container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -403,35 +413,36 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithMissingDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) // verify error is thrown - runSyncAndVerify(c, raw, func(err error) { + runSyncAndVerify(a, raw, func(err error) { // error should not be nil, but the app should not crash either - c.Assert(err, chk.NotNil) + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } // there is a type mismatch between the source and destination -func (s *cmdIntegrationSuite) TestSyncS2SMismatchContainerAndBlob(c *chk.C) { +func TestSyncS2SMismatchContainerAndBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // set up the destination container with a single blob singleBlobName := "single" - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, []string{singleBlobName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, []string{singleBlobName}, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -439,41 +450,42 @@ func (s *cmdIntegrationSuite) TestSyncS2SMismatchContainerAndBlob(c *chk.C) { mockedRPC.init() // construct the raw input to 
simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, dstContainerName, singleBlobName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, dstContainerName, singleBlobName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstBlobURLWithSAS.String()) // type mismatch, we should get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // reverse the source and destination raw = getDefaultSyncRawInput(dstBlobURLWithSAS.String(), srcContainerURLWithSAS.String()) // type mismatch again, we should also get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } // container <-> virtual dir sync -func (s *cmdIntegrationSuite) TestSyncS2SContainerAndEmptyVirtualDir(c *chk.C) { +func TestSyncS2SContainerAndEmptyVirtualDir(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // set up interceptor mockedRPC := interceptor{} @@ -481,50 +493,51 @@ func (s *cmdIntegrationSuite) TestSyncS2SContainerAndEmptyVirtualDir(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstVirtualDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, dstContainerName, "emptydir/") + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstVirtualDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, dstContainerName, "emptydir/") raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstVirtualDirURLWithSAS.String()) // verify that targeting a virtual directory works fine - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) // turn off recursive, this time only top blobs 
should be transferred raw.recursive = false mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } // regular vdir -> vdir sync -func (s *cmdIntegrationSuite) TestSyncS2SBetweenVirtualDirs(c *chk.C) { +func TestSyncS2SBetweenVirtualDirs(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs vdirName := "vdir" - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) + a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -532,51 +545,52 @@ func (s *cmdIntegrationSuite) TestSyncS2SBetweenVirtualDirs(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) srcContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + vdirName dstContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + vdirName raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) // nothing should be synced since the source is older - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // refresh the blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) mockedRPC.reset() expectedList := scenarioHelper{}.shaveOffPrefix(blobList, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", expectedList, mockedRPC) + runSyncAndVerify(a, raw, 
func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", expectedList, mockedRPC) }) } // examine situation where a blob has the same name as virtual dir // trailing slash is used to disambiguate the path as a vdir -func (s *cmdIntegrationSuite) TestSyncS2SBetweenVirtualDirsWithConflictingBlob(c *chk.C) { - c.Skip("Enable after setting Account to non-HNS") +func TestSyncS2SBetweenVirtualDirsWithConflictingBlob(t *testing.T) { + a := assert.New(t) + t.Skip("Enable after setting Account to non-HNS") bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs vdirName := "vdir" - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) // create a blob at the destination with the exact same name as the vdir - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, []string{vdirName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, []string{vdirName}, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -584,39 +598,39 @@ func (s *cmdIntegrationSuite) TestSyncS2SBetweenVirtualDirsWithConflictingBlob(c mockedRPC.init() // case 1: vdir -> blob sync: should fail - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) srcContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + vdirName dstContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + vdirName // construct the raw input to simulate user input raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // case 2: blob -> vdir sync: simply swap src and dst, should fail too raw = getDefaultSyncRawInput(dstContainerURLWithSAS.String(), srcContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // case 3: blob -> blob: if source is also a blob, then single blob 
to blob sync happens // create a blob at the source with the exact same name as the vdir - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, []string{vdirName}, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, []string{vdirName}, blockBlobDefaultData) raw = getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) // refresh the dst blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) mockedRPC.reset() // case 4: vdir -> vdir: adding a trailing slash helps to clarify it should be treated as virtual dir @@ -624,34 +638,35 @@ func (s *cmdIntegrationSuite) TestSyncS2SBetweenVirtualDirsWithConflictingBlob(c dstContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING raw = getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) expectedList := scenarioHelper{}.shaveOffPrefix(blobList, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", expectedList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", expectedList, mockedRPC) }) } // sync a vdir with a blob representing an ADLS directory // we should recognize this and sync with the virtual directory instead -func (s *cmdIntegrationSuite) TestSyncS2SADLSDirectory(c *chk.C) { +func TestSyncS2SADLSDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs vdirName := "vdir" - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + a.NotZero(len(blobList)) // set up the destination with the exact same files - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobList, blockBlobDefaultData) // create an ADLS Gen2 directory at the source with the exact same name as the vdir _, err := srcContainerURL.NewBlockBlobURL(vdirName).Upload(context.Background(), bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up interceptor mockedRPC := interceptor{} @@ -660,45 +675,46 @@ func (s 
*cmdIntegrationSuite) TestSyncS2SADLSDirectory(c *chk.C) { // ADLS Gen2 directory -> vdir sync: should work // but since the src files are older, nothing should be synced - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) srcContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + vdirName dstContainerURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + vdirName // construct the raw input to simulate user input raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // refresh the sources blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobList, blockBlobDefaultData) mockedRPC.reset() expectedTransfers := scenarioHelper{}.shaveOffPrefix(blobList, vdirName+common.AZCOPY_PATH_SEPARATOR_STRING) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", expectedTransfers, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", expectedTransfers, mockedRPC) }) } // testing multiple include regular expression -func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeRegexFlag(c *chk.C) { +func TestSyncS2SWithIncludeRegexFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "zxcfile.txt", "subOne/tetingessssss.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) includeString := "es{4,};^zxc" // set up interceptor @@ -707,16 +723,16 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeRegexFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + 
srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.includeRegex = includeString // verify that only the blobs specified by the include flag are synced - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // comparing is names of files, since not in order need to sort each string and the compare them actualTransfer := []string{} for i := 0; i < len(mockedRPC.transfers); i++ { @@ -724,27 +740,28 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeRegexFlag(c *chk.C) { } sort.Strings(actualTransfer) sort.Strings(blobsToInclude) - c.Assert(actualTransfer, chk.DeepEquals, blobsToInclude) + a.Equal(blobsToInclude, actualTransfer) - validateS2SSyncTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + validateS2SSyncTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // testing multiple exclude regular expressions -func (s *cmdIntegrationSuite) TestSyncS2SWithExcludeRegexFlag(c *chk.C) { +func TestSyncS2SWithExcludeRegexFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"tessssssssssssst.txt", "subOne/dogs.jpeg", "subOne/subTwo/tessssst.pdf"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) excludeString := "es{4,};o(g)" // set up interceptor @@ -753,41 +770,42 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithExcludeRegexFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.excludeRegex = excludeString // make sure the list doesn't include the blobs specified by the exclude flag - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of 
transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // all blobs from the blobList are transferred - validateS2SSyncTransfersAreScheduled(c, "", "", blobList, mockedRPC) + validateS2SSyncTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) } // testing with both include and exclude regular expression flags -func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeAndExcludeRegexFlag(c *chk.C) { +func TestSyncS2SWithIncludeAndExcludeRegexFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) - defer deleteContainer(c, dstContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) + defer deleteContainer(a, dstContainerURL) // set up the source container with numerous blobs - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, srcContainerURL, "") - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, srcContainerURL, "") + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"tessssssssssssst.txt", "zxcfile.txt", "subOne/tetingessssss.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) includeString := "es{4,};^zxc" // add special blobs that we wish to exclude blobsToExclude := []string{"zxca.txt", "subOne/dogs.jpeg", "subOne/subTwo/zxcat.pdf"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToExclude, blockBlobDefaultData) excludeString := "^zxca;o(g)" // set up interceptor @@ -796,17 +814,17 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeAndExcludeRegexFlag(c *chk.C mockedRPC.init() // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.includeRegex = includeString raw.excludeRegex = excludeString // verify that only the blobs specified by the include flag are synced - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobsToInclude)) + a.Equal(len(blobsToInclude), len(mockedRPC.transfers)) // comparing is names of files, since not in order need to sort each string and the compare them actualTransfer := []string{} for i := 0; i < len(mockedRPC.transfers); i++ { @@ -814,26 +832,27 @@ func (s *cmdIntegrationSuite) TestSyncS2SWithIncludeAndExcludeRegexFlag(c *chk.C } sort.Strings(actualTransfer) sort.Strings(blobsToInclude) - c.Assert(actualTransfer, 
chk.DeepEquals, blobsToInclude) + a.Equal(blobsToInclude, actualTransfer) - validateS2SSyncTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + validateS2SSyncTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestDryrunSyncBlobtoBlob(c *chk.C) { +func TestDryrunSyncBlobtoBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up src container - srcContainerURL, srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) blobsToInclude := []string{"AzURE2.jpeg", "sub1/aTestOne.txt", "sub1/sub2/testTwo.pdf"} - scenarioHelper{}.generateBlobsFromList(c, srcContainerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, srcContainerURL, blobsToInclude, blockBlobDefaultData) // set up dst container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) blobsToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobsToDelete, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToDelete, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -843,45 +862,46 @@ func (s *cmdIntegrationSuite) TestDryrunSyncBlobtoBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.dryrun = true raw.deleteDestination = "true" - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{}, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{}, mockedRPC) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) sort.Strings(msg) for i := 0; i < len(msg); i++ { if strings.Contains(msg[i], "DRYRUN: remove") { - c.Check(strings.Contains(msg[i], dstContainerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], dstContainerURL.String())) } else { - c.Check(strings.Contains(msg[i], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[i], srcContainerURL.String()), chk.Equals, true) - c.Check(strings.Contains(msg[i], dstContainerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], "DRYRUN: copy")) + a.True(strings.Contains(msg[i], srcContainerURL.String())) + a.True(strings.Contains(msg[i], dstContainerURL.String())) } } - c.Check(testDryrunStatements(blobsToInclude, msg), chk.Equals, true) - c.Check(testDryrunStatements(blobsToDelete, msg), chk.Equals, true) + a.True(testDryrunStatements(blobsToInclude, msg)) + a.True(testDryrunStatements(blobsToDelete, msg)) }) } -func (s *cmdIntegrationSuite) TestDryrunSyncBlobtoBlobJson(c *chk.C) { +func TestDryrunSyncBlobtoBlobJson(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up src container - srcContainerURL, 
srcContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, srcContainerURL) + srcContainerURL, srcContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, srcContainerURL) // set up dst container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) blobsToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobsToDelete, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToDelete, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -891,23 +911,23 @@ func (s *cmdIntegrationSuite) TestDryrunSyncBlobtoBlobJson(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, srcContainerName) - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + srcContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, srcContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcContainerURLWithSAS.String(), dstContainerURLWithSAS.String()) raw.dryrun = true raw.deleteDestination = "true" - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{}, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{}, mockedRPC) msg := <-mockedLcm.dryrunLog syncMessage := common.CopyTransfer{} errMarshal := json.Unmarshal([]byte(msg), &syncMessage) - c.Assert(errMarshal, chk.IsNil) - c.Check(strings.Contains(syncMessage.Source, blobsToDelete[0]), chk.Equals, true) - c.Check(strings.Compare(syncMessage.EntityType.String(), common.EEntityType.File().String()), chk.Equals, 0) - c.Check(strings.Compare(string(syncMessage.BlobType), "BlockBlob"), chk.Equals, 0) + a.Nil(errMarshal) + a.True(strings.Contains(syncMessage.Source, blobsToDelete[0])) + a.Equal("File", syncMessage.EntityType.String()) + a.Equal("BlockBlob", string(syncMessage.BlobType)) }) -} +} \ No newline at end of file diff --git a/cmd/zt_sync_blob_local_test.go b/cmd/zt_sync_blob_local_test.go index aacf391bd..3ff73d261 100644 --- a/cmd/zt_sync_blob_local_test.go +++ b/cmd/zt_sync_blob_local_test.go @@ -23,14 +23,15 @@ package cmd import ( "bytes" "context" + "github.com/stretchr/testify/assert" "os" "path/filepath" "strings" + "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) const ( @@ -38,23 +39,24 @@ const ( ) // regular blob->file sync -func (s *cmdIntegrationSuite) TestSyncDownloadWithSingleFile(c *chk.C) { +func TestSyncDownloadWithSingleFile(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, blobName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the container with a single blob blobList := []string{blobName} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + 
scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + a.NotNil(containerURL) // set up the destination as a single file time.Sleep(time.Second) - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := blobName - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) // set up interceptor mockedRPC := interceptor{} @@ -62,45 +64,46 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) raw := getDefaultSyncRawInput(rawBlobURLWithSAS.String(), filepath.Join(dstDirName, dstFileName)) // the file was created after the blob, so no sync should happen - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // Sleep a bit to offset LMTs time.Sleep(5 * time.Second) // recreate the blob to have a later last modified time time.Sleep(time.Second) - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) - validateDownloadTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + validateDownloadTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) } } // regular container->directory sync but destination is empty, so everything has to be transferred -func (s *cmdIntegrationSuite) TestSyncDownloadWithEmptyDestination(c *chk.C) { +func TestSyncDownloadWithEmptyDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -109,48 +112,49 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithEmptyDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + 
a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) // turn off recursive, this time only top blobs should be transferred raw.recursive = false mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } // regular container->directory sync but destination is identical to the source, transfers are scheduled based on lmt -func (s *cmdIntegrationSuite) TestSyncDownloadWithIdenticalDestination(c *chk.C) { +func TestSyncDownloadWithIdenticalDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up the destination with a folder that have the exact same files - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) // set up interceptor mockedRPC := interceptor{} @@ -158,42 +162,43 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithIdenticalDestination(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // refresh the blobs' last modified time so that they are newer - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) } // regular container->directory sync where 
destination is missing some files from source, and also has some extra files -func (s *cmdIntegrationSuite) TestSyncDownloadWithMismatchedDestination(c *chk.C) { +func TestSyncDownloadWithMismatchedDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up the destination with a folder that have half of the files from source - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList[0:len(blobList)/2]) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, []string{"extraFile1.pdf, extraFile2.txt"}) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList[0:len(blobList)/2]) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, []string{"extraFile1.pdf, extraFile2.txt"}) expectedOutput := blobList[len(blobList)/2:] // the missing half of source files should be transferred // set up interceptor @@ -202,12 +207,12 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithMismatchedDestination(c *chk.C mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", expectedOutput, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", expectedOutput, mockedRPC) // make sure the extra files were deleted currentDstFileList, err := os.ReadDir(dstDirName) @@ -218,28 +223,29 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithMismatchedDestination(c *chk.C } } - c.Assert(extraFilesFound, chk.Equals, false) + a.False(extraFilesFound) }) } // include flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncDownloadWithIncludePatternFlag(c *chk.C) { +func TestSyncDownloadWithIncludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, 
blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -248,34 +254,35 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithIncludePatternFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.include = includeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // exclude flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncDownloadWithExcludePatternFlag(c *chk.C) { +func TestSyncDownloadWithExcludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) excludeString := "*.pdf;*.jpeg;exactName" // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -284,40 +291,41 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithExcludePatternFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.exclude = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) } // include and exclude flag can work together to limit the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncDownloadWithIncludeAndExcludePatternFlag(c *chk.C) { +func TestSyncDownloadWithIncludeAndExcludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set 
up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to include blobsToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToInclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToInclude, blockBlobDefaultData) includeString := "*.pdf;*.jpeg;exactName" // add special blobs that we wish to exclude // note that the excluded files also match the include string blobsToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) excludeString := "so*;not*;exactName" // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -326,35 +334,36 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithIncludeAndExcludePatternFlag(c mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.include = includeString raw.exclude = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobsToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobsToInclude, mockedRPC) }) } // a specific path is avoided in the comparison -func (s *cmdIntegrationSuite) TestSyncDownloadWithExcludePathFlag(c *chk.C) { +func TestSyncDownloadWithExcludePathFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // add special blobs that we wish to exclude blobsToExclude := []string{"excludeSub/notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) excludeString := "excludeSub;exactName" // set up the destination with an empty folder - dstDirName := 
scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -363,47 +372,48 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithExcludePathFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.excludePath = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) }) // now set up the destination with the files to be excluded, and make sure they are not touched - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobsToExclude) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobsToExclude) // re-create the ones at the source so that their lmts are newer - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobsToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobsToExclude, blockBlobDefaultData) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateDownloadTransfersAreScheduled(c, "", "", blobList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateDownloadTransfersAreScheduled(a, "", "", blobList, mockedRPC) // make sure the extra files were not touched for _, blobName := range blobsToExclude { _, err := os.Stat(filepath.Join(dstDirName, blobName)) - c.Assert(err, chk.IsNil) + a.Nil(err) } }) } // validate the bug fix for this scenario -func (s *cmdIntegrationSuite) TestSyncDownloadWithMissingDestination(c *chk.C) { +func TestSyncDownloadWithMissingDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up the destination as a missing folder - baseDirName := scenarioHelper{}.generateLocalDirectory(c) + baseDirName := scenarioHelper{}.generateLocalDirectory(a) dstDirName := filepath.Join(baseDirName, "imbatman") defer os.RemoveAll(baseDirName) @@ -413,34 +423,35 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithMissingDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) - runSyncAndVerify(c, raw, func(err error) { + runSyncAndVerify(a, raw, func(err error) { // error should not be nil, but the app should not crash either - c.Assert(err, chk.NotNil) + a.NotNil(err) // validate that the right 
number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } // there is a type mismatch between the source and destination -func (s *cmdIntegrationSuite) TestSyncMismatchContainerAndFile(c *chk.C) { +func TestSyncMismatchContainerAndFile(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, "") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // set up the destination as a single file - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := blobList[0] - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, blobList) // set up interceptor mockedRPC := interceptor{} @@ -448,43 +459,44 @@ func (s *cmdIntegrationSuite) TestSyncMismatchContainerAndFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), filepath.Join(dstDirName, dstFileName)) // type mismatch, we should get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // reverse the source and destination raw = getDefaultSyncRawInput(filepath.Join(dstDirName, dstFileName), rawContainerURLWithSAS.String()) // type mismatch, we should get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } // there is a type mismatch between the source and destination -func (s *cmdIntegrationSuite) TestSyncMismatchBlobAndDirectory(c *chk.C) { +func TestSyncMismatchBlobAndDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the container with a single blob blobName := "singleblobisbest" blobList := []string{blobName} - containerURL, containerName := createNewContainer(c, bsu) - scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + scenarioHelper{}.generateBlobsFromList(a, containerURL, blobList, blockBlobDefaultData) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) // set up the destination as a directory - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up 
interceptor @@ -493,50 +505,51 @@ func (s *cmdIntegrationSuite) TestSyncMismatchBlobAndDirectory(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobList[0]) raw := getDefaultSyncRawInput(rawBlobURLWithSAS.String(), dstDirName) // type mismatch, we should get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // reverse the source and destination raw = getDefaultSyncRawInput(dstDirName, rawBlobURLWithSAS.String()) // type mismatch, we should get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } // download a blob representing an ADLS directory to a local file // we should recognize that there is a type mismatch -func (s *cmdIntegrationSuite) TestSyncDownloadADLSDirectoryTypeMismatch(c *chk.C) { +func TestSyncDownloadADLSDirectoryTypeMismatch(t *testing.T) { + a := assert.New(t) bsu := getBSU() blobName := "adlsdir" // set up the destination as a single file - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := blobName - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, []string{blobName}) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, []string{blobName}) // set up the container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) // create a single blob that represents an ADLS directory _, err := containerURL.NewBlockBlobURL(blobName).Upload(context.Background(), bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up interceptor mockedRPC := interceptor{} @@ -544,44 +557,45 @@ func (s *cmdIntegrationSuite) TestSyncDownloadADLSDirectoryTypeMismatch(c *chk.C mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, blobName) raw := getDefaultSyncRawInput(rawBlobURLWithSAS.String(), filepath.Join(dstDirName, dstFileName)) // the file was created after the blob, so no sync should happen - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } // adls directory -> local directory sync // we should download every blob except the blob representing the directory 
-func (s *cmdIntegrationSuite) TestSyncDownloadWithADLSDirectory(c *chk.C) { +func TestSyncDownloadWithADLSDirectory(t *testing.T) { + a := assert.New(t) bsu := getBSU() adlsDirName := "adlsdir" // set up the container with numerous blobs - containerURL, containerName := createNewContainer(c, bsu) - blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, adlsDirName+"/") - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) - c.Assert(len(blobList), chk.Not(chk.Equals), 0) + containerURL, containerName := createNewContainer(a, bsu) + blobList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(a, containerURL, adlsDirName+"/") + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) + a.NotZero(len(blobList)) // create a single blob that represents the ADLS directory dirBlob := containerURL.NewBlockBlobURL(adlsDirName) _, err := dirBlob.Upload(context.Background(), bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // create an extra blob that represents an empty ADLS directory, which should never be picked up _, err = containerURL.NewBlockBlobURL(adlsDirName+"/neverpickup").Upload(context.Background(), bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // set up the destination with an empty folder - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) // set up interceptor @@ -590,26 +604,26 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithADLSDirectory(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, adlsDirName) + rawContainerURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, adlsDirName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(blobList)) + a.Equal(len(blobList), len(mockedRPC.transfers)) }) // turn off recursive, this time only top blobs should be transferred raw.recursive = false mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(blobList)) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(blobList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) -} +} \ No newline at end of file diff --git a/cmd/zt_sync_comparator_test.go b/cmd/zt_sync_comparator_test.go index ecb2b854a..1eb7867ec 100644 --- a/cmd/zt_sync_comparator_test.go +++ b/cmd/zt_sync_comparator_test.go @@ -1,4 +1,4 @@ -// Copyright © 2017 Microsoft +// Copyright © Microsoft // // Permission is hereby granted, free 
of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,220 +21,663 @@ package cmd import ( + "context" "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" + "github.com/Azure/azure-storage-file-go/azfile" + "github.com/stretchr/testify/assert" + "os" + "sort" + "strings" + "testing" "time" ) -type syncComparatorSuite struct{} - -var _ = chk.Suite(&syncComparatorSuite{}) - -func (s *syncComparatorSuite) TestSyncSourceComparator(c *chk.C) { - dummyCopyScheduler := dummyProcessor{} - srcMD5 := []byte{'s'} - destMD5 := []byte{'d'} - - // set up the indexer as well as the source comparator - indexer := newObjectIndexer() - sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, false) - - // create a sample destination object - sampleDestinationObject := StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now(), md5: destMD5} - - // test the comparator in case a given source object is not present at the destination - // meaning no entry in the index, so the comparator should pass the given object to schedule a transfer - compareErr := sourceComparator.processIfNecessary(StoredObject{name: "only_at_source", relativePath: "only_at_source", lastModifiedTime: time.Now(), md5: srcMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // check the source object was indeed scheduled - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 1) - c.Assert(dummyCopyScheduler.record[0].md5, chk.DeepEquals, srcMD5) - - // reset the processor so that it's empty - dummyCopyScheduler = dummyProcessor{} - - // test the comparator in case a given source object is present at the destination - // and it has a later modified time, so the comparator should pass the give object to schedule a transfer - err := indexer.store(sampleDestinationObject) - c.Assert(err, chk.IsNil) - compareErr = sourceComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(time.Hour), md5: srcMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // check the source object was indeed scheduled - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 1) - c.Assert(dummyCopyScheduler.record[0].md5, chk.DeepEquals, srcMD5) - c.Assert(len(indexer.indexMap), chk.Equals, 0) - - // reset the processor so that it's empty - dummyCopyScheduler = dummyProcessor{} +// regular file->file sync +func TestFileSyncS2SWithSingleFile(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { + // set up the source share with a single file + fileList := []string{fileName} + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + + // set up the destination share with the same single file + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, srcShareName, fileList[0]) + dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, fileList[0]) + raw := 
getDefaultSyncRawInput(srcFileURLWithSAS.String(), dstFileURLWithSAS.String()) + + // the destination was created after the source, so no sync should happen + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + + // validate that the right number of transfers were scheduled + a.Zero(len(mockedRPC.transfers)) + }) + + // recreate the source file to have a later last modified time + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + mockedRPC.reset() + + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) + }) + } +} - // test the comparator in case a given source object is present at the destination - // but is has an earlier modified time compared to the one at the destination - // meaning that the source object is considered stale, so no transfer should be scheduled - err = indexer.store(sampleDestinationObject) - c.Assert(err, chk.IsNil) - compareErr = sourceComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(-time.Hour), md5: srcMD5}) - c.Assert(compareErr, chk.Equals, nil) +// regular share->share sync but destination is empty, so everything has to be transferred +func TestFileSyncS2SWithEmptyDestination(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + + // all files at source should be synced to destination + expectedList := scenarioHelper{}.addFoldersToList(fileList, false) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + + // validate that the right number of transfers were scheduled + a.Equal(len(expectedList), len(mockedRPC.transfers)) + + // validate that the right transfers were sent + validateS2SSyncTransfersAreScheduled(a, "", "", expectedList, mockedRPC) + }) + + // turn off recursive, this time only top files should be transferred + raw.recursive = false + mockedRPC.reset() + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(fileList), len(mockedRPC.transfers)) + + for _, transfer := range mockedRPC.transfers { + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) + } + }) +} - // check no source object was scheduled - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 0) - c.Assert(len(indexer.indexMap), chk.Equals, 0) +// regular share->share sync but destination is identical to the source, transfers are scheduled based on lmt +func TestFileSyncS2SWithIdenticalDestination(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := 
scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // set up the destination with the exact same files + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + + // nothing should be sync since the source is older + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + + // validate that the right number of transfers were scheduled + a.Zero(len(mockedRPC.transfers)) + }) + + // refresh the source files' last modified time so that they get synced + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + mockedRPC.reset() + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", fileList, mockedRPC) + }) } -func (s *syncComparatorSuite) TestSyncSrcCompDisableComparator(c *chk.C) { - dummyCopyScheduler := dummyProcessor{} - srcMD5 := []byte{'s'} - destMD5 := []byte{'d'} - - // set up the indexer as well as the source comparator - indexer := newObjectIndexer() - sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, true) - - // test the comparator in case a given source object is not present at the destination - // meaning no entry in the index, so the comparator should pass the given object to schedule a transfer - compareErr := sourceComparator.processIfNecessary(StoredObject{name: "only_at_source", relativePath: "only_at_source", lastModifiedTime: time.Now(), md5: srcMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // check the source object was indeed scheduled - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 1) - c.Assert(dummyCopyScheduler.record[0].md5, chk.DeepEquals, srcMD5) - - // reset the processor so that it's empty - dummyCopyScheduler = dummyProcessor{} - - // create a sample source object - currTime := time.Now() - destinationStoredObjects := []StoredObject{ - // file whose last modified time is greater than that of source - {name: "test1", relativePath: "/usr/test1", lastModifiedTime: currTime, md5: destMD5}, - // file whose last modified time is less than that of source - {name: "test2", relativePath: "/usr/test2", lastModifiedTime: currTime, md5: destMD5}, +// regular share->share sync where destination is missing some files from source, and also has some extra files +func TestFileSyncS2SWithMismatchedDestination(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // set up the destination with half of the files from source + filesAlreadyAtDestination := fileList[0 : len(fileList)/2] + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, filesAlreadyAtDestination) + expectedOutput := fileList[len(fileList)/2:] // the missing half of source files should be transferred + + // add some extra files that 
shouldn't be included + scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, dstShareURL, "extra") + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + + expectedOutputMap := scenarioHelper{}.convertListToMap( + scenarioHelper{}.addFoldersToList(expectedOutput, false)) + everythingAlreadyAtDestination := scenarioHelper{}.convertListToMap( + scenarioHelper{}.addFoldersToList(filesAlreadyAtDestination, false)) + for exists := range everythingAlreadyAtDestination { + delete(expectedOutputMap, exists) // remove directories that actually exist at destination } + expectedOutput = scenarioHelper{}.convertMapKeysToList(expectedOutputMap) + + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", expectedOutput, mockedRPC) + + // make sure the extra files were deleted + extraFilesFound := false + for marker := (azfile.Marker{}); marker.NotDone(); { + listResponse, err := dstShareURL.NewRootDirectoryURL().ListFilesAndDirectoriesSegment(ctx, marker, azfile.ListFilesAndDirectoriesOptions{}) + a.Nil(err) + marker = listResponse.NextMarker + + // if ever the extra files are found, note it down + for _, file := range listResponse.FileItems { + if strings.Contains(file.Name, "extra") { + extraFilesFound = true + } + } + } + + a.False(extraFilesFound) + }) +} - sourceStoredObjects := []StoredObject{ - {name: "test1", relativePath: "/usr/test1", lastModifiedTime: currTime.Add(time.Hour), md5: srcMD5}, - {name: "test2", relativePath: "/usr/test2", lastModifiedTime: currTime.Add(-time.Hour), md5: srcMD5}, - } +// include flag limits the scope of source/destination comparison +func TestFileSyncS2SWithIncludeFlag(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // add special files that we wish to include + filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + includeString := "*.pdf;*.jpeg;exactName" + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + raw.include = includeString + + // verify that only the files specified by the include flag are synced + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) + }) +} - // test the comparator in case a given source object is present at the destination - // but is has an earlier modified time compared to the one at the destination - // meaning that the source 
object is considered stale, so no transfer should be scheduled - for key, dstStoredObject := range destinationStoredObjects { - err := indexer.store(dstStoredObject) - c.Assert(err, chk.IsNil) - compareErr = sourceComparator.processIfNecessary(sourceStoredObjects[key]) - c.Assert(compareErr, chk.Equals, nil) - c.Assert(len(dummyCopyScheduler.record), chk.Equals, key+1) - c.Assert(len(indexer.indexMap), chk.Equals, 0) - } +// exclude flag limits the scope of source/destination comparison +func TestFileSyncS2SWithExcludeFlag(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // add special files that we wish to exclude + filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) + excludeString := "*.pdf;*.jpeg;exactName" + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + raw.exclude = excludeString + + // make sure the list doesn't include the files specified by the exclude flag + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", fileList, mockedRPC) + }) } -func (s *syncComparatorSuite) TestSyncDestinationComparator(c *chk.C) { - dummyCopyScheduler := dummyProcessor{} - dummyCleaner := dummyProcessor{} - srcMD5 := []byte{'s'} - destMD5 := []byte{'d'} - - // set up the indexer as well as the destination comparator - indexer := newObjectIndexer() - destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, false) - - // create a sample source object - sampleSourceObject := StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now(), md5: srcMD5} - - // test the comparator in case a given destination object is not present at the source - // meaning it is an extra file that needs to be deleted, so the comparator should pass the given object to the destinationCleaner - compareErr := destinationComparator.processIfNecessary(StoredObject{name: "only_at_dst", relativePath: "only_at_dst", lastModifiedTime: time.Now(), md5: destMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // verify that destination object is being deleted - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 0) - c.Assert(len(dummyCleaner.record), chk.Equals, 1) - c.Assert(dummyCleaner.record[0].md5, chk.DeepEquals, destMD5) - - // reset dummy processors - dummyCopyScheduler = dummyProcessor{} - dummyCleaner = dummyProcessor{} - - // test the comparator in case a given destination object is present at the source - // and it has a later modified time, since the source data is stale, - // no transfer happens - err := indexer.store(sampleSourceObject) - c.Assert(err, chk.IsNil) - compareErr = destinationComparator.processIfNecessary(StoredObject{name: 
"test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(time.Hour), md5: destMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // verify that the source object is scheduled for transfer - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 0) - c.Assert(len(dummyCleaner.record), chk.Equals, 0) - - // reset dummy processors - dummyCopyScheduler = dummyProcessor{} - dummyCleaner = dummyProcessor{} - - // test the comparator in case a given destination object is present at the source - // but is has an earlier modified time compared to the one at the source - // meaning that the source object should be transferred since the destination object is stale - err = indexer.store(sampleSourceObject) - c.Assert(err, chk.IsNil) - compareErr = destinationComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(-time.Hour), md5: destMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // verify that there's no transfer & no deletes - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 1) - c.Assert(dummyCopyScheduler.record[0].md5, chk.DeepEquals, srcMD5) - c.Assert(len(dummyCleaner.record), chk.Equals, 0) +// include and exclude flag can work together to limit the scope of source/destination comparison +func TestFileSyncS2SWithIncludeAndExcludeFlag(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // add special files that we wish to include + filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + includeString := "*.pdf;*.jpeg;exactName" + + // add special files that we wish to exclude + // note that the excluded files also match the include string + filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToExclude) + excludeString := "so*;not*;exactName" + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + raw.include = includeString + raw.exclude = excludeString + + // verify that only the files specified by the include flag are synced + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) + }) } -func (s *syncComparatorSuite) TestSyncDestCompDisableComparison(c *chk.C) { - dummyCopyScheduler := dummyProcessor{} - dummyCleaner := dummyProcessor{} - srcMD5 := []byte{'s'} - destMD5 := []byte{'d'} - - // set up the indexer as well as the destination comparator - indexer := newObjectIndexer() - destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, true) - - // create a sample source object - currTime := time.Now() - sourceStoredObjects := []StoredObject{ 
- {name: "test1", relativePath: "/usr/test1", lastModifiedTime: currTime, md5: srcMD5}, - {name: "test2", relativePath: "/usr/test2", lastModifiedTime: currTime, md5: srcMD5}, - } +// TODO: Fix me, passes locally (Windows and WSL2), but not on CI +// // validate the bug fix for this scenario +// func TestFileSyncS2SWithMissingDestination(t *testing.T) { +// a := assert.New(t) +// fsu := getFSU() +// srcShareURL, srcShareName := createNewAzureShare(a, fsu) +// dstShareURL, dstShareName := createNewAzureShare(a, fsu) +// defer deleteShare(a, srcShareURL) +// +// // delete the destination share to simulate non-existing destination, or recently removed destination +// deleteShare(a, dstShareURL) +// +// // set up the share with numerous files +// fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") +// a.NotZero(len(fileList)) +// +// // set up interceptor +// mockedRPC := interceptor{} +// Rpc = mockedRPC.intercept +// mockedRPC.init() +// +// // construct the raw input to simulate user input +// srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) +// dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) +// raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) +// +// // verify error is thrown +// runSyncAndVerify(a, raw, func(err error) { +// // error should not be nil, but the app should not crash either +// a.NotNil(err) +// +// // validate that the right number of transfers were scheduled +// a.Zero(len(mockedRPC.transfers)) +// }) +// } + +// there is a type mismatch between the source and destination +func TestFileSyncS2SMismatchShareAndFile(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // set up the destination share with a single file + singleFileName := "single" + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, []string{singleFileName}) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, singleFileName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstFileURLWithSAS.String()) + + // type mismatch, we should get an error + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) + + // validate that the right number of transfers were scheduled + a.Zero(len(mockedRPC.transfers)) + }) + + // reverse the source and destination + raw = getDefaultSyncRawInput(dstFileURLWithSAS.String(), srcShareURLWithSAS.String()) + + // type mismatch again, we should also get an error + runSyncAndVerify(a, raw, func(err error) { + a.NotNil(err) + + // validate that the right number of transfers were scheduled + a.Zero(len(mockedRPC.transfers)) + }) +} - // onlyAtSrc := StoredObject{name: "only_at_src", relativePath: "/usr/only_at_src", lastModifiedTime: currTime, md5: destMD5} +// share <-> dir sync +func TestFileSyncS2SShareAndEmptyDir(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := 
createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dirName := "emptydir" + _, err := dstShareURL.NewDirectoryURL(dirName).Create(context.Background(), azfile.Metadata{}, azfile.SMBProperties{}) + a.Nil(err) + dstDirURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(a, dstShareName, dirName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstDirURLWithSAS.String()) + + // verify that targeting a directory works fine + expectedList := scenarioHelper{}.addFoldersToList(fileList, false) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + + // validate that the right number of transfers were scheduled + a.Equal(len(expectedList), len(mockedRPC.transfers)) + + // validate that the right transfers were sent + validateS2SSyncTransfersAreScheduled(a, "", "", expectedList, mockedRPC) + }) + + // turn off recursive, this time only top files should be transferred + raw.recursive = false + mockedRPC.reset() + + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(fileList), len(mockedRPC.transfers)) + + for _, transfer := range mockedRPC.transfers { + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) + } + }) +} - destinationStoredObjects := []StoredObject{ - // file whose last modified time is greater than that of source - {name: "test1", relativePath: "/usr/test1", lastModifiedTime: time.Now().Add(time.Hour), md5: destMD5}, - // file whose last modified time is less than that of source - {name: "test2", relativePath: "/usr/test2", lastModifiedTime: time.Now().Add(-time.Hour), md5: destMD5}, - } +// regular dir -> dir sync +func TestFileSyncS2SBetweenDirs(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + dirName := "dir" + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) + a.NotZero(len(fileList)) + + // set up the destination with the exact same files + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + srcShareURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + dirName + dstShareURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + dirName + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + + // nothing should be synced since the source is older + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + + // validate that the right number of transfers were scheduled + a.Zero(len(mockedRPC.transfers)) + }) + + // refresh 
the files' last modified time so that they are newer + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + mockedRPC.reset() + expectedList := scenarioHelper{}.shaveOffPrefix(fileList, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", expectedList, mockedRPC) + }) +} - // test the comparator in case a given destination object is not present at the source - // meaning it is an extra file that needs to be deleted, so the comparator should pass the given object to the destinationCleaner - compareErr := destinationComparator.processIfNecessary(StoredObject{name: "only_at_dst", relativePath: "only_at_dst", lastModifiedTime: currTime, md5: destMD5}) - c.Assert(compareErr, chk.Equals, nil) - - // verify that destination object is being deleted - c.Assert(len(dummyCopyScheduler.record), chk.Equals, 0) - c.Assert(len(dummyCleaner.record), chk.Equals, 1) - c.Assert(dummyCleaner.record[0].md5, chk.DeepEquals, destMD5) - - // reset dummy processors - dummyCopyScheduler = dummyProcessor{} - dummyCleaner = dummyProcessor{} - - // test the comparator in case a given destination object is present at the source - // and it has a later modified time, since the source data is stale, - // no transfer happens - for key, srcStoredObject := range sourceStoredObjects { - err := indexer.store(srcStoredObject) - c.Assert(err, chk.IsNil) - compareErr = destinationComparator.processIfNecessary(destinationStoredObjects[key]) - c.Assert(compareErr, chk.Equals, nil) - c.Assert(len(dummyCopyScheduler.record), chk.Equals, key+1) - } +func TestDryrunSyncFiletoFile(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + + //set up src share + filesToInclude := []string{"AzURE2.jpeg", "TestOne.txt"} + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, filesToInclude) + + //set up dst share + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, dstShareURL) + fileToDelete := []string{"testThree.jpeg"} + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileToDelete) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedLcm := mockedLifecycleManager{dryrunLog: make(chan string, 50)} + mockedLcm.SetOutputFormat(common.EOutputFormat.Text()) + glcm = &mockedLcm + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + raw.dryrun = true + raw.deleteDestination = "true" + + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{}, mockedRPC) + + msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) + sort.Strings(msg) + for i := 0; i < len(msg); i++ { + if strings.Contains(msg[i], "DRYRUN: remove") { + a.True(strings.Contains(msg[i], dstShareURL.String())) + } else { + a.True(strings.Contains(msg[i], "DRYRUN: copy")) + a.True(strings.Contains(msg[i], srcShareName)) + a.True(strings.Contains(msg[i], dstShareURL.String())) + } + } + + a.True(testDryrunStatements(fileToDelete, msg)) + a.True(testDryrunStatements(filesToInclude, msg)) + }) +} + +func TestDryrunSyncLocaltoFile(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + + //set 
up local src + blobsToInclude := []string{"AzURE2.jpeg"} + srcDirName := scenarioHelper{}.generateLocalDirectory(a) + defer os.RemoveAll(srcDirName) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, blobsToInclude) + + //set up dst share + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, dstShareURL) + fileToDelete := []string{"testThree.jpeg"} + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileToDelete) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedLcm := mockedLifecycleManager{dryrunLog: make(chan string, 50)} + mockedLcm.SetOutputFormat(common.EOutputFormat.Text()) + glcm = &mockedLcm + + // construct the raw input to simulate user input + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcDirName, dstShareURLWithSAS.String()) + raw.dryrun = true + raw.deleteDestination = "true" + + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{}, mockedRPC) + + msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) + sort.Strings(msg) + for i := 0; i < len(msg); i++ { + if strings.Contains(msg[i], "DRYRUN: remove") { + a.True(strings.Contains(msg[i], dstShareURL.String())) + } else { + a.True(strings.Contains(msg[i], "DRYRUN: copy")) + a.True(strings.Contains(msg[i], srcDirName)) + a.True(strings.Contains(msg[i], dstShareURL.String())) + } + } + + a.True(testDryrunStatements(blobsToInclude, msg)) + a.True(testDryrunStatements(fileToDelete, msg)) + }) } + +// regular share->share sync but destination is identical to the source, transfers are scheduled based on lmt +func TestFileSyncS2SWithIdenticalDestinationTemp(t *testing.T) { + a := assert.New(t) + fsu := getFSU() + srcShareURL, srcShareName := createNewAzureShare(a, fsu) + dstShareURL, dstShareName := createNewAzureShare(a, fsu) + defer deleteShare(a, srcShareURL) + defer deleteShare(a, dstShareURL) + + // set up the source share with numerous files + fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(a, srcShareURL, "") + a.NotZero(len(fileList)) + + // set up the destination with the exact same files + scenarioHelper{}.generateAzureFilesFromList(a, dstShareURL, fileList) + + // set up interceptor + mockedRPC := interceptor{} + Rpc = mockedRPC.intercept + mockedRPC.init() + + // construct the raw input to simulate user input + srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, srcShareName) + dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(a, dstShareName) + raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) + raw.preserveSMBInfo = false + + // nothing should be sync since the source is older + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + + // validate that the right number of transfers were scheduled + a.Zero(len(mockedRPC.transfers)) + }) + + // refresh the source files' last modified time so that they get synced + scenarioHelper{}.generateAzureFilesFromList(a, srcShareURL, fileList) + mockedRPC.reset() + currentTime := time.Now() + newTime := currentTime.Add(-time.Hour) // give extra hour + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", fileList, mockedRPC) + + for _, transfer := range mockedRPC.transfers { + if !(transfer.LastModifiedTime.Before(currentTime) && transfer.LastModifiedTime.After(newTime)) { + t.Fail() + } + } + }) +} \ No newline at end of file diff 
--git a/cmd/zt_sync_file_file_test.go b/cmd/zt_sync_file_file_test.go index 073b6e3d3..2b35c3b71 100644 --- a/cmd/zt_sync_file_file_test.go +++ b/cmd/zt_sync_file_file_test.go @@ -1,4 +1,4 @@ -// Copyright © Microsoft +// Copyright © 2017 Microsoft // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,648 +21,222 @@ package cmd import ( - "context" "github.com/Azure/azure-storage-azcopy/v10/common" - "github.com/Azure/azure-storage-file-go/azfile" - chk "gopkg.in/check.v1" - "os" - "sort" - "strings" + "github.com/stretchr/testify/assert" + "testing" "time" ) -// regular file->file sync -func (s *cmdIntegrationSuite) TestFileSyncS2SWithSingleFile(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - for _, fileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { - // set up the source share with a single file - fileList := []string{fileName} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, fileList) - - // set up the destination share with the same single file - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileList) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, srcShareName, fileList[0]) - dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, dstShareName, fileList[0]) - raw := getDefaultSyncRawInput(srcFileURLWithSAS.String(), dstFileURLWithSAS.String()) - - // the destination was created after the source, so no sync should happen - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) - }) - - // recreate the source file to have a later last modified time - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, fileList) - mockedRPC.reset() - - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) - }) - } -} - -// regular share->share sync but destination is empty, so everything has to be transferred -func (s *cmdIntegrationSuite) TestFileSyncS2SWithEmptyDestination(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - - // all files at source should be synced to destination - expectedList := scenarioHelper{}.addFoldersToList(fileList, false) - runSyncAndVerify(c, raw, func(err error) { - 
c.Assert(err, chk.IsNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedList)) - - // validate that the right transfers were sent - validateS2SSyncTransfersAreScheduled(c, "", "", expectedList, mockedRPC) - }) - - // turn off recursive, this time only top files should be transferred - raw.recursive = false - mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(fileList)) - - for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) - } - }) +func TestSyncSourceComparator(t *testing.T) { + a := assert.New(t) + dummyCopyScheduler := dummyProcessor{} + srcMD5 := []byte{'s'} + destMD5 := []byte{'d'} + + // set up the indexer as well as the source comparator + indexer := newObjectIndexer() + sourceComparator := newSyncSourceComparator(indexer, dummyCopyScheduler.process, common.ESyncHashType.None(), false, false) + + // create a sample destination object + sampleDestinationObject := StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now(), md5: destMD5} + + // test the comparator in case a given source object is not present at the destination + // meaning no entry in the index, so the comparator should pass the given object to schedule a transfer + compareErr := sourceComparator.processIfNecessary(StoredObject{name: "only_at_source", relativePath: "only_at_source", lastModifiedTime: time.Now(), md5: srcMD5}) + a.Nil(compareErr) + + // check the source object was indeed scheduled + a.Equal(1, len(dummyCopyScheduler.record)) + + a.Equal(srcMD5, dummyCopyScheduler.record[0].md5) + + // reset the processor so that it's empty + dummyCopyScheduler = dummyProcessor{} + + // test the comparator in case a given source object is present at the destination + // and it has a later modified time, so the comparator should pass the given object to schedule a transfer + err := indexer.store(sampleDestinationObject) + a.Nil(err) + compareErr = sourceComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(time.Hour), md5: srcMD5}) + a.Nil(compareErr) + + // check the source object was indeed scheduled + a.Equal(1, len(dummyCopyScheduler.record)) + a.Equal(srcMD5, dummyCopyScheduler.record[0].md5) + a.Zero(len(indexer.indexMap)) + + // reset the processor so that it's empty + dummyCopyScheduler = dummyProcessor{} + + // test the comparator in case a given source object is present at the destination + // but it has an earlier modified time compared to the one at the destination + // meaning that the source object is considered stale, so no transfer should be scheduled + err = indexer.store(sampleDestinationObject) + a.Nil(err) + compareErr = sourceComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(-time.Hour), md5: srcMD5}) + a.Nil(compareErr) + + // check no source object was scheduled + a.Zero(len(dummyCopyScheduler.record)) + a.Zero(len(indexer.indexMap)) } -// regular share->share sync but destination is identical to the source, transfers are scheduled based on lmt -func (s *cmdIntegrationSuite) TestFileSyncS2SWithIdenticalDestination(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer 
deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileList) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - - // nothing should be sync since the source is older - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) - }) - - // refresh the source files' last modified time so that they get synced - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, fileList) - mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", fileList, mockedRPC) - }) -} - -// regular share->share sync where destination is missing some files from source, and also has some extra files -func (s *cmdIntegrationSuite) TestFileSyncS2SWithMismatchedDestination(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up the destination with half of the files from source - filesAlreadyAtDestination := fileList[0 : len(fileList)/2] - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, filesAlreadyAtDestination) - expectedOutput := fileList[len(fileList)/2:] // the missing half of source files should be transferred - - // add some extra files that shouldn't be included - scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, dstShareURL, "extra") - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - - expectedOutputMap := scenarioHelper{}.convertListToMap( - scenarioHelper{}.addFoldersToList(expectedOutput, false)) - everythingAlreadyAtDestination := scenarioHelper{}.convertListToMap( - scenarioHelper{}.addFoldersToList(filesAlreadyAtDestination, false)) - for exists := range everythingAlreadyAtDestination { - delete(expectedOutputMap, exists) // remove directories that actually exist at destination +func TestSyncSrcCompDisableComparator(t *testing.T) { + a := assert.New(t) + dummyCopyScheduler := dummyProcessor{} + srcMD5 := []byte{'s'} + destMD5 := []byte{'d'} + + // set up the indexer as well as the source comparator + indexer := newObjectIndexer() + sourceComparator := newSyncSourceComparator(indexer, 
dummyCopyScheduler.process, common.ESyncHashType.None(), false, true) + + // test the comparator in case a given source object is not present at the destination + // meaning no entry in the index, so the comparator should pass the given object to schedule a transfer + compareErr := sourceComparator.processIfNecessary(StoredObject{name: "only_at_source", relativePath: "only_at_source", lastModifiedTime: time.Now(), md5: srcMD5}) + a.Nil(compareErr) + + // check the source object was indeed scheduled + a.Equal(1, len(dummyCopyScheduler.record)) + a.Equal(srcMD5, dummyCopyScheduler.record[0].md5) + + // reset the processor so that it's empty + dummyCopyScheduler = dummyProcessor{} + + // create a sample source object + currTime := time.Now() + destinationStoredObjects := []StoredObject{ + // file whose last modified time is greater than that of source + {name: "test1", relativePath: "/usr/test1", lastModifiedTime: currTime, md5: destMD5}, + // file whose last modified time is less than that of source + {name: "test2", relativePath: "/usr/test2", lastModifiedTime: currTime, md5: destMD5}, } - expectedOutput = scenarioHelper{}.convertMapKeysToList(expectedOutputMap) - - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", expectedOutput, mockedRPC) - - // make sure the extra files were deleted - extraFilesFound := false - for marker := (azfile.Marker{}); marker.NotDone(); { - listResponse, err := dstShareURL.NewRootDirectoryURL().ListFilesAndDirectoriesSegment(ctx, marker, azfile.ListFilesAndDirectoriesOptions{}) - c.Assert(err, chk.IsNil) - marker = listResponse.NextMarker - - // if ever the extra files are found, note it down - for _, file := range listResponse.FileItems { - if strings.Contains(file.Name, "extra") { - extraFilesFound = true - } - } - } - - c.Assert(extraFilesFound, chk.Equals, false) - }) -} -// include flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestFileSyncS2SWithIncludeFlag(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // add special files that we wish to include - filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToInclude) - includeString := "*.pdf;*.jpeg;exactName" - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - raw.include = includeString - - // verify that only the files specified by the include flag are synced - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) - }) -} - -// exclude flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestFileSyncS2SWithExcludeFlag(c *chk.C) { - fsu := getFSU() - srcShareURL, 
srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // add special files that we wish to exclude - filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToExclude) - excludeString := "*.pdf;*.jpeg;exactName" - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - raw.exclude = excludeString - - // make sure the list doesn't include the files specified by the exclude flag - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", fileList, mockedRPC) - }) -} - -// include and exclude flag can work together to limit the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestFileSyncS2SWithIncludeAndExcludeFlag(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // add special files that we wish to include - filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToInclude) - includeString := "*.pdf;*.jpeg;exactName" - - // add special files that we wish to exclude - // note that the excluded files also match the include string - filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToExclude) - excludeString := "so*;not*;exactName" - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - raw.include = includeString - raw.exclude = excludeString - - // verify that only the files specified by the include flag are synced - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) - }) -} + sourceStoredObjects := []StoredObject{ + {name: "test1", relativePath: "/usr/test1", lastModifiedTime: currTime.Add(time.Hour), md5: srcMD5}, + {name: "test2", relativePath: "/usr/test2", lastModifiedTime: currTime.Add(-time.Hour), md5: srcMD5}, + } -// TODO: Fix me, passes locally (Windows and WSL2), but not on CI -// // validate the bug fix for this scenario -// func (s *cmdIntegrationSuite) 
TestFileSyncS2SWithMissingDestination(c *chk.C) { -// fsu := getFSU() -// srcShareURL, srcShareName := createNewAzureShare(c, fsu) -// dstShareURL, dstShareName := createNewAzureShare(c, fsu) -// defer deleteShare(c, srcShareURL) -// -// // delete the destination share to simulate non-existing destination, or recently removed destination -// deleteShare(c, dstShareURL) -// -// // set up the share with numerous files -// fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") -// c.Assert(len(fileList), chk.Not(chk.Equals), 0) -// -// // set up interceptor -// mockedRPC := interceptor{} -// Rpc = mockedRPC.intercept -// mockedRPC.init() -// -// // construct the raw input to simulate user input -// srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) -// dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) -// raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) -// -// // verify error is thrown -// runSyncAndVerify(c, raw, func(err error) { -// // error should not be nil, but the app should not crash either -// c.Assert(err, chk.NotNil) -// -// // validate that the right number of transfers were scheduled -// c.Assert(len(mockedRPC.transfers), chk.Equals, 0) -// }) -// } - -// there is a type mismatch between the source and destination -func (s *cmdIntegrationSuite) TestFileSyncS2SMismatchShareAndFile(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up the destination share with a single file - singleFileName := "single" - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, []string{singleFileName}) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, dstShareName, singleFileName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstFileURLWithSAS.String()) - - // type mismatch, we should get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) - }) - - // reverse the source and destination - raw = getDefaultSyncRawInput(dstFileURLWithSAS.String(), srcShareURLWithSAS.String()) - - // type mismatch again, we should also get an error - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) - }) + // test the comparator in case a given source object is present at the destination + // but is has an earlier modified time compared to the one at the destination + // meaning that the source object is considered stale, so no transfer should be scheduled + for key, dstStoredObject := range destinationStoredObjects { + err := indexer.store(dstStoredObject) + a.Nil(err) + compareErr = sourceComparator.processIfNecessary(sourceStoredObjects[key]) + a.Nil(compareErr) + a.Equal(key+1, 
len(dummyCopyScheduler.record)) + a.Zero(len(indexer.indexMap)) + } } -// share <-> dir sync -func (s *cmdIntegrationSuite) TestFileSyncS2SShareAndEmptyDir(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dirName := "emptydir" - _, err := dstShareURL.NewDirectoryURL(dirName).Create(context.Background(), azfile.Metadata{}, azfile.SMBProperties{}) - c.Assert(err, chk.IsNil) - dstDirURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, dstShareName, dirName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstDirURLWithSAS.String()) - - // verify that targeting a directory works fine - expectedList := scenarioHelper{}.addFoldersToList(fileList, false) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(expectedList)) - - // validate that the right transfers were sent - validateS2SSyncTransfersAreScheduled(c, "", "", expectedList, mockedRPC) - }) - - // turn off recursive, this time only top files should be transferred - raw.recursive = false - mockedRPC.reset() - - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(fileList)) - - for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) - } - }) +func TestSyncDestinationComparator(t *testing.T) { + a := assert.New(t) + dummyCopyScheduler := dummyProcessor{} + dummyCleaner := dummyProcessor{} + srcMD5 := []byte{'s'} + destMD5 := []byte{'d'} + + // set up the indexer as well as the destination comparator + indexer := newObjectIndexer() + destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, false) + + // create a sample source object + sampleSourceObject := StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now(), md5: srcMD5} + + // test the comparator in case a given destination object is not present at the source + // meaning it is an extra file that needs to be deleted, so the comparator should pass the given object to the destinationCleaner + compareErr := destinationComparator.processIfNecessary(StoredObject{name: "only_at_dst", relativePath: "only_at_dst", lastModifiedTime: time.Now(), md5: destMD5}) + a.Nil(compareErr) + + // verify that destination object is being deleted + a.Zero(len(dummyCopyScheduler.record)) + a.Equal(1, len(dummyCleaner.record)) + a.Equal(destMD5, dummyCleaner.record[0].md5) + + // reset dummy processors + dummyCopyScheduler = dummyProcessor{} + dummyCleaner = dummyProcessor{} + + // test the comparator in case a given destination object is present at the source + // and it has a later modified time, since the source data is stale, + // no transfer happens + err := 
indexer.store(sampleSourceObject) + a.Nil(err) + compareErr = destinationComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(time.Hour), md5: destMD5}) + a.Nil(compareErr) + + // verify that no transfer is scheduled and nothing is deleted + a.Zero(len(dummyCopyScheduler.record)) + a.Zero(len(dummyCleaner.record)) + + // reset dummy processors + dummyCopyScheduler = dummyProcessor{} + dummyCleaner = dummyProcessor{} + + // test the comparator in case a given destination object is present at the source + // but it has an earlier modified time compared to the one at the source + // meaning that the source object should be transferred since the destination object is stale + err = indexer.store(sampleSourceObject) + a.Nil(err) + compareErr = destinationComparator.processIfNecessary(StoredObject{name: "test", relativePath: "/usr/test", lastModifiedTime: time.Now().Add(-time.Hour), md5: destMD5}) + a.Nil(compareErr) + + // verify that the source object is scheduled for transfer and nothing is deleted + a.Equal(1, len(dummyCopyScheduler.record)) + a.Equal(srcMD5, dummyCopyScheduler.record[0].md5) + a.Zero(len(dummyCleaner.record)) } -// regular dir -> dir sync -func (s *cmdIntegrationSuite) TestFileSyncS2SBetweenDirs(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - dirName := "dir" - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileList) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - srcShareURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + dirName - dstShareURLWithSAS.Path += common.AZCOPY_PATH_SEPARATOR_STRING + dirName - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - - // nothing should be synced since the source is older - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) - }) - - // refresh the files' last modified time so that they are newer - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, fileList) - mockedRPC.reset() - expectedList := scenarioHelper{}.shaveOffPrefix(fileList, dirName+common.AZCOPY_PATH_SEPARATOR_STRING) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", expectedList, mockedRPC) - }) -} +func TestSyncDestCompDisableComparison(t *testing.T) { + a := assert.New(t) + dummyCopyScheduler := dummyProcessor{} + dummyCleaner := dummyProcessor{} + srcMD5 := []byte{'s'} + destMD5 := []byte{'d'} + + // set up the indexer as well as the destination comparator + indexer := newObjectIndexer() + destinationComparator := newSyncDestinationComparator(indexer, dummyCopyScheduler.process, dummyCleaner.process, common.ESyncHashType.None(), false, true) + + // create
a sample source object + currTime := time.Now() + sourceStoredObjects := []StoredObject{ + {name: "test1", relativePath: "/usr/test1", lastModifiedTime: currTime, md5: srcMD5}, + {name: "test2", relativePath: "/usr/test2", lastModifiedTime: currTime, md5: srcMD5}, + } -func (s *cmdIntegrationSuite) TestDryrunSyncFiletoFile(c *chk.C) { - fsu := getFSU() - - //set up src share - filesToInclude := []string{"AzURE2.jpeg", "TestOne.txt"} - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, filesToInclude) - - //set up dst share - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, dstShareURL) - fileToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileToDelete) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedLcm := mockedLifecycleManager{dryrunLog: make(chan string, 50)} - mockedLcm.SetOutputFormat(common.EOutputFormat.Text()) - glcm = &mockedLcm - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - raw.dryrun = true - raw.deleteDestination = "true" - - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{}, mockedRPC) - - msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) - sort.Strings(msg) - for i := 0; i < len(msg); i++ { - if strings.Contains(msg[i], "DRYRUN: remove") { - c.Check(strings.Contains(msg[i], dstShareURL.String()), chk.Equals, true) - } else { - c.Check(strings.Contains(msg[i], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[i], srcShareName), chk.Equals, true) - c.Check(strings.Contains(msg[i], dstShareURL.String()), chk.Equals, true) - } - } - - c.Check(testDryrunStatements(fileToDelete, msg), chk.Equals, true) - c.Check(testDryrunStatements(filesToInclude, msg), chk.Equals, true) - }) -} + // onlyAtSrc := StoredObject{name: "only_at_src", relativePath: "/usr/only_at_src", lastModifiedTime: currTime, md5: destMD5} -func (s *cmdIntegrationSuite) TestDryrunSyncLocaltoFile(c *chk.C) { - fsu := getFSU() - - //set up local src - blobsToInclude := []string{"AzURE2.jpeg"} - srcDirName := scenarioHelper{}.generateLocalDirectory(c) - defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, blobsToInclude) - - //set up dst share - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, dstShareURL) - fileToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileToDelete) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedLcm := mockedLifecycleManager{dryrunLog: make(chan string, 50)} - mockedLcm.SetOutputFormat(common.EOutputFormat.Text()) - glcm = &mockedLcm - - // construct the raw input to simulate user input - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcDirName, dstShareURLWithSAS.String()) - raw.dryrun = true - raw.deleteDestination = "true" - - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{}, mockedRPC) - - msg := 
mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) - sort.Strings(msg) - for i := 0; i < len(msg); i++ { - if strings.Contains(msg[i], "DRYRUN: remove") { - c.Check(strings.Contains(msg[i], dstShareURL.String()), chk.Equals, true) - } else { - c.Check(strings.Contains(msg[i], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[i], srcDirName), chk.Equals, true) - c.Check(strings.Contains(msg[i], dstShareURL.String()), chk.Equals, true) - } - } - - c.Check(testDryrunStatements(blobsToInclude, msg), chk.Equals, true) - c.Check(testDryrunStatements(fileToDelete, msg), chk.Equals, true) - }) -} + destinationStoredObjects := []StoredObject{ + // file whose last modified time is greater than that of source + {name: "test1", relativePath: "/usr/test1", lastModifiedTime: time.Now().Add(time.Hour), md5: destMD5}, + // file whose last modified time is less than that of source + {name: "test2", relativePath: "/usr/test2", lastModifiedTime: time.Now().Add(-time.Hour), md5: destMD5}, + } -// regular share->share sync but destination is identical to the source, transfers are scheduled based on lmt -func (s *cmdIntegrationSuite) TestFileSyncS2SWithIdenticalDestinationTemp(c *chk.C) { - fsu := getFSU() - srcShareURL, srcShareName := createNewAzureShare(c, fsu) - dstShareURL, dstShareName := createNewAzureShare(c, fsu) - defer deleteShare(c, srcShareURL) - defer deleteShare(c, dstShareURL) - - // set up the source share with numerous files - fileList := scenarioHelper{}.generateCommonRemoteScenarioForAzureFile(c, srcShareURL, "") - c.Assert(len(fileList), chk.Not(chk.Equals), 0) - - // set up the destination with the exact same files - scenarioHelper{}.generateAzureFilesFromList(c, dstShareURL, fileList) - - // set up interceptor - mockedRPC := interceptor{} - Rpc = mockedRPC.intercept - mockedRPC.init() - - // construct the raw input to simulate user input - srcShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, srcShareName) - dstShareURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, dstShareName) - raw := getDefaultSyncRawInput(srcShareURLWithSAS.String(), dstShareURLWithSAS.String()) - raw.preserveSMBInfo = false - - // nothing should be sync since the source is older - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - - // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) - }) - - // refresh the source files' last modified time so that they get synced - scenarioHelper{}.generateAzureFilesFromList(c, srcShareURL, fileList) - mockedRPC.reset() - currentTime := time.Now() - newTime := currentTime.Add(-time.Hour) // give extra hour - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", fileList, mockedRPC) - - for _, transfer := range mockedRPC.transfers { - if transfer.LastModifiedTime.Before(currentTime) && transfer.LastModifiedTime.After(newTime) { - c.Succeed() - } - } - }) + // test the comparator in case a given destination object is not present at the source + // meaning it is an extra file that needs to be deleted, so the comparator should pass the given object to the destinationCleaner + compareErr := destinationComparator.processIfNecessary(StoredObject{name: "only_at_dst", relativePath: "only_at_dst", lastModifiedTime: currTime, md5: destMD5}) + a.Nil(compareErr) + + // verify that destination object is being deleted + a.Zero(len(dummyCopyScheduler.record)) + a.Equal(1, len(dummyCleaner.record)) + a.Equal(destMD5, 
dummyCleaner.record[0].md5) + + // reset dummy processors + dummyCopyScheduler = dummyProcessor{} + dummyCleaner = dummyProcessor{} + + // test the comparator when comparison is disabled: each destination object is also present at the source, + // and regardless of whether its modified time is later or earlier than the source's, + // the source object is always scheduled for transfer + for key, srcStoredObject := range sourceStoredObjects { + err := indexer.store(srcStoredObject) + a.Nil(err) + compareErr = destinationComparator.processIfNecessary(destinationStoredObjects[key]) + a.Nil(compareErr) + a.Equal(key+1, len(dummyCopyScheduler.record)) + } } diff --git a/cmd/zt_sync_local_blob_test.go b/cmd/zt_sync_local_blob_test.go index bd92a785f..235c3a5fc 100644 --- a/cmd/zt_sync_local_blob_test.go +++ b/cmd/zt_sync_local_blob_test.go @@ -22,35 +22,37 @@ package cmd import ( "context" + "github.com/stretchr/testify/assert" "os" "path/filepath" "sort" "strings" + "testing" "time" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) // regular file->blob sync -func (s *cmdIntegrationSuite) TestSyncUploadWithSingleFile(c *chk.C) { +func TestSyncUploadWithSingleFile(t *testing.T) { + a := assert.New(t) bsu := getBSU() - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) for _, srcFileName := range []string{"singlefileisbest", "打麻将.txt", "%4509%4254$85140&"} { // set up the source as a single file - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) fileList := []string{srcFileName} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, fileList) // set up the destination container with a single blob time.Sleep(time.Second) // later LMT dstBlobName := srcFileName - scenarioHelper{}.generateBlobsFromList(c, containerURL, []string{dstBlobName}, blockBlobDefaultData) - c.Assert(containerURL, chk.NotNil) + scenarioHelper{}.generateBlobsFromList(a, containerURL, []string{dstBlobName}, blockBlobDefaultData) + a.NotNil(containerURL) // set up interceptor mockedRPC := interceptor{} @@ -58,46 +60,47 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithSingleFile(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dstBlobName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dstBlobName) raw := getDefaultSyncRawInput(filepath.Join(srcDirName, srcFileName), rawBlobURLWithSAS.String()) // the blob was created after the file, so no sync should happen - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // recreate the file to have a later last modified time time.Sleep(time.Second) - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, []string{srcFileName}) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, []string{srcFileName}) mockedRPC.reset() // the file was created after the blob, so the sync should happen - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, 
func(err error) { + a.Nil(err) // if source and destination already point to files, the relative path is an empty string "" - validateUploadTransfersAreScheduled(c, "", "", []string{""}, mockedRPC) + validateUploadTransfersAreScheduled(a, "", "", []string{""}, mockedRPC) }) } } // regular directory->container sync but destination is empty, so everything has to be transferred // this test seems to flake out. -func (s *cmdIntegrationSuite) TestSyncUploadWithEmptyDestination(c *chk.C) { +func TestSyncUploadWithEmptyDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") time.Sleep(time.Second) // set up an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -105,49 +108,50 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithEmptyDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, len(fileList)) + a.Equal(len(fileList), len(mockedRPC.transfers)) // validate that the right transfers were sent - validateUploadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + validateUploadTransfersAreScheduled(a, "", "", fileList, mockedRPC) }) // turn off recursive, this time only top blobs should be transferred raw.recursive = false mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - c.Assert(len(mockedRPC.transfers), chk.Not(chk.Equals), len(fileList)) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + a.NotEqual(len(fileList), len(mockedRPC.transfers)) for _, transfer := range mockedRPC.transfers { - c.Assert(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false) + a.False(strings.Contains(transfer.Source, common.AZCOPY_PATH_SEPARATOR_STRING)) } }) } // regular directory->container sync but destination is identical to the source, transfers are scheduled based on lmt -func (s *cmdIntegrationSuite) TestSyncUploadWithIdenticalDestination(c *chk.C) { +func TestSyncUploadWithIdenticalDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // set up an the container with the exact same files, but later lmts - containerURL, containerName := 
createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // wait for 1 second so that the last modified times of the blobs are guaranteed to be newer time.Sleep(time.Second) - scenarioHelper{}.generateBlobsFromList(c, containerURL, fileList, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, fileList, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -155,42 +159,43 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithIdenticalDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) // refresh the files' last modified time so that they are newer - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, fileList) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", fileList, mockedRPC) }) } // regular container->directory sync where destination is missing some files from source, and also has some extra files -func (s *cmdIntegrationSuite) TestSyncUploadWithMismatchedDestination(c *chk.C) { +func TestSyncUploadWithMismatchedDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // set up an the container with half of the files, but later lmts // also add some extra blobs that are not present at the source extraBlobs := []string{"extraFile1.pdf, extraFile2.txt"} - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - scenarioHelper{}.generateBlobsFromList(c, containerURL, fileList[0:len(fileList)/2], blockBlobDefaultData) - scenarioHelper{}.generateBlobsFromList(c, containerURL, extraBlobs, blockBlobDefaultData) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + scenarioHelper{}.generateBlobsFromList(a, containerURL, fileList[0:len(fileList)/2], blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, extraBlobs, blockBlobDefaultData) expectedOutput := fileList[len(fileList)/2:] // set up interceptor @@ -199,38 +204,39 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithMismatchedDestination(c *chk.C) mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := 
scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", expectedOutput, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", expectedOutput, mockedRPC) // make sure the extra blobs were deleted for _, blobName := range extraBlobs { exists := scenarioHelper{}.blobExists(containerURL.NewBlobURL(blobName)) - c.Assert(exists, chk.Equals, false) + a.False(exists) } }) } // include flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncUploadWithIncludePatternFlag(c *chk.C) { +func TestSyncUploadWithIncludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg", "exactName"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // set up the destination as an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -238,33 +244,34 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithIncludePatternFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.include = includeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) }) } // exclude flag limits the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncUploadWithExcludePatternFlag(c *chk.C) { +func TestSyncUploadWithExcludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // add special files that we wish to exclude filesToExclude := []string{"notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToExclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToExclude) excludeString := "*.pdf;*.jpeg;exactName" // set up the destination as an empty container - 
containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -272,39 +279,40 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithExcludePatternFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.exclude = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", fileList, mockedRPC) }) } // include and exclude flag can work together to limit the scope of source/destination comparison -func (s *cmdIntegrationSuite) TestSyncUploadWithIncludeAndExcludePatternFlag(c *chk.C) { +func TestSyncUploadWithIncludeAndExcludePatternFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // add special files that we wish to include filesToInclude := []string{"important.pdf", "includeSub/amazing.jpeg"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToInclude) includeString := "*.pdf;*.jpeg;exactName" // add special files that we wish to exclude // note that the excluded files also match the include string filesToExclude := []string{"sorry.pdf", "exclude/notGood.jpeg", "exactName", "sub/exactName"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToExclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToExclude) excludeString := "so*;not*;exactName" // set up the destination as an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -312,34 +320,35 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithIncludeAndExcludePatternFlag(c * mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.include = includeString raw.exclude = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) }) } // a specific path is avoided in the comparison -func (s *cmdIntegrationSuite) TestSyncUploadWithExcludePathFlag(c *chk.C) { +func TestSyncUploadWithExcludePathFlag(t 
*testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // add special files that we wish to exclude filesToExclude := []string{"excludeSub/notGood.pdf", "excludeSub/lame.jpeg", "exactName"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToExclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToExclude) excludeString := "excludeSub;exactName" // set up the destination as an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} @@ -347,49 +356,50 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithExcludePathFlag(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.excludePath = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", fileList, mockedRPC) }) // now set up the destination with the blobs to be excluded, and make sure they are not touched - scenarioHelper{}.generateBlobsFromList(c, containerURL, filesToExclude, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, containerURL, filesToExclude, blockBlobDefaultData) // re-create the ones at the source so that their lmts are newer - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToExclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToExclude) mockedRPC.reset() - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", fileList, mockedRPC) // make sure the extra blobs were not touched for _, blobName := range filesToExclude { exists := scenarioHelper{}.blobExists(containerURL.NewBlobURL(blobName)) - c.Assert(exists, chk.Equals, true) + a.True(exists) } }) } // validate the bug fix for this scenario -func (s *cmdIntegrationSuite) TestSyncUploadWithMissingDestination(c *chk.C) { +func TestSyncUploadWithMissingDestination(t *testing.T) { + a := assert.New(t) bsu := getBSU() // set up the source with numerous files - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // set up the destination as an non-existent container - containerURL, containerName := getContainerURL(c, bsu) + containerURL, containerName := getContainerURL(a, bsu) // validate that the container does not 
exist _, err := containerURL.GetProperties(context.Background(), azblob.LeaseAccessConditions{}) - c.Assert(err, chk.NotNil) + a.NotNil(err) // set up interceptor mockedRPC := interceptor{} @@ -397,32 +407,33 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithMissingDestination(c *chk.C) { mockedRPC.init() // construct the raw input to simulate user input - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) - runSyncAndVerify(c, raw, func(err error) { + runSyncAndVerify(a, raw, func(err error) { // error should not be nil, but the app should not crash either - c.Assert(err, chk.NotNil) + a.NotNil(err) // validate that the right number of transfers were scheduled - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) } -func (s *cmdIntegrationSuite) TestDryrunSyncLocaltoBlob(c *chk.C) { +func TestDryrunSyncLocaltoBlob(t *testing.T) { + a := assert.New(t) bsu := getBSU() //set up local src blobsToInclude := []string{"AzURE2.jpeg", "sub1/aTestOne.txt", "sub1/sub2/testTwo.pdf"} - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, blobsToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, blobsToInclude) //set up dst container - dstContainerURL, dstContainerName := createNewContainer(c, bsu) - defer deleteContainer(c, dstContainerURL) + dstContainerURL, dstContainerName := createNewContainer(a, bsu) + defer deleteContainer(a, dstContainerURL) blobsToDelete := []string{"testThree.jpeg"} - scenarioHelper{}.generateBlobsFromList(c, dstContainerURL, blobsToDelete, blockBlobDefaultData) + scenarioHelper{}.generateBlobsFromList(a, dstContainerURL, blobsToDelete, blockBlobDefaultData) // set up interceptor mockedRPC := interceptor{} @@ -432,28 +443,28 @@ func (s *cmdIntegrationSuite) TestDryrunSyncLocaltoBlob(c *chk.C) { glcm = &mockedLcm // construct the raw input to simulate user input - dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, dstContainerName) + dstContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, dstContainerName) raw := getDefaultSyncRawInput(srcDirName, dstContainerURLWithSAS.String()) raw.dryrun = true raw.deleteDestination = "true" - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateS2SSyncTransfersAreScheduled(c, "", "", []string{}, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateS2SSyncTransfersAreScheduled(a, "", "", []string{}, mockedRPC) msg := mockedLcm.GatherAllLogs(mockedLcm.dryrunLog) sort.Strings(msg) for i := 0; i < len(msg); i++ { if strings.Contains(msg[i], "DRYRUN: remove") { - c.Check(strings.Contains(msg[i], dstContainerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], dstContainerURL.String())) } else { - c.Check(strings.Contains(msg[i], "DRYRUN: copy"), chk.Equals, true) - c.Check(strings.Contains(msg[i], srcDirName), chk.Equals, true) - c.Check(strings.Contains(msg[i], dstContainerURL.String()), chk.Equals, true) + a.True(strings.Contains(msg[i], "DRYRUN: copy")) + a.True(strings.Contains(msg[i], srcDirName)) + a.True(strings.Contains(msg[i], dstContainerURL.String())) } } - c.Check(testDryrunStatements(blobsToInclude, msg), chk.Equals, true) - 
c.Check(testDryrunStatements(blobsToDelete, msg), chk.Equals, true) + a.True(testDryrunStatements(blobsToInclude, msg)) + a.True(testDryrunStatements(blobsToDelete, msg)) }) -} +} \ No newline at end of file diff --git a/cmd/zt_sync_local_blob_windows_test.go b/cmd/zt_sync_local_blob_windows_test.go index f4f128c9d..19614f266 100644 --- a/cmd/zt_sync_local_blob_windows_test.go +++ b/cmd/zt_sync_local_blob_windows_test.go @@ -21,77 +21,79 @@ package cmd import ( + "github.com/stretchr/testify/assert" "io/fs" "os" "path/filepath" + "testing" "time" - - chk "gopkg.in/check.v1" ) -func (s *cmdIntegrationSuite) TestSyncUploadWithExcludeAttrFlag(c *chk.C) { +func TestSyncUploadWithExcludeAttrFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // add special files with attributes that we wish to exclude filesToExclude := []string{"file1.pdf", "file2.txt", "file3"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToExclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToExclude) attrList := []string{"H", "I", "C"} excludeAttrsStr := "H;I;S" - scenarioHelper{}.setAttributesForLocalFiles(c, srcDirName, filesToExclude, attrList) + scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, filesToExclude, attrList) // set up the destination as an empty container - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.excludeFileAttributes = excludeAttrsStr - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", fileList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", fileList, mockedRPC) }) } -func (s *cmdIntegrationSuite) TestSyncUploadWithIncludeAttrFlag(c *chk.C) { +func TestSyncUploadWithIncludeAttrFlag(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") // add special files with attributes that we wish to include filesToInclude := []string{"file1.txt", "file2.pdf", "file3.pdf"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, filesToInclude) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, filesToInclude) attrList := []string{"H", "I", "C"} includeAttrsStr := "H;I;S" - scenarioHelper{}.setAttributesForLocalFiles(c, srcDirName, filesToInclude, attrList) + scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, filesToInclude, attrList) // set up the destination as an empty container - 
containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) // set up interceptor mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.includeFileAttributes = includeAttrsStr - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", filesToInclude, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", filesToInclude, mockedRPC) }) } @@ -100,35 +102,36 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithIncludeAttrFlag(c *chk.C) { // Create one file that matches only the attribute filter // Create one file that matches both // Only the last file should be transferred -func (s *cmdIntegrationSuite) TestSyncUploadWithIncludeAndIncludeAttrFlags(c *chk.C) { +func TestSyncUploadWithIncludeAndIncludeAttrFlags(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") fileList := []string{"file1.txt", "file2.png", "file3.txt"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, fileList) includeString := "*.txt" includeAttrsStr := "H;I;S" attrList := []string{"H", "I", "C"} - scenarioHelper{}.setAttributesForLocalFiles(c, srcDirName, fileList[1:], attrList) + scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, fileList[1:], attrList) - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.includeFileAttributes = includeAttrsStr raw.include = includeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", fileList[2:], mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", fileList[2:], mockedRPC) }) } @@ -137,66 +140,68 @@ func (s *cmdIntegrationSuite) TestSyncUploadWithIncludeAndIncludeAttrFlags(c *ch // Create one file that matches only the attribute filter // Create one file that matches both // None of them should be transferred -func (s *cmdIntegrationSuite) TestSyncUploadWithExcludeAndExcludeAttrFlags(c *chk.C) { +func TestSyncUploadWithExcludeAndExcludeAttrFlags(t *testing.T) { + a := assert.New(t) bsu := getBSU() - srcDirName := scenarioHelper{}.generateLocalDirectory(c) + srcDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(srcDirName) - commonFileList := 
scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirName, "") + commonFileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(a, srcDirName, "") fileList := []string{"file1.bin", "file2.png", "file3.bin"} - scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, srcDirName, fileList) excludeString := "*.bin" excludeAttrsStr := "H;I;S" attrList := []string{"H", "I", "C"} - scenarioHelper{}.setAttributesForLocalFiles(c, srcDirName, fileList[1:], attrList) + scenarioHelper{}.setAttributesForLocalFiles(a, srcDirName, fileList[1:], attrList) - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(srcDirName, rawContainerURLWithSAS.String()) raw.excludeFileAttributes = excludeAttrsStr raw.exclude = excludeString - runSyncAndVerify(c, raw, func(err error) { - c.Assert(err, chk.IsNil) - validateUploadTransfersAreScheduled(c, "", "", commonFileList, mockedRPC) + runSyncAndVerify(a, raw, func(err error) { + a.Nil(err) + validateUploadTransfersAreScheduled(a, "", "", commonFileList, mockedRPC) }) } // mouthfull of a test name, but this ensures that case insensitivity doesn't cause the unintended deletion of files -func (s *cmdIntegrationSuite) TestSyncDownloadWithDeleteDestinationOnCaseInsensitiveFS(c *chk.C) { +func TestSyncDownloadWithDeleteDestinationOnCaseInsensitiveFS(t *testing.T) { + a := assert.New(t) bsu := getBSU() - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) fileList := []string{"FileWithCaps", "FiLeTwO", "FoOBaRBaZ"} - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) - scenarioHelper{}.generateBlobsFromList(c, containerURL, fileList, "Hello, World!") + scenarioHelper{}.generateBlobsFromList(a, containerURL, fileList, "Hello, World!") // let the local files be in the future; we don't want to do _anything_ to them; not delete nor download. 
time.Sleep(time.Second * 5) - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, fileList) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, fileList) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() - rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) raw := getDefaultSyncRawInput(rawContainerURLWithSAS.String(), dstDirName) raw.recursive = true raw.deleteDestination = "true" - runSyncAndVerify(c, raw, func(err error) { + runSyncAndVerify(a, raw, func(err error) { // It should not have deleted them seenFiles := make(map[string]bool) filepath.Walk(dstDirName, func(path string, info fs.FileInfo, err error) error { @@ -208,12 +213,12 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithDeleteDestinationOnCaseInsensi return nil }) - c.Assert(len(seenFiles), chk.Equals, len(fileList)) + a.Equal(len(fileList), len(seenFiles)) for _, v := range fileList { - c.Assert(seenFiles[v], chk.Equals, true) + a.True(seenFiles[v]) } // It should not have downloaded them - c.Assert(len(mockedRPC.transfers), chk.Equals, 0) + a.Zero(len(mockedRPC.transfers)) }) -} +} \ No newline at end of file diff --git a/cmd/zt_sync_processor_test.go b/cmd/zt_sync_processor_test.go index c96e7f658..28968171b 100644 --- a/cmd/zt_sync_processor_test.go +++ b/cmd/zt_sync_processor_test.go @@ -22,24 +22,22 @@ package cmd import ( "context" + "github.com/stretchr/testify/assert" "os" "path/filepath" + "testing" "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) -type syncProcessorSuite struct{} - -var _ = chk.Suite(&syncProcessorSuite{}) - -func (s *syncProcessorSuite) TestLocalDeleter(c *chk.C) { +func TestLocalDeleter(t *testing.T) { + a := assert.New(t) // set up the local file - dstDirName := scenarioHelper{}.generateLocalDirectory(c) + dstDirName := scenarioHelper{}.generateLocalDirectory(a) defer os.RemoveAll(dstDirName) dstFileName := "extraFile.txt" - scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, []string{dstFileName}) + scenarioHelper{}.generateLocalFilesFromList(a, dstDirName, []string{dstFileName}) // construct the cooked input to simulate user input cca := &cookedSyncCmdArgs{ @@ -52,33 +50,34 @@ func (s *syncProcessorSuite) TestLocalDeleter(c *chk.C) { // validate that the file still exists _, err := os.Stat(filepath.Join(dstDirName, dstFileName)) - c.Assert(err, chk.IsNil) + a.Nil(err) // exercise the deleter err = deleter.removeImmediately(StoredObject{relativePath: dstFileName}) - c.Assert(err, chk.IsNil) + a.Nil(err) // validate that the file no longer exists _, err = os.Stat(filepath.Join(dstDirName, dstFileName)) - c.Assert(err, chk.NotNil) + a.NotNil(err) } -func (s *syncProcessorSuite) TestBlobDeleter(c *chk.C) { +func TestBlobDeleter(t *testing.T) { + a := assert.New(t) bsu := getBSU() blobName := "extraBlob.pdf" // set up the blob to delete - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - scenarioHelper{}.generateBlobsFromList(c, containerURL, []string{blobName}, blockBlobDefaultData) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + scenarioHelper{}.generateBlobsFromList(a, containerURL, []string{blobName}, blockBlobDefaultData) // validate that the blob exists blobURL := containerURL.NewBlobURL(blobName) _, err := 
blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct the cooked input to simulate user input - rawContainerURL := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) + rawContainerURL := scenarioHelper{}.getRawContainerURLWithSAS(a, containerName) cca := &cookedSyncCmdArgs{ destination: newRemoteRes(rawContainerURL.String()), credentialInfo: common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, @@ -88,33 +87,34 @@ func (s *syncProcessorSuite) TestBlobDeleter(c *chk.C) { // set up the blob deleter deleter, err := newSyncDeleteProcessor(cca, common.EFolderPropertiesOption.NoFolders()) - c.Assert(err, chk.IsNil) + a.Nil(err) // exercise the deleter err = deleter.removeImmediately(StoredObject{relativePath: blobName}) - c.Assert(err, chk.IsNil) + a.Nil(err) // validate that the blob was deleted _, err = blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) - c.Assert(err, chk.NotNil) + a.NotNil(err) } -func (s *syncProcessorSuite) TestFileDeleter(c *chk.C) { +func TestFileDeleter(t *testing.T) { + a := assert.New(t) fsu := getFSU() fileName := "extraFile.pdf" // set up the file to delete - shareURL, shareName := createNewAzureShare(c, fsu) - defer deleteShare(c, shareURL) - scenarioHelper{}.generateAzureFilesFromList(c, shareURL, []string{fileName}) + shareURL, shareName := createNewAzureShare(a, fsu) + defer deleteShare(a, shareURL) + scenarioHelper{}.generateAzureFilesFromList(a, shareURL, []string{fileName}) // validate that the file exists fileURL := shareURL.NewRootDirectoryURL().NewFileURL(fileName) _, err := fileURL.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct the cooked input to simulate user input - rawShareSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName) + rawShareSAS := scenarioHelper{}.getRawShareURLWithSAS(a, shareName) cca := &cookedSyncCmdArgs{ destination: newRemoteRes(rawShareSAS.String()), credentialInfo: common.CredentialInfo{CredentialType: common.ECredentialType.Anonymous()}, @@ -124,13 +124,13 @@ func (s *syncProcessorSuite) TestFileDeleter(c *chk.C) { // set up the file deleter deleter, err := newSyncDeleteProcessor(cca, common.EFolderPropertiesOption.NoFolders()) - c.Assert(err, chk.IsNil) + a.Nil(err) // exercise the deleter err = deleter.removeImmediately(StoredObject{relativePath: fileName}) - c.Assert(err, chk.IsNil) + a.Nil(err) // validate that the file was deleted _, err = fileURL.GetProperties(context.Background()) - c.Assert(err, chk.NotNil) -} + a.NotNil(err) +} \ No newline at end of file diff --git a/cmd/zt_test.go b/cmd/zt_test.go index eae7790ca..531ef2cd9 100644 --- a/cmd/zt_test.go +++ b/cmd/zt_test.go @@ -25,6 +25,7 @@ import ( "context" "errors" "fmt" + "github.com/stretchr/testify/assert" "io" "math/rand" "net/url" @@ -42,18 +43,10 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/ste" "github.com/minio/minio-go" - chk "gopkg.in/check.v1" - "github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-file-go/azfile" ) -// Hookup to the testing framework -func Test(t *testing.T) { chk.TestingT(t) } - -type cmdIntegrationSuite struct{} - -var _ = chk.Suite(&cmdIntegrationSuite{}) var ctx = context.Background() const ( @@ -82,9 +75,9 @@ func isS3Disabled() bool { return strings.ToLower(os.Getenv("S3_TESTS_OFF")) != "" } -func skipIfS3Disabled(c *chk.C) { +func 
skipIfS3Disabled(t *testing.T) { if isS3Disabled() { - c.Skip("S3 testing is disabled for this unit test suite run.") + t.Skip("S3 testing is disabled for this unit test suite run.") } } @@ -93,9 +86,9 @@ func gcpTestsDisabled() bool { return strings.ToLower(os.Getenv("GCP_TESTS_OFF")) != "" } -func skipIfGCPDisabled(c *chk.C) { +func skipIfGCPDisabled(t *testing.T) { if gcpTestsDisabled() { - c.Skip("GCP testing is disabled for this run") + t.Skip("GCP testing is disabled for this run") } } @@ -130,7 +123,7 @@ func generateName(prefix string, maxLen int) string { name := "TestFoo" // default stub "Foo" is used if anything goes wrong with this procedure for { frame, more := frames.Next() - if strings.Contains(frame.Func.Name(), "Suite") { + if strings.Contains(frame.Func.Name(), "Test") { name = frame.Func.Name() break } else if !more { @@ -184,7 +177,7 @@ func generateFilesystemName() string { return generateName(blobfsPrefix, 63) } -func getShareURL(c *chk.C, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { +func getShareURL(a *assert.Assertions, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { name = generateShareName() share = fsu.NewShareURL(name) @@ -199,49 +192,49 @@ func generateBfsFileName() string { return generateName(blobfsPrefix, 0) } -func getContainerURL(c *chk.C, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { +func getContainerURL(a *assert.Assertions, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { name = generateContainerName() container = bsu.NewContainerURL(name) return container, name } -func getFilesystemURL(c *chk.C, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { +func getFilesystemURL(a *assert.Assertions, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { name = generateFilesystemName() filesystem = bfssu.NewFileSystemURL(name) return } -func getBlockBlobURL(c *chk.C, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { +func getBlockBlobURL(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { name = prefix + generateBlobName() blob = container.NewBlockBlobURL(name) return blob, name } -func getBfsFileURL(c *chk.C, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { +func getBfsFileURL(a *assert.Assertions, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { name = prefix + generateBfsFileName() file = filesystemURL.NewRootDirectoryURL().NewFileURL(name) return } -func getAppendBlobURL(c *chk.C, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { +func getAppendBlobURL(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { name = generateBlobName() blob = container.NewAppendBlobURL(prefix + name) return blob, name } -func getPageBlobURL(c *chk.C, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { +func getPageBlobURL(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { name = generateBlobName() blob = container.NewPageBlobURL(prefix + name) return } -func getAzureFileURL(c *chk.C, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { +func getAzureFileURL(a *assert.Assertions, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { name = prefix + 
generateAzureFileName() fileURL = shareURL.NewRootDirectoryURL().NewFileURL(name) @@ -303,140 +296,124 @@ func GetBFSSU() azbfs.ServiceURL { return azbfs.NewServiceURL(*u, pipeline) } -func createNewContainer(c *chk.C, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { - container, name = getContainerURL(c, bsu) +func createNewContainer(a *assert.Assertions, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { + container, name = getContainerURL(a, bsu) // ignore any errors here, since it doesn't matter if this fails (if it does, it's probably because the container didn't exist) _, _ = container.Delete(ctx, azblob.ContainerAccessConditions{}) - cResp, err := container.Create(ctx, nil, azblob.PublicAccessNone) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := container.Create(ctx, nil, azblob.PublicAccessNone) + a.Nil(err) return container, name } -func createNewFilesystem(c *chk.C, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { - filesystem, name = getFilesystemURL(c, bfssu) +func createNewFilesystem(a *assert.Assertions, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { + filesystem, name = getFilesystemURL(a, bfssu) // ditto _, _ = filesystem.Delete(ctx) - cResp, err := filesystem.Create(ctx) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := filesystem.Create(ctx) + a.Nil(err) return } -func createNewBfsFile(c *chk.C, filesystem azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { - file, name = getBfsFileURL(c, filesystem, prefix) +func createNewBfsFile(a *assert.Assertions, filesystem azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { + file, name = getBfsFileURL(a, filesystem, prefix) // Create the file - cResp, err := file.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := file.Create(ctx, azbfs.BlobFSHTTPHeaders{}, azbfs.BlobFSAccessControl{}) + a.Nil(err) - aResp, err := file.AppendData(ctx, 0, strings.NewReader(string(make([]byte, defaultBlobFSFileSizeInBytes)))) - c.Assert(err, chk.IsNil) - c.Assert(aResp.StatusCode(), chk.Equals, 202) + _, err = file.AppendData(ctx, 0, strings.NewReader(string(make([]byte, defaultBlobFSFileSizeInBytes)))) + a.Nil(err) - fResp, err := file.FlushData(ctx, defaultBlobFSFileSizeInBytes, nil, azbfs.BlobFSHTTPHeaders{}, false, true) - c.Assert(err, chk.IsNil) - c.Assert(fResp.StatusCode(), chk.Equals, 200) + _, err = file.FlushData(ctx, defaultBlobFSFileSizeInBytes, nil, azbfs.BlobFSHTTPHeaders{}, false, true) + a.Nil(err) return } -func createNewBlockBlob(c *chk.C, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { - blob, name = getBlockBlobURL(c, container, prefix) +func createNewBlockBlob(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { + blob, name = getBlockBlobURL(a, container, prefix) - cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, + _, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) return } // create metadata indicating that this is 
a dir -func createNewDirectoryStub(c *chk.C, container azblob.ContainerURL, dirPath string) { +func createNewDirectoryStub(a *assert.Assertions, container azblob.ContainerURL, dirPath string) { dir := container.NewBlockBlobURL(dirPath) - cResp, err := dir.Upload(ctx, bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, + _, err := dir.Upload(ctx, bytes.NewReader(nil), azblob.BlobHTTPHeaders{}, azblob.Metadata{"hdi_isfolder": "true"}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + a.Nil(err) return } -func createNewAzureShare(c *chk.C, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { - share, name = getShareURL(c, fsu) +func createNewAzureShare(a *assert.Assertions, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { + share, name = getShareURL(a, fsu) - // + _, err := share.Create(ctx, nil, 0) + a.Nil(err) - cResp, err := share.Create(ctx, nil, 0) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) return share, name } -func createNewAzureFile(c *chk.C, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { - file, name = getAzureFileURL(c, share, prefix) +func createNewAzureFile(a *assert.Assertions, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { + file, name = getAzureFileURL(a, share, prefix) // generate parents first - generateParentsForAzureFile(c, file) + generateParentsForAzureFile(a, file) - cResp, err := file.Create(ctx, defaultAzureFileSizeInBytes, azfile.FileHTTPHeaders{}, azfile.Metadata{}) - c.Assert(err, chk.IsNil) - c.Assert(cResp.StatusCode(), chk.Equals, 201) + _, err := file.Create(ctx, defaultAzureFileSizeInBytes, azfile.FileHTTPHeaders{}, azfile.Metadata{}) + a.Nil(err) return } -func generateParentsForAzureFile(c *chk.C, fileURL azfile.FileURL) { +func generateParentsForAzureFile(a *assert.Assertions, fileURL azfile.FileURL) { accountName, accountKey := getAccountAndKey() credential, _ := azfile.NewSharedKeyCredential(accountName, accountKey) t := ste.NewFolderCreationTracker(common.EFolderPropertiesOption.NoFolders(), nil) err := ste.AzureFileParentDirCreator{}.CreateParentDirToRoot(ctx, fileURL, azfile.NewPipeline(credential, azfile.PipelineOptions{}), t) - c.Assert(err, chk.IsNil) + a.Nil(err) } -func createNewAppendBlob(c *chk.C, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { - blob, name = getAppendBlobURL(c, container, prefix) +func createNewAppendBlob(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { + blob, name = getAppendBlobURL(a, container, prefix) - resp, err := blob.Create(ctx, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + _, err := blob.Create(ctx, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, 201) + a.Nil(err) return } -func createNewPageBlob(c *chk.C, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { - blob, name = getPageBlobURL(c, container, prefix) - - resp, err := blob.Create(ctx, azblob.PageBlobPageBytes*10, 0, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, azblob.DefaultPremiumBlobAccessTier, nil, 
azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) +func createNewPageBlob(a *assert.Assertions, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { + blob, name = getPageBlobURL(a, container, prefix) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, 201) + _, err := blob.Create(ctx, azblob.PageBlobPageBytes*10, 0, azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}, azblob.DefaultPremiumBlobAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) + a.Nil(err) return } -func deleteContainer(c *chk.C, container azblob.ContainerURL) { - resp, err := container.Delete(ctx, azblob.ContainerAccessConditions{}) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, 202) +func deleteContainer(a *assert.Assertions, container azblob.ContainerURL) { + _, err := container.Delete(ctx, azblob.ContainerAccessConditions{}) + a.Nil(err) } -func deleteFilesystem(c *chk.C, filesystem azbfs.FileSystemURL) { - resp, err := filesystem.Delete(ctx) - c.Assert(err, chk.IsNil) - c.Assert(resp.StatusCode(), chk.Equals, 202) +func deleteFilesystem(a *assert.Assertions, filesystem azbfs.FileSystemURL) { + _, err := filesystem.Delete(ctx) + a.Nil(err) } -func validateStorageError(c *chk.C, err error, code azblob.ServiceCodeType) { +func validateStorageError(a *assert.Assertions, err error, code azblob.ServiceCodeType) { serr, _ := err.(azblob.StorageError) - c.Assert(serr.ServiceCode(), chk.Equals, code) + a.Equal(code, serr.ServiceCode()) } func getRelativeTimeGMT(amount time.Duration) time.Time { @@ -491,62 +468,62 @@ func createGCPClientWithGCSSDK() (*gcpUtils.Client, error) { return gcpClient, nil } -func createNewBucket(c *chk.C, client *minio.Client, o createS3ResOptions) string { +func createNewBucket(a *assert.Assertions, client *minio.Client, o createS3ResOptions) string { bucketName := generateBucketName() err := client.MakeBucket(bucketName, o.Location) - c.Assert(err, chk.IsNil) + a.Nil(err) return bucketName } -func createNewGCPBucket(c *chk.C, client *gcpUtils.Client) string { +func createNewGCPBucket(a *assert.Assertions, client *gcpUtils.Client) string { bucketName := generateBucketName() bkt := client.Bucket(bucketName) err := bkt.Create(context.Background(), os.Getenv("GOOGLE_CLOUD_PROJECT"), &gcpUtils.BucketAttrs{}) - c.Assert(err, chk.IsNil) + a.Nil(err) return bucketName } -func createNewBucketWithName(c *chk.C, client *minio.Client, bucketName string, o createS3ResOptions) { +func createNewBucketWithName(a *assert.Assertions, client *minio.Client, bucketName string, o createS3ResOptions) { err := client.MakeBucket(bucketName, o.Location) - c.Assert(err, chk.IsNil) + a.Nil(err) } -func createNewGCPBucketWithName(c *chk.C, client *gcpUtils.Client, bucketName string) { +func createNewGCPBucketWithName(a *assert.Assertions, client *gcpUtils.Client, bucketName string) { bucket := client.Bucket(bucketName) err := bucket.Create(context.Background(), os.Getenv("GOOGLE_CLOUD_PROJECT"), &gcpUtils.BucketAttrs{}) - c.Assert(err, chk.IsNil) + a.Nil(err) } -func createNewObject(c *chk.C, client *minio.Client, bucketName string, prefix string) (objectKey string) { +func createNewObject(a *assert.Assertions, client *minio.Client, bucketName string, prefix string) (objectKey string) { objectKey = prefix + generateObjectName() size := int64(len(objectDefaultData)) n, err := client.PutObject(bucketName, objectKey, strings.NewReader(objectDefaultData), size, minio.PutObjectOptions{}) - 
c.Assert(err, chk.IsNil) + a.Nil(err) - c.Assert(n, chk.Equals, size) + a.Equal(size, n) return } -func createNewGCPObject(c *chk.C, client *gcpUtils.Client, bucketName string, prefix string) (objectKey string) { +func createNewGCPObject(a *assert.Assertions, client *gcpUtils.Client, bucketName string, prefix string) (objectKey string) { objectKey = prefix + generateObjectName() size := int64(len(objectDefaultData)) wc := client.Bucket(bucketName).Object(objectKey).NewWriter(context.Background()) reader := strings.NewReader(objectDefaultData) written, err := io.Copy(wc, reader) - c.Assert(err, chk.IsNil) - c.Assert(written, chk.Equals, size) + a.Nil(err) + a.Equal(size, written) err = wc.Close() - c.Assert(err, chk.IsNil) + a.Nil(err) return objectKey } -func deleteBucket(c *chk.C, client *minio.Client, bucketName string, waitQuarterMinute bool) { +func deleteBucket(client *minio.Client, bucketName string, waitQuarterMinute bool) { // If we error out in this function, simply just skip over deleting the bucket. // Some of our buckets have become "ghost" buckets in the past. // Ghost buckets show up in list calls but can't actually be interacted with. @@ -590,7 +567,7 @@ func deleteBucket(c *chk.C, client *minio.Client, bucketName string, waitQuarter } } -func deleteGCPBucket(c *chk.C, client *gcpUtils.Client, bucketName string, waitQuarterMinute bool) { +func deleteGCPBucket(client *gcpUtils.Client, bucketName string, waitQuarterMinute bool) { bucket := client.Bucket(bucketName) ctx := context.Background() it := bucket.Objects(ctx, &gcpUtils.Query{Prefix: ""}) @@ -600,23 +577,18 @@ func deleteGCPBucket(c *chk.C, client *gcpUtils.Client, bucketName string, waitQ if err == iterator.Done { break } - - // Failure during listing - c.Assert(err, chk.Equals, nil) return } if err == nil { err = bucket.Object(attrs.Name).Delete(nil) if err != nil { - // Failure cleaning bucket - c.Assert(err, chk.Equals, nil) return } } } err := bucket.Delete(context.Background()) if err != nil { - c.Log(fmt.Sprintf("Failed to Delete GCS Bucket %v", bucketName)) + fmt.Println(fmt.Sprintf("Failed to Delete GCS Bucket %v", bucketName)) } if waitQuarterMinute { @@ -624,7 +596,7 @@ func deleteGCPBucket(c *chk.C, client *gcpUtils.Client, bucketName string, waitQ } } -func cleanS3Account(c *chk.C, client *minio.Client) { +func cleanS3Account(client *minio.Client) { buckets, err := client.ListBuckets() if err != nil { return @@ -634,16 +606,16 @@ func cleanS3Account(c *chk.C, client *minio.Client) { if strings.Contains(bucket.Name, "elastic") { continue } - deleteBucket(c, client, bucket.Name, false) + deleteBucket(client, bucket.Name, false) } time.Sleep(time.Minute) } -func cleanGCPAccount(c *chk.C, client *gcpUtils.Client) { +func cleanGCPAccount(client *gcpUtils.Client) { projectID := os.Getenv("GOOGLE_CLOUD_PROJECT") if projectID == "" { - c.Log("GOOGLE_CLOUD_PROJECT env variable not set. GCP tests will not run") + fmt.Println("GOOGLE_CLOUD_PROJECT env variable not set. 
GCP tests will not run") return } ctx := context.Background() @@ -654,19 +626,17 @@ func cleanGCPAccount(c *chk.C, client *gcpUtils.Client) { if err == iterator.Done { break } - - c.Assert(err, chk.Equals, nil) return } - deleteGCPBucket(c, client, battrs.Name, false) + deleteGCPBucket(client, battrs.Name, false) } } -func cleanBlobAccount(c *chk.C, serviceURL azblob.ServiceURL) { +func cleanBlobAccount(a *assert.Assertions, serviceURL azblob.ServiceURL) { marker := azblob.Marker{} for marker.NotDone() { resp, err := serviceURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) for _, v := range resp.ContainerItems { _, err = serviceURL.NewContainerURL(v.Name).Delete(ctx, azblob.ContainerAccessConditions{}) @@ -678,7 +648,7 @@ func cleanBlobAccount(c *chk.C, serviceURL azblob.ServiceURL) { } } - c.Assert(err, chk.IsNil) + a.Nil(err) } } @@ -686,11 +656,11 @@ func cleanBlobAccount(c *chk.C, serviceURL azblob.ServiceURL) { } } -func cleanFileAccount(c *chk.C, serviceURL azfile.ServiceURL) { +func cleanFileAccount(a *assert.Assertions, serviceURL azfile.ServiceURL) { marker := azfile.Marker{} for marker.NotDone() { resp, err := serviceURL.ListSharesSegment(ctx, marker, azfile.ListSharesOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) for _, v := range resp.ShareItems { _, err = serviceURL.NewShareURL(v.Name).Delete(ctx, azfile.DeleteSnapshotsOptionNone) @@ -702,7 +672,7 @@ func cleanFileAccount(c *chk.C, serviceURL azfile.ServiceURL) { } } - c.Assert(err, chk.IsNil) + a.Nil(err) } } @@ -738,9 +708,9 @@ func getAlternateFSU() (azfile.ServiceURL, error) { return azfile.NewServiceURL(*fsURL, pipeline), nil } -func deleteShare(c *chk.C, share azfile.ShareURL) { +func deleteShare(a *assert.Assertions, share azfile.ShareURL) { _, err := share.Delete(ctx, azfile.DeleteSnapshotsOptionInclude) - c.Assert(err, chk.IsNil) + a.Nil(err) } // Some tests require setting service properties. It can take up to 30 seconds for the new properties to be reflected across all FEs. @@ -748,47 +718,47 @@ func deleteShare(c *chk.C, share azfile.ShareURL) { // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason, // we fail the test. It is the responsibility of the the testImplFunc to determine which error string indicates the test should be retried. // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string. -func runTestRequiringServiceProperties(c *chk.C, bsu azblob.ServiceURL, code string, - enableServicePropertyFunc func(*chk.C, azblob.ServiceURL), - testImplFunc func(*chk.C, azblob.ServiceURL) error, - disableServicePropertyFunc func(*chk.C, azblob.ServiceURL)) { - enableServicePropertyFunc(c, bsu) - defer disableServicePropertyFunc(c, bsu) - err := testImplFunc(c, bsu) +func runTestRequiringServiceProperties(a *assert.Assertions, bsu azblob.ServiceURL, code string, + enableServicePropertyFunc func(*assert.Assertions, azblob.ServiceURL), + testImplFunc func(*assert.Assertions, azblob.ServiceURL) error, + disableServicePropertyFunc func(*assert.Assertions, azblob.ServiceURL)) { + enableServicePropertyFunc(a, bsu) + defer disableServicePropertyFunc(a, bsu) + err := testImplFunc(a, bsu) // We cannot assume that the error indicative of slow update will necessarily be a StorageError. As in ListBlobs. 
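The hunks above all follow one pattern: helpers that used to take a *chk.C now take the *assert.Assertions that each converted test builds once via assert.New(t), and the old status-code assertions on raw responses are dropped in favor of a plain nil-error check. A minimal, self-contained sketch of that helper pattern (illustrative only, not a hunk from this PR; mustParseURL and the example URL are invented for the sketch):

package cmd

import (
    "net/url"
    "testing"

    "github.com/stretchr/testify/assert"
)

// mustParseURL mirrors the helper style used in this PR: it receives the
// *assert.Assertions created by the calling test and fails that test directly.
func mustParseURL(a *assert.Assertions, raw string) *url.URL {
    u, err := url.Parse(raw)
    a.Nil(err) // replaces c.Assert(err, chk.IsNil)
    return u
}

func TestHelperPattern(t *testing.T) {
    a := assert.New(t) // one Assertions value per test, threaded through helpers
    u := mustParseURL(a, "https://example.blob.core.windows.net/container")
    a.Equal("example.blob.core.windows.net", u.Host)
}

One behavioral difference to keep in mind: gocheck's c.Assert aborts the running test on failure, while testify's assert package records the failure and continues (require is the aborting equivalent), so helpers written in this style keep executing after a failed assertion.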
if err != nil && err.Error() == code { time.Sleep(time.Second * 30) - err = testImplFunc(c, bsu) - c.Assert(err, chk.IsNil) + err = testImplFunc(a, bsu) + a.Nil(err) } } -func enableSoftDelete(c *chk.C, bsu azblob.ServiceURL) { +func enableSoftDelete(a *assert.Assertions, bsu azblob.ServiceURL) { days := int32(1) _, err := bsu.SetProperties(ctx, azblob.StorageServiceProperties{DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: true, Days: &days}}) - c.Assert(err, chk.IsNil) + a.Nil(err) } -func disableSoftDelete(c *chk.C, bsu azblob.ServiceURL) { +func disableSoftDelete(a *assert.Assertions, bsu azblob.ServiceURL) { _, err := bsu.SetProperties(ctx, azblob.StorageServiceProperties{DeleteRetentionPolicy: &azblob.RetentionPolicy{Enabled: false}}) - c.Assert(err, chk.IsNil) + a.Nil(err) } -func validateUpload(c *chk.C, blobURL azblob.BlockBlobURL) { +func validateUpload(a *assert.Assertions, blobURL azblob.BlockBlobURL) { resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) - c.Assert(err, chk.IsNil) + a.Nil(err) data, _ := io.ReadAll(resp.Response().Body) - c.Assert(data, chk.HasLen, 0) + a.Len(data, 0) } -func getContainerURLWithSAS(c *chk.C, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { +func getContainerURLWithSAS(a *assert.Assertions, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { sasQueryParams, err := azblob.BlobSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, ExpiryTime: time.Now().UTC().Add(48 * time.Hour), ContainerName: containerName, Permissions: azblob.ContainerSASPermissions{Read: true, Add: true, Write: true, Create: true, Delete: true, DeletePreviousVersion: true, List: true, Tag: true}.String(), }.NewSASQueryParameters(&credential) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct the url from scratch qp := sasQueryParams.Encode() @@ -797,13 +767,13 @@ func getContainerURLWithSAS(c *chk.C, credential azblob.SharedKeyCredential, con // convert the raw url and validate it was parsed successfully fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) // TODO perhaps we need a global default pipeline return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -func getBlobServiceURLWithSAS(c *chk.C, credential azblob.SharedKeyCredential) azblob.ServiceURL { +func getBlobServiceURLWithSAS(a *assert.Assertions, credential azblob.SharedKeyCredential) azblob.ServiceURL { sasQueryParams, err := azblob.AccountSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, ExpiryTime: time.Now().Add(48 * time.Hour), @@ -811,7 +781,7 @@ func getBlobServiceURLWithSAS(c *chk.C, credential azblob.SharedKeyCredential) a Services: azblob.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azblob.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(&credential) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct the url from scratch qp := sasQueryParams.Encode() @@ -820,12 +790,12 @@ func getBlobServiceURLWithSAS(c *chk.C, credential azblob.SharedKeyCredential) a // convert the raw url and validate it was parsed successfully fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -func getFileServiceURLWithSAS(c *chk.C, credential azfile.SharedKeyCredential) 
azfile.ServiceURL { +func getFileServiceURLWithSAS(a *assert.Assertions, credential azfile.SharedKeyCredential) azfile.ServiceURL { sasQueryParams, err := azfile.AccountSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, ExpiryTime: time.Now().Add(48 * time.Hour), @@ -833,25 +803,25 @@ func getFileServiceURLWithSAS(c *chk.C, credential azfile.SharedKeyCredential) a Services: azfile.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(&credential) - c.Assert(err, chk.IsNil) + a.Nil(err) qp := sasQueryParams.Encode() rawURL := fmt.Sprintf("https://%s.file.core.windows.net/?%s", credential.AccountName(), qp) fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return azfile.NewServiceURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -func getShareURLWithSAS(c *chk.C, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { +func getShareURLWithSAS(a *assert.Assertions, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { sasQueryParams, err := azfile.FileSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, ExpiryTime: time.Now().UTC().Add(48 * time.Hour), ShareName: shareName, Permissions: azfile.ShareSASPermissions{Read: true, Write: true, Create: true, Delete: true, List: true}.String(), }.NewSASQueryParameters(&credential) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct the url from scratch qp := sasQueryParams.Encode() @@ -860,13 +830,13 @@ func getShareURLWithSAS(c *chk.C, credential azfile.SharedKeyCredential, shareNa // convert the raw url and validate it was parsed successfully fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) // TODO perhaps we need a global default pipeline return azfile.NewShareURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -func getAdlsServiceURLWithSAS(c *chk.C, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { +func getAdlsServiceURLWithSAS(a *assert.Assertions, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { sasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, ExpiryTime: time.Now().Add(48 * time.Hour), @@ -874,7 +844,7 @@ func getAdlsServiceURLWithSAS(c *chk.C, credential azbfs.SharedKeyCredential) az Services: azfile.AccountSASServices{File: true, Blob: true, Queue: true}.String(), ResourceTypes: azfile.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(), }.NewSASQueryParameters(&credential) - c.Assert(err, chk.IsNil) + a.Nil(err) // construct the url from scratch qp := sasQueryParams.Encode() @@ -883,38 +853,7 @@ func getAdlsServiceURLWithSAS(c *chk.C, credential azbfs.SharedKeyCredential) az // convert the raw url and validate it was parsed successfully fullURL, err := url.Parse(rawURL) - c.Assert(err, chk.IsNil) + a.Nil(err) return azbfs.NewServiceURL(*fullURL, azbfs.NewPipeline(azbfs.NewAnonymousCredential(), azbfs.PipelineOptions{})) -} - -// check.v1 style "StringContains" checker - -type stringContainsChecker struct { - *chk.CheckerInfo -} - -var StringContains = &stringContainsChecker{ - &chk.CheckerInfo{Name: "StringContains", Params: []string{"obtained", "expected to find"}}, -} - -func (checker *stringContainsChecker) Check(params []interface{}, names []string) (result bool, error string) { - if len(params) < 2 { - return false, "StringContains 
requires two parameters" - } // Ignore extra parameters - - // Assert that params[0] and params[1] are strings - aStr, aOK := params[0].(string) - bStr, bOK := params[1].(string) - if !aOK || !bOK { - return false, "All parameters must be strings" - } - - if strings.Contains(aStr, bStr) { - return true, "" - } - - return false, fmt.Sprintf("Failed to find substring in source string:\n\n"+ - "SOURCE: %s\n"+ - "EXPECTED: %s\n", aStr, bStr) -} +} \ No newline at end of file diff --git a/cmd/zt_traverser_blob_test.go b/cmd/zt_traverser_blob_test.go index e77d49cd6..00502102a 100644 --- a/cmd/zt_traverser_blob_test.go +++ b/cmd/zt_traverser_blob_test.go @@ -25,97 +25,98 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common" "github.com/Azure/azure-storage-azcopy/v10/ste" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type traverserBlobSuite struct{} - -var _ = chk.Suite(&traverserBlobSuite{}) - -func (s *traverserBlobSuite) TestIsSourceDirWithStub(c *chk.C) { +func TestIsSourceDirWithStub(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) dirName := "source_dir" - createNewDirectoryStub(c, containerURL, dirName) + createNewDirectoryStub(a, containerURL, dirName) // set up to create blob traverser ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) - c.Assert(isDir, chk.Equals, true) - c.Assert(err, chk.Equals, nil) + a.True(isDir) + a.Nil(err) } -func (s *traverserBlobSuite) TestIsSourceDirWithNoStub(c *chk.C) { +func TestIsSourceDirWithNoStub(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) dirName := "source_dir/" ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, dirName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) - c.Assert(isDir, chk.Equals, true) - c.Assert(err, chk.Equals, nil) + a.True(isDir) + a.Nil(err) } -func (s *traverserBlobSuite) 
TestIsSourceFileExists(c *chk.C) { +func TestIsSourceFileExists(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) fileName := "source_file" - _, fileName = createNewBlockBlob(c, containerURL, fileName) + _, fileName = createNewBlockBlob(a, containerURL, fileName) ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) - c.Assert(isDir, chk.Equals, false) - c.Assert(err, chk.IsNil) + a.False(isDir) + a.Nil(err) } -func (s *traverserBlobSuite) TestIsSourceFileDoesNotExist(c *chk.C) { +func TestIsSourceFileDoesNotExist(t *testing.T) { + a := assert.New(t) bsu := getBSU() // Generate source container and blobs - containerURL, containerName := createNewContainer(c, bsu) - defer deleteContainer(c, containerURL) - c.Assert(containerURL, chk.NotNil) + containerURL, containerName := createNewContainer(a, bsu) + defer deleteContainer(a, containerURL) + a.NotNil(containerURL) fileName := "file_does_not_exist" ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) // List - rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName) + rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(a, containerName, fileName) blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) - c.Assert(isDir, chk.Equals, false) - c.Assert(err.Error(), chk.Equals, common.FILE_NOT_FOUND) -} + a.False(isDir) + a.Equal(common.FILE_NOT_FOUND, err.Error()) +} \ No newline at end of file diff --git a/cmd/zt_user_input_test.go b/cmd/zt_user_input_test.go index ae518192f..1d91f92eb 100644 --- a/cmd/zt_user_input_test.go +++ b/cmd/zt_user_input_test.go @@ -21,38 +21,40 @@ package cmd import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -func (s *cmdIntegrationSuite) TestCPKEncryptionInputTest(c *chk.C) { +func TestCPKEncryptionInputTest(t *testing.T) { + a := assert.New(t) mockedRPC := interceptor{} Rpc = mockedRPC.intercept mockedRPC.init() dirPath := "this/is/a/dummy/path" - rawDFSEndpointWithSAS := scenarioHelper{}.getRawAdlsServiceURLWithSAS(c) + rawDFSEndpointWithSAS := scenarioHelper{}.getRawAdlsServiceURLWithSAS(a) raw := getDefaultRawCopyInput(dirPath, rawDFSEndpointWithSAS.String()) raw.recursive = true raw.cpkInfo = true - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(err.Error(), StringContains, "client provided keys (CPK) based encryption is only supported with blob 
endpoints (blob.core.windows.net)") + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Contains(err.Error(), "client provided keys (CPK) based encryption is only supported with blob endpoints (blob.core.windows.net)") }) mockedRPC.reset() raw.cpkInfo = false raw.cpkScopeInfo = "dummyscope" - runCopyAndVerify(c, raw, func(err error) { - c.Assert(err, chk.NotNil) - c.Assert(err.Error(), StringContains, "client provided keys (CPK) based encryption is only supported with blob endpoints (blob.core.windows.net)") + runCopyAndVerify(a, raw, func(err error) { + a.NotNil(err) + a.Contains(err.Error(), "client provided keys (CPK) based encryption is only supported with blob endpoints (blob.core.windows.net)") }) - rawContainerURL := scenarioHelper{}.getContainerURL(c, "testcpkcontainer") + rawContainerURL := scenarioHelper{}.getContainerURL(a, "testcpkcontainer") raw2 := getDefaultRawCopyInput(dirPath, rawContainerURL.String()) raw2.recursive = true raw2.cpkInfo = true _, err := raw2.cook() - c.Assert(err, chk.IsNil) -} + a.Nil(err) +} \ No newline at end of file diff --git a/common/LongPathHandler_test.go b/common/LongPathHandler_test.go index 0e27b7bd3..29195ec3c 100644 --- a/common/LongPathHandler_test.go +++ b/common/LongPathHandler_test.go @@ -1,34 +1,33 @@ package common import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type pathHandlerSuite struct{} - -var _ = chk.Suite(&pathHandlerSuite{}) - -func (p *pathHandlerSuite) TestShortToLong(c *chk.C) { +func TestShortToLong(t *testing.T) { + a := assert.New(t) if OS_PATH_SEPARATOR == `\` { - c.Assert(ToExtendedPath(`c:`), chk.Equals, `\\?\C:\`) - c.Assert(ToExtendedPath(`c:/`), chk.Equals, `\\?\C:\`) - c.Assert(ToExtendedPath(`c:/myPath`), chk.Equals, `\\?\C:\myPath`) - c.Assert(ToExtendedPath(`C:\myPath`), chk.Equals, `\\?\C:\myPath`) - c.Assert(ToExtendedPath(`\\myHost\myPath`), chk.Equals, `\\?\UNC\myHost\myPath`) - c.Assert(ToExtendedPath(`\\?\C:\myPath`), chk.Equals, `\\?\C:\myPath`) - c.Assert(ToExtendedPath(`\\?\UNC\myHost\myPath`), chk.Equals, `\\?\UNC\myHost\myPath`) + a.Equal(`\\?\C:\`, ToExtendedPath(`c:`)) + a.Equal(`\\?\C:\`, ToExtendedPath(`c:/`)) + a.Equal(`\\?\C:\myPath`, ToExtendedPath(`c:/myPath`)) + a.Equal(`\\?\C:\myPath`, ToExtendedPath(`C:\myPath`)) + a.Equal(`\\?\UNC\myHost\myPath`, ToExtendedPath(`\\myHost\myPath`)) + a.Equal(`\\?\C:\myPath`, ToExtendedPath(`\\?\C:\myPath`)) + a.Equal(`\\?\UNC\myHost\myPath`, ToExtendedPath(`\\?\UNC\myHost\myPath`)) } else { - c.Skip("Test only pertains to Windows.") + t.Skip("Test only pertains to Windows.") } } -func (p *pathHandlerSuite) TestLongToShort(c *chk.C) { +func TestLongToShort(t *testing.T) { + a := assert.New(t) if OS_PATH_SEPARATOR == `\` { - c.Assert(ToShortPath(`\\?\C:\myPath`), chk.Equals, `C:\myPath`) - c.Assert(ToShortPath(`\\?\UNC\myHost\myPath`), chk.Equals, `\\myHost\myPath`) - c.Assert(ToShortPath(`\\myHost\myPath`), chk.Equals, `\\myHost\myPath`) - c.Assert(ToShortPath(`C:\myPath`), chk.Equals, `C:\myPath`) + a.Equal(`C:\myPath`, ToShortPath(`\\?\C:\myPath`)) + a.Equal(`\\myHost\myPath`, ToShortPath(`\\?\UNC\myHost\myPath`)) + a.Equal(`\\myHost\myPath`, ToShortPath(`\\myHost\myPath`)) + a.Equal(`C:\myPath`, ToShortPath(`C:\myPath`)) } else { - c.Skip("Test only pertains to Windows.") + t.Skip("Test only pertains to Windows.") } -} +} \ No newline at end of file diff --git a/common/extensions_test.go b/common/extensions_test.go index 50f573aa2..e9990fabd 100644 --- a/common/extensions_test.go +++ 
b/common/extensions_test.go @@ -2,20 +2,18 @@ package common import ( "fmt" + "github.com/stretchr/testify/assert" "math/rand" "net/url" "strings" + "testing" "unsafe" "github.com/Azure/azure-storage-blob-go/azblob" - chk "gopkg.in/check.v1" ) -type extensionsTestSuite struct{} - -var _ = chk.Suite(&extensionsTestSuite{}) - -func (s *extensionsTestSuite) TestGenerateFullPath(c *chk.C) { +func TestGenerateFullPath(t *testing.T) { + a := assert.New(t) // the goal is to make sure the root path and child path are always combined correctly testCases := map[string][]string{ "/usr/foo1/bla.txt": {"/usr/foo1", "bla.txt"}, // normal case @@ -38,11 +36,12 @@ func (s *extensionsTestSuite) TestGenerateFullPath(c *chk.C) { for expectedFullPath, input := range testCases { resultFullPath := GenerateFullPath(input[0], input[1]) - c.Assert(resultFullPath, chk.Equals, expectedFullPath) + a.Equal(expectedFullPath, resultFullPath) } } -func (*extensionsTestSuite) TestURLWithPlusDecodedInPath(c *chk.C) { +func TestURLWithPlusDecodedInPath(t *testing.T) { + a := assert.New(t) type expectedResults struct { expectedResult string expectedRawPath string @@ -80,18 +79,18 @@ func (*extensionsTestSuite) TestURLWithPlusDecodedInPath(c *chk.C) { for k, v := range replacementTests { uri, err := url.Parse(k) - c.Assert(err, chk.IsNil) + a.Nil(err) extension := URLExtension{*uri}.URLWithPlusDecodedInPath() - c.Assert(extension.Path, chk.Equals, v.expectedPath) - c.Assert(extension.RawPath, chk.Equals, v.expectedRawPath) - c.Assert(extension.String(), chk.Equals, v.expectedResult) + a.Equal(v.expectedPath, extension.Path) + a.Equal(v.expectedRawPath, extension.RawPath) + a.Equal(v.expectedResult, extension.String()) } } -func (*extensionsTestSuite) TestRedaction(c *chk.C) { - +func TestRedaction(t *testing.T) { + a := assert.New(t) // must make sure that- //1. the signature is redacted if present //2. the capitalization of the rest of the string should not be affected @@ -126,29 +125,30 @@ func (*extensionsTestSuite) TestRedaction(c *chk.C) { actualOutputParams = append(actualOutputParams, param) } - c.Assert(len(expectedOutputParams), chk.Equals, len(actualOutputParams)) + a.Equal(len(actualOutputParams), len(expectedOutputParams)) var sigfound bool = false for i := range expectedOutputParams { expParam, expValue := strings.Split(expectedOutputParams[i], "=")[0], strings.Split(expectedOutputParams[i], "=")[1] actParam, actValue := strings.Split(actualOutputParams[i], "=")[0], strings.Split(actualOutputParams[i], "=")[1] - c.Assert(expParam, chk.Equals, actParam) - c.Assert(expValue, chk.Equals, actValue) + a.Equal(actParam, expParam) + a.Equal(actValue, expValue) if expParam == "sig" { - c.Assert(isRedacted, chk.Equals, true) + a.True(isRedacted) sigfound = true - c.Assert(actValue, chk.Equals, "REDACTED") + a.Equal("REDACTED", actValue) } } if !sigfound { - c.Assert(isRedacted, chk.Equals, false) + a.False(isRedacted) } } } -func (*extensionsTestSuite) TestBlockblobBlockIDGeneration(c *chk.C) { +func TestBlockblobBlockIDGeneration(t *testing.T) { + a := assert.New(t) // Make sure that for a given JobID, jobPart, an index in job part and a block index, // the blockID generated is consistent. 
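The substitutions in this file are the same mechanical ones applied across the rest of the diff, including the removal of the custom StringContains checker a few hunks above in favor of testify's built-in Contains. A compact, self-contained reference for the mappings used in this PR (sketch only; the asserted values are arbitrary):

package common

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestGocheckToTestifyMappings(t *testing.T) {
    a := assert.New(t)
    var err error
    n, s := 0, "blob.core.windows.net"

    a.Nil(err)                        // c.Assert(err, chk.IsNil)
    a.NotNil(assert.AnError)          // c.Assert(err, chk.NotNil)
    a.Equal(0, n)                     // c.Assert(n, chk.Equals, 0) -- expected value first
    a.Zero(n)                         // shorthand for asserting a zero value
    a.True(len(s) > 0)                // c.Assert(cond, chk.Equals, true)
    a.False(len(s) == 0)              // c.Assert(cond, chk.Equals, false)
    a.Len([]byte{}, 0)                // c.Assert(data, chk.HasLen, 0)
    a.Equal([]int{1}, []int{1})       // c.Assert(x, chk.DeepEquals, y) -- Equal compares deeply
    a.Contains(s, "core.windows.net") // custom StringContains checker -> a.Contains
}

Note the argument-order flip: gocheck's c.Assert takes (obtained, checker, expected), while testify's Equal takes (expected, actual), which is why the converted calls generally list the expected literal first.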
numOfFilesPerDispatchJobPart :=int32(10000) // == cmd.NumOfFilesPerDispatchJobPart @@ -158,17 +158,17 @@ func (*extensionsTestSuite) TestBlockblobBlockIDGeneration(c *chk.C) { placeHolder := "00000" // 5B placeholder jobId := NewUUID() jobIdStr := string((*[16]byte)(unsafe.Pointer(&jobId))[:]) // 16Byte jobID - partNum := rand.Int31n(maxNumberOfParts) // 5B partNumber + partNum := rand.Int31n(maxNumberOfParts) // 5B partNumber fileIndex := rand.Int31n(numOfFilesPerDispatchJobPart) // 5Byte index of file in part blockIndex := rand.Int31n(azblob.BlockBlobMaxBlocks) // 5B blockIndex blockNamePrefix := fmt.Sprintf("%s%s%05d%05d", placeHolder, jobIdStr, partNum, fileIndex) blockName := GenerateBlockBlobBlockID(blockNamePrefix, blockIndex) - c.Assert(len(blockName), chk.Equals, azCopyBlockLength) + a.Equal(azCopyBlockLength, len(blockName)) for i := 1; i <= 10; i++ { tmp := GenerateBlockBlobBlockID(blockNamePrefix, blockIndex) - c.Assert(tmp, chk.Equals, blockName) + a.Equal(blockName, tmp) } } \ No newline at end of file diff --git a/common/fe-ste-models_test.go b/common/fe-ste-models_test.go index af86e5cf6..7321034d8 100644 --- a/common/fe-ste-models_test.go +++ b/common/fe-ste-models_test.go @@ -22,72 +22,71 @@ package common_test import ( "github.com/Azure/azure-storage-azcopy/v10/common" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type feSteModelsTestSuite struct{} - -var _ = chk.Suite(&feSteModelsTestSuite{}) - -func (s *feSteModelsTestSuite) TestEnhanceJobStatusInfo(c *chk.C) { +func TestEnhanceJobStatusInfo(t *testing.T) { + a := assert.New(t) status := common.EJobStatus status = status.EnhanceJobStatusInfo(true, true, true) - c.Assert(status, chk.Equals, common.EJobStatus.CompletedWithErrorsAndSkipped()) + a.Equal(common.EJobStatus.CompletedWithErrorsAndSkipped(), status) status = status.EnhanceJobStatusInfo(true, true, false) - c.Assert(status, chk.Equals, common.EJobStatus.CompletedWithErrorsAndSkipped()) + a.Equal(common.EJobStatus.CompletedWithErrorsAndSkipped(), status) status = status.EnhanceJobStatusInfo(true, false, true) - c.Assert(status, chk.Equals, common.EJobStatus.CompletedWithSkipped()) + a.Equal(common.EJobStatus.CompletedWithSkipped(), status) status = status.EnhanceJobStatusInfo(true, false, false) - c.Assert(status, chk.Equals, common.EJobStatus.CompletedWithSkipped()) + a.Equal(common.EJobStatus.CompletedWithSkipped(), status) status = status.EnhanceJobStatusInfo(false, true, true) - c.Assert(status, chk.Equals, common.EJobStatus.CompletedWithErrors()) + a.Equal(common.EJobStatus.CompletedWithErrors(), status) status = status.EnhanceJobStatusInfo(false, true, false) - c.Assert(status, chk.Equals, common.EJobStatus.Failed()) + a.Equal(common.EJobStatus.Failed(), status) status = status.EnhanceJobStatusInfo(false, false, true) - c.Assert(status, chk.Equals, common.EJobStatus.Completed()) + a.Equal(common.EJobStatus.Completed(), status) // No-op if all are false status = status.EnhanceJobStatusInfo(false, false, false) - c.Assert(status, chk.Equals, common.EJobStatus.Completed()) + a.Equal(common.EJobStatus.Completed(), status) } -func (s *feSteModelsTestSuite) TestIsJobDone(c *chk.C) { +func TestIsJobDone(t *testing.T) { + a := assert.New(t) status := common.EJobStatus.InProgress() - c.Assert(status.IsJobDone(), chk.Equals, false) + a.False(status.IsJobDone()) status = status.Paused() - c.Assert(status.IsJobDone(), chk.Equals, false) + a.False(status.IsJobDone()) status = status.Cancelling() - c.Assert(status.IsJobDone(), 
chk.Equals, false) + a.False(status.IsJobDone()) status = status.Cancelled() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) status = status.Completed() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) status = status.CompletedWithErrors() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) status = status.CompletedWithSkipped() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) status = status.CompletedWithErrors() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) status = status.CompletedWithErrorsAndSkipped() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) status = status.Failed() - c.Assert(status.IsJobDone(), chk.Equals, true) + a.True(status.IsJobDone()) } func getInvalidMetadataSample() common.Metadata { @@ -115,61 +114,64 @@ func getValidMetadataSample() common.Metadata { return m } -func validateMapEqual(c *chk.C, m1 map[string]string, m2 map[string]string) { - c.Assert(len(m1), chk.Equals, len(m2)) +func validateMapEqual(a *assert.Assertions, m1 map[string]string, m2 map[string]string) { + a.Equal(len(m2), len(m1)) for k1, v1 := range m1 { - c.Assert(m2[k1], chk.Equals, v1) + a.Equal(v1, m2[k1]) } } -func (s *feSteModelsTestSuite) TestMetadataExcludeInvalidKey(c *chk.C) { +func TestMetadataExcludeInvalidKey(t *testing.T) { + a := assert.New(t) mInvalid := getInvalidMetadataSample() mValid := getValidMetadataSample() retainedMetadata, excludedMetadata, invalidKeyExists := mInvalid.ExcludeInvalidKey() - c.Assert(invalidKeyExists, chk.Equals, true) - validateMapEqual(c, retainedMetadata, + a.True(invalidKeyExists) + validateMapEqual(a, retainedMetadata, map[string]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_": "v:abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_", "Am": "v:Am", "_123": "v:_123"}) - validateMapEqual(c, excludedMetadata, + validateMapEqual(a, excludedMetadata, map[string]string{"1abc": "v:1abc", "a!@#": "v:a!@#", "a-metadata-samplE": "v:a-metadata-samplE"}) retainedMetadata, excludedMetadata, invalidKeyExists = mValid.ExcludeInvalidKey() - c.Assert(invalidKeyExists, chk.Equals, false) - validateMapEqual(c, retainedMetadata, map[string]string{"Key": "value"}) - c.Assert(len(excludedMetadata), chk.Equals, 0) - c.Assert(retainedMetadata.ConcatenatedKeys(), chk.Equals, "'Key' ") + a.False(invalidKeyExists) + validateMapEqual(a, retainedMetadata, map[string]string{"Key": "value"}) + a.Zero(len(excludedMetadata)) + a.Equal("'Key' ", retainedMetadata.ConcatenatedKeys()) } -func (s *feSteModelsTestSuite) TestMetadataResolveInvalidKey(c *chk.C) { +func TestMetadataResolveInvalidKey(t *testing.T) { + a := assert.New(t) mInvalid := getInvalidMetadataSample() mValid := getValidMetadataSample() resolvedMetadata, err := mInvalid.ResolveInvalidKey() - c.Assert(err, chk.IsNil) - validateMapEqual(c, resolvedMetadata, + a.Nil(err) + validateMapEqual(a, resolvedMetadata, map[string]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_": "v:abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRUSTUVWXYZ1234567890_", "Am": "v:Am", "_123": "v:_123", "rename_1abc": "v:1abc", "rename_key_1abc": "1abc", "rename_a___": "v:a!@#", "rename_key_a___": "a!@#", "rename_a_metadata_samplE": "v:a-metadata-samplE", "rename_key_a_metadata_samplE": "a-metadata-samplE"}) resolvedMetadata, err = mValid.ResolveInvalidKey() - c.Assert(err, chk.IsNil) - validateMapEqual(c, resolvedMetadata, 
map[string]string{"Key": "value"}) + a.Nil(err) + validateMapEqual(a, resolvedMetadata, map[string]string{"Key": "value"}) } // In this phase we keep the resolve logic easy, and whenever there is key resolving collision found, error reported. -func (s *feSteModelsTestSuite) TestMetadataResolveInvalidKeyNegative(c *chk.C) { +func TestMetadataResolveInvalidKeyNegative(t *testing.T) { + a := assert.New(t) mNegative1 := common.Metadata(map[string]string{"!": "!", "*": "*"}) mNegative2 := common.Metadata(map[string]string{"!": "!", "rename__": "rename__"}) mNegative3 := common.Metadata(map[string]string{"!": "!", "rename_key__": "rename_key__"}) _, err := mNegative1.ResolveInvalidKey() - c.Assert(err, chk.NotNil) + a.NotNil(err) _, err = mNegative2.ResolveInvalidKey() - c.Assert(err, chk.NotNil) + a.NotNil(err) _, err = mNegative3.ResolveInvalidKey() - c.Assert(err, chk.NotNil) + a.NotNil(err) } diff --git a/common/gcpURLParts_test.go b/common/gcpURLParts_test.go index 7a206656c..f509a66e5 100644 --- a/common/gcpURLParts_test.go +++ b/common/gcpURLParts_test.go @@ -1,68 +1,69 @@ package common import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/url" "strings" + "testing" ) -type gcpURLPartsTestSuite struct{} - // This testsuite does not reach GCP service, and runs even with GCP_TESTS=FALSE -var _ = chk.Suite(&gcpURLPartsTestSuite{}) -func (s *gcpURLPartsTestSuite) TestGCPURLParse(c *chk.C) { +func TestGCPURLParse(t *testing.T) { + a := assert.New(t) u, _ := url.Parse("http://storage.cloud.google.com/bucket") p, err := NewGCPURLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Host, chk.Equals, "storage.cloud.google.com") - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://storage.cloud.google.com/bucket") + a.Nil(err) + a.Equal("storage.cloud.google.com", p.Host) + a.Equal("bucket", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("http://storage.cloud.google.com/bucket", p.String()) u, _ = url.Parse("https://storage.cloud.google.com") p, err = NewGCPURLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.BucketName, chk.Equals, "") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://storage.cloud.google.com") + a.Nil(err) + a.Equal("", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("https://storage.cloud.google.com", p.String()) u, _ = url.Parse("http://storage.cloud.google.com/bucket/keyname/") p, err = NewGCPURLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "keyname/") - c.Assert(p.String(), chk.Equals, "http://storage.cloud.google.com/bucket/keyname/") + a.Nil(err) + a.Equal("bucket", p.BucketName) + a.Equal("keyname/", p.ObjectKey) + a.Equal("http://storage.cloud.google.com/bucket/keyname/", p.String()) } -func (s *gcpURLPartsTestSuite) TestGCPURLParseNegative(c *chk.C) { +func TestGCPURLParseNegative(t *testing.T) { + a := assert.New(t) u, _ := url.Parse("https://storage.cloud.googly.com/bucket") _, err := NewGCPURLParts(*u) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), invalidGCPURLErrorMessage), chk.Equals, true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), invalidGCPURLErrorMessage)) u, _ = url.Parse("https://mcdheestorage.blob.core.windows.net") _, err = NewGCPURLParts(*u) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), invalidGCPURLErrorMessage), chk.Equals, true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), 
invalidGCPURLErrorMessage)) } -func (s *gcpURLPartsTestSuite) TestIsGCPURL(c *chk.C) { +func TestIsGCPURL(t *testing.T) { + a := assert.New(t) u, _ := url.Parse("http://storage.cloud.google.com/bucket/keyname/") isGCP := IsGCPURL(*u) - c.Assert(isGCP, chk.Equals, true) + a.True(isGCP) // Negative Test Cases u, _ = url.Parse("http://storage.cloudxgoogle.com/bucket/keyname/") isGCP = IsGCPURL(*u) - c.Assert(isGCP, chk.Equals, false) + a.False(isGCP) u, _ = url.Parse("http://storage.cloud.googlexcom/bucket/keyname/") isGCP = IsGCPURL(*u) - c.Assert(isGCP, chk.Equals, false) + a.False(isGCP) u, _ = url.Parse("http://storagexcloud.google.com/bucket/keyname/") isGCP = IsGCPURL(*u) - c.Assert(isGCP, chk.Equals, false) -} + a.False(isGCP) +} \ No newline at end of file diff --git a/common/parallel/zt_FileSystemCrawlerTest_test.go b/common/parallel/zt_FileSystemCrawlerTest_test.go index 362925811..2998bc941 100644 --- a/common/parallel/zt_FileSystemCrawlerTest_test.go +++ b/common/parallel/zt_FileSystemCrawlerTest_test.go @@ -22,29 +22,23 @@ package parallel import ( "context" + "fmt" + "github.com/stretchr/testify/assert" "os" "path/filepath" "runtime" "strings" "testing" - - chk "gopkg.in/check.v1" ) -// Hookup to the testing framework -func Test(t *testing.T) { chk.TestingT(t) } - -type fileSystemCrawlerSuite struct{} - -var _ = chk.Suite(&fileSystemCrawlerSuite{}) - var windowsSystemDirectory = "" -func (s *fileSystemCrawlerSuite) TestParallelEnumerationFindsTheRightFiles(c *chk.C) { +func TestParallelEnumerationFindsTheRightFiles(t *testing.T) { + a := assert.New(t) dir := "/usr" if runtime.GOOS == "windows" { dir = windowsSystemDirectory - c.Assert(dir, chk.Not(chk.Equals), "") + a.NotEqual("", dir) } // standard (Go SDK) file walk @@ -74,8 +68,8 @@ func (s *fileSystemCrawlerSuite) TestParallelEnumerationFindsTheRightFiles(c *ch if _, ok := parallelResults[key]; ok { delete(parallelResults, key) } else { - c.Error("expected " + key) - c.Fail() + t.Error("expected " + key) + t.Fail() } } // repeat for cases where access was denied in the standard enumeration (since we still pick up those dirs in the parallel enumeration (we just don't pick up their contents)) @@ -83,31 +77,31 @@ func (s *fileSystemCrawlerSuite) TestParallelEnumerationFindsTheRightFiles(c *ch if _, ok := parallelResults[key]; ok { delete(parallelResults, key) } else { - c.Error("expected in access denied results" + key) - c.Fail() + t.Error("expected in access denied results" + key) + t.Fail() } } // assert that everything has been removed now for key := range parallelResults { - c.Error("unexpected extra " + key) - c.Fail() + t.Error("unexpected extra " + key) + t.Fail() } } -func (s *fileSystemCrawlerSuite) TestParallelEnumerationGetsTheRightFileInfo_NormalStat(c *chk.C) { - s.doTestParallelEnumerationGetsTheRightFileInfo(false, c) +func TestParallelEnumerationGetsTheRightFileInfo_NormalStat(t *testing.T) { + doTestParallelEnumerationGetsTheRightFileInfo(false, t) } -func (s *fileSystemCrawlerSuite) TestParallelEnumerationGetsTheRightFileInfo_ParallelStat(c *chk.C) { - s.doTestParallelEnumerationGetsTheRightFileInfo(true, c) +func TestParallelEnumerationGetsTheRightFileInfo_ParallelStat(t *testing.T) { + doTestParallelEnumerationGetsTheRightFileInfo(true, t) } -func (s *fileSystemCrawlerSuite) doTestParallelEnumerationGetsTheRightFileInfo(parallelStat bool, c *chk.C) { - +func doTestParallelEnumerationGetsTheRightFileInfo(parallelStat bool, t *testing.T) { + a := assert.New(t) dir := "/usr" if runtime.GOOS == "windows" { 
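This file also drops the gocheck bootstrap (func Test(t *testing.T) { chk.TestingT(t) }), since go test discovers plain TestXxx functions directly, and replaces chk.Commentf(key) with testify's optional trailing message arguments. A short sketch of that message-argument form (illustrative only; the map contents and key are invented):

package parallel

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestMessageArguments(t *testing.T) {
    a := assert.New(t)
    std := map[string]int64{"file.txt": 42}
    parallel := map[string]int64{"file.txt": 42}
    for key, want := range std {
        // The trailing argument plays the role of chk.Commentf(key): it is
        // printed only if the assertion fails, identifying the failing entry.
        a.Equal(want, parallel[key], key)
    }
}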
dir = windowsSystemDirectory - c.Assert(dir, chk.Not(chk.Equals), "") + a.NotEqual("", dir) dir = dir[0:1] + ":\\" + "Program Files" // need one where the contents won't change while our test runs } @@ -130,14 +124,14 @@ func (s *fileSystemCrawlerSuite) doTestParallelEnumerationGetsTheRightFileInfo(p }) // check results. - c.Assert(len(stdResults) > 1000, chk.Equals, true) // assert that we got a decent number of results + a.True(len(stdResults) > 1000) // assert that we got a decent number of results // check all std results have the same file info in the parallel results for key := range stdResults { if pInfo, ok := parallelResults[key]; ok { stdInfo := stdResults[key] - c.Assert(pInfo.Name(), chk.Equals, stdInfo.Name(), chk.Commentf(key)) - c.Assert(pInfo.Mode(), chk.Equals, stdInfo.Mode(), chk.Commentf(key)) - c.Assert(pInfo.IsDir(), chk.Equals, stdInfo.IsDir(), chk.Commentf(key)) + a.Equal(stdInfo.Name(), pInfo.Name(), key) + a.Equal(stdInfo.Mode(), pInfo.Mode(), key) + a.Equal(stdInfo.IsDir(), pInfo.IsDir(), key) if pInfo.ModTime() != stdInfo.ModTime() { // Sometimes, Windows will give us the creation time instead of the last write time for a directory // That's documented. (In fact, it's not clear to the author of this test how or why we are sometimes seeing the actual last write time, @@ -155,23 +149,23 @@ func (s *fileSystemCrawlerSuite) doTestParallelEnumerationGetsTheRightFileInfo(p // Tuesday, August 27, 2019 5:29:08 AM // PS C:\Program Files\Microsoft SQL Server> (gci | ? Name -eq 90).LastWriteTime # ask for same info, via different API // Tuesday, August 27, 2019 5:26:53 AM // and it gives is the creation datetime instead - c.Assert(pInfo.IsDir(), chk.Equals, true, chk.Commentf(key+" times are different, which is only OK on directories")) + a.True(pInfo.IsDir(), fmt.Sprintf(key+" times are different, which is only OK on directories")) } if !stdInfo.IsDir() { // std Enumeration gives 4096 as size of some directories on Windows, but parallel sizes them as zero // See here for what the std one is doing: https://stackoverflow.com/questions/33593335/size-of-directory-from-os-stat-lstat // Parallel one, on Windows, gets the info from the find result, so that's probably why its zero there for dirs - c.Assert(pInfo.Size(), chk.Equals, stdInfo.Size(), chk.Commentf(key)) + a.Equal(stdInfo.Size(), pInfo.Size(), key) } // don't check the Sys() method. 
It can be different, and we don't mind that } else { - c.Error("expected " + key) - c.Fail() + a.Fail("expected " + key) } } } -func (s *fileSystemCrawlerSuite) TestRootErrorsAreSignalled(c *chk.C) { +func TestRootErrorsAreSignalled(t *testing.T) { + a := assert.New(t) receivedError := false nonExistentDir := filepath.Join(os.TempDir(), "Big random-named directory that almost certainly doesn't exist 85784362628398473732827384") Walk(context.TODO(), nonExistentDir, 16, false, func(path string, _ os.FileInfo, fileErr error) error { @@ -180,5 +174,5 @@ func (s *fileSystemCrawlerSuite) TestRootErrorsAreSignalled(c *chk.C) { } return nil }) - c.Assert(receivedError, chk.Equals, true) -} + a.True(receivedError) +} \ No newline at end of file diff --git a/common/s3URLParts_test.go b/common/s3URLParts_test.go index bd23006f6..854dfde94 100644 --- a/common/s3URLParts_test.go +++ b/common/s3URLParts_test.go @@ -21,187 +21,184 @@ package common import ( + "github.com/stretchr/testify/assert" "net/url" "strings" - - chk "gopkg.in/check.v1" + "testing" ) -// Hookup to the testing framework -type s3URLPartsTestSuite struct{} - -var _ = chk.Suite(&s3URLPartsTestSuite{}) - -func (s *s3URLPartsTestSuite) TestS3URLParse(c *chk.C) { +func TestS3URLParse(t *testing.T) { + a := assert.New(t) u, _ := url.Parse("http://bucket.s3.amazonaws.com") p, err := NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Host, chk.Equals, "bucket.s3.amazonaws.com") - c.Assert(p.Endpoint, chk.Equals, "s3.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.Region, chk.Equals, "") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://bucket.s3.amazonaws.com") + a.Nil(err) + a.Equal("bucket.s3.amazonaws.com", p.Host) + a.Equal("s3.amazonaws.com", p.Endpoint) + a.Equal("bucket", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("", p.Region) + a.Equal("", p.Version) + a.Equal("http://bucket.s3.amazonaws.com", p.String()) u, _ = url.Parse("http://bucket.s3.amazonaws.com/") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.Endpoint, chk.Equals, "s3.amazonaws.com") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.Region, chk.Equals, "") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://bucket.s3.amazonaws.com") + a.Nil(err) + a.Equal("bucket", p.BucketName) + a.Equal("s3.amazonaws.com", p.Endpoint) + a.Equal("", p.ObjectKey) + a.Equal("", p.Region) + a.Equal("", p.Version) + a.Equal("http://bucket.s3.amazonaws.com", p.String()) u, _ = url.Parse("http://bucket.s3-aws-region.amazonaws.com/keydir/keysubdir/keyname") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-aws-region.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "keydir/keysubdir/keyname") - c.Assert(p.Region, chk.Equals, "aws-region") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://bucket.s3-aws-region.amazonaws.com/keydir/keysubdir/keyname") + a.Nil(err) + a.Equal("s3-aws-region.amazonaws.com", p.Endpoint) + a.Equal("bucket", p.BucketName) + a.Equal("keydir/keysubdir/keyname", p.ObjectKey) + a.Equal("aws-region", p.Region) + a.Equal("", p.Version) + a.Equal("http://bucket.s3-aws-region.amazonaws.com/keydir/keysubdir/keyname", p.String()) u, _ = url.Parse("http://bucket.s3-aws-region.amazonaws.com/keyname") p, err = NewS3URLParts(*u) - c.Assert(err, 
chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-aws-region.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "keyname") - c.Assert(p.Region, chk.Equals, "aws-region") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://bucket.s3-aws-region.amazonaws.com/keyname") + a.Nil(err) + a.Equal("s3-aws-region.amazonaws.com", p.Endpoint) + a.Equal("bucket", p.BucketName) + a.Equal("keyname", p.ObjectKey) + a.Equal("aws-region", p.Region) + a.Equal("", p.Version) + a.Equal("http://bucket.s3-aws-region.amazonaws.com/keyname", p.String()) u, _ = url.Parse("http://bucket.s3-aws-region.amazonaws.com/keyname/") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-aws-region.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "keyname/") - c.Assert(p.Region, chk.Equals, "aws-region") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://bucket.s3-aws-region.amazonaws.com/keyname/") + a.Nil(err) + a.Equal("s3-aws-region.amazonaws.com", p.Endpoint) + a.Equal("bucket", p.BucketName) + a.Equal("keyname/", p.ObjectKey) + a.Equal("aws-region", p.Region) + a.Equal("", p.Version) + a.Equal("http://bucket.s3-aws-region.amazonaws.com/keyname/", p.String()) // dual stack u, _ = url.Parse("http://bucket.s3.dualstack.aws-region.amazonaws.com/keyname/") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3.dualstack.aws-region.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "bucket") - c.Assert(p.ObjectKey, chk.Equals, "keyname/") - c.Assert(p.Region, chk.Equals, "aws-region") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "http://bucket.s3.dualstack.aws-region.amazonaws.com/keyname/") + a.Nil(err) + a.Equal("s3.dualstack.aws-region.amazonaws.com", p.Endpoint) + a.Equal("bucket", p.BucketName) + a.Equal("keyname/", p.ObjectKey) + a.Equal("aws-region", p.Region) + a.Equal("", p.Version) + a.Equal("http://bucket.s3.dualstack.aws-region.amazonaws.com/keyname/", p.String()) u, _ = url.Parse("https://s3.amazonaws.com") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.Region, chk.Equals, "") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3.amazonaws.com") + a.Nil(err) + a.Equal("s3.amazonaws.com", p.Endpoint) + a.Equal("", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3.amazonaws.com", p.String()) u, _ = url.Parse("https://s3.amazonaws.com/") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.Region, chk.Equals, "") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3.amazonaws.com") + a.Nil(err) + a.Equal("s3.amazonaws.com", p.Endpoint) + a.Equal("", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3.amazonaws.com", p.String()) u, _ = url.Parse("https://s3-ap-southeast-1.amazonaws.com/") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-ap-southeast-1.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "") - c.Assert(p.ObjectKey, 
chk.Equals, "") - c.Assert(p.Region, chk.Equals, "ap-southeast-1") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3-ap-southeast-1.amazonaws.com") + a.Nil(err) + a.Equal("s3-ap-southeast-1.amazonaws.com", p.Endpoint) + a.Equal("", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("ap-southeast-1", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3-ap-southeast-1.amazonaws.com", p.String()) u, _ = url.Parse("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-ap-southeast-1.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "jiac-art-awsbucket01") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.Region, chk.Equals, "ap-southeast-1") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01") + a.Nil(err) + a.Equal("s3-ap-southeast-1.amazonaws.com", p.Endpoint) + a.Equal("jiac-art-awsbucket01", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("ap-southeast-1", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01", p.String()) u, _ = url.Parse("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-ap-southeast-1.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "jiac-art-awsbucket01") - c.Assert(p.ObjectKey, chk.Equals, "") - c.Assert(p.Region, chk.Equals, "ap-southeast-1") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01") + a.Nil(err) + a.Equal("s3-ap-southeast-1.amazonaws.com", p.Endpoint) + a.Equal("jiac-art-awsbucket01", p.BucketName) + a.Equal("", p.ObjectKey) + a.Equal("ap-southeast-1", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01", p.String()) u, _ = url.Parse("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/Test.pdf") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-ap-southeast-1.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "jiac-art-awsbucket01") - c.Assert(p.ObjectKey, chk.Equals, "Test.pdf") - c.Assert(p.Region, chk.Equals, "ap-southeast-1") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/Test.pdf") + a.Nil(err) + a.Equal("s3-ap-southeast-1.amazonaws.com", p.Endpoint) + a.Equal("jiac-art-awsbucket01", p.BucketName) + a.Equal("Test.pdf", p.ObjectKey) + a.Equal("ap-southeast-1", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/Test.pdf", p.String()) u, _ = url.Parse("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/space+folder/Test.pdf") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3-ap-southeast-1.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "jiac-art-awsbucket01") - c.Assert(p.ObjectKey, chk.Equals, "space+folder/Test.pdf") - c.Assert(p.Region, chk.Equals, "ap-southeast-1") - c.Assert(p.Version, chk.Equals, "") - c.Assert(p.String(), chk.Equals, "https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/space+folder/Test.pdf") + a.Nil(err) + a.Equal("s3-ap-southeast-1.amazonaws.com", p.Endpoint) + a.Equal("jiac-art-awsbucket01", p.BucketName) + 
a.Equal("space+folder/Test.pdf", p.ObjectKey) + a.Equal("ap-southeast-1", p.Region) + a.Equal("", p.Version) + a.Equal("https://s3-ap-southeast-1.amazonaws.com/jiac-art-awsbucket01/space+folder/Test.pdf", p.String()) // Version testing u, _ = url.Parse("https://s3.ap-northeast-2.amazonaws.com/jiac-art-awsbucket02-versionenabled/Test.pdf?versionId=Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3.ap-northeast-2.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "jiac-art-awsbucket02-versionenabled") - c.Assert(p.ObjectKey, chk.Equals, "Test.pdf") - c.Assert(p.Region, chk.Equals, "ap-northeast-2") - c.Assert(p.Version, chk.Equals, "Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ") - c.Assert(p.String(), chk.Equals, "https://s3.ap-northeast-2.amazonaws.com/jiac-art-awsbucket02-versionenabled/Test.pdf?versionId=Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ") + a.Nil(err) + a.Equal("s3.ap-northeast-2.amazonaws.com", p.Endpoint) + a.Equal("jiac-art-awsbucket02-versionenabled", p.BucketName) + a.Equal("Test.pdf", p.ObjectKey) + a.Equal("ap-northeast-2", p.Region) + a.Equal("Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ", p.Version) + a.Equal("https://s3.ap-northeast-2.amazonaws.com/jiac-art-awsbucket02-versionenabled/Test.pdf?versionId=Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ", p.String()) // Version and dualstack testing u, _ = url.Parse("https://s3.dualstack.ap-northeast-2.amazonaws.com/jiac-art-awsbucket02-versionenabled/Test.pdf?versionId=Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ") p, err = NewS3URLParts(*u) - c.Assert(err, chk.IsNil) - c.Assert(p.Endpoint, chk.Equals, "s3.dualstack.ap-northeast-2.amazonaws.com") - c.Assert(p.BucketName, chk.Equals, "jiac-art-awsbucket02-versionenabled") - c.Assert(p.ObjectKey, chk.Equals, "Test.pdf") - c.Assert(p.Region, chk.Equals, "ap-northeast-2") - c.Assert(p.Version, chk.Equals, "Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ") - c.Assert(p.String(), chk.Equals, "https://s3.dualstack.ap-northeast-2.amazonaws.com/jiac-art-awsbucket02-versionenabled/Test.pdf?versionId=Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ") + a.Nil(err) + a.Equal("s3.dualstack.ap-northeast-2.amazonaws.com", p.Endpoint) + a.Equal("jiac-art-awsbucket02-versionenabled", p.BucketName) + a.Equal("Test.pdf", p.ObjectKey) + a.Equal("ap-northeast-2", p.Region) + a.Equal("Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ", p.Version) + a.Equal("https://s3.dualstack.ap-northeast-2.amazonaws.com/jiac-art-awsbucket02-versionenabled/Test.pdf?versionId=Cy0pgpqHDTR7RlMEwU_BxDVER2QN5lJJ", p.String()) } -func (s *s3URLPartsTestSuite) TestS3URLParseNegative(c *chk.C) { +func TestS3URLParseNegative(t *testing.T) { + a := assert.New(t) u, _ := url.Parse("http://bucket.amazonawstypo.com") _, err := NewS3URLParts(*u) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), invalidS3URLErrorMessage), chk.Equals, true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), invalidS3URLErrorMessage)) u, _ = url.Parse("http://bucket.s3.amazonawstypo.com") _, err = NewS3URLParts(*u) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), invalidS3URLErrorMessage), chk.Equals, true) + a.NotNil(err) + a.True(strings.Contains(err.Error(), invalidS3URLErrorMessage)) u, _ = url.Parse("http://s3-test.blob.core.windows.net") _, err = NewS3URLParts(*u) - c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), invalidS3URLErrorMessage), chk.Equals, true) -} + a.NotNil(err) + a.True(strings.Contains(err.Error(), invalidS3URLErrorMessage)) +} \ No newline at end of file diff --git a/common/util_test.go 
b/common/util_test.go index 3e80c0315..389eaa9c1 100644 --- a/common/util_test.go +++ b/common/util_test.go @@ -1,18 +1,18 @@ package common -import chk "gopkg.in/check.v1" +import ( + "github.com/stretchr/testify/assert" + "testing" +) -type utilityFunctionsSuite struct{} - -var _ = chk.Suite(&utilityFunctionsSuite{}) - -func (*utilityFunctionsSuite) Test_VerifyIsURLResolvable(c *chk.C) { - c.Skip("Disabled the check in mainline code") +func Test_VerifyIsURLResolvable(t *testing.T) { + a := assert.New(t) + t.Skip("Disabled the check in mainline code") valid_url := "https://github.com/" invalidUrl := "someString" invalidUrl2 := "https://$invalidAccount.blob.core.windows.net/" - c.Assert(VerifyIsURLResolvable(valid_url), chk.IsNil) - c.Assert(VerifyIsURLResolvable(invalidUrl), chk.NotNil) - c.Assert(VerifyIsURLResolvable(invalidUrl2), chk.NotNil) + a.Nil(VerifyIsURLResolvable(valid_url)) + a.NotNil(VerifyIsURLResolvable(invalidUrl)) + a.NotNil(VerifyIsURLResolvable(invalidUrl2)) } \ No newline at end of file diff --git a/common/uuid_test.go b/common/uuid_test.go index 4cd5ecabe..349b3d4e2 100644 --- a/common/uuid_test.go +++ b/common/uuid_test.go @@ -21,24 +21,22 @@ package common import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "strings" + "testing" ) -type uuidTestSuite struct{} - -var _ = chk.Suite(&uuidTestSuite{}) - -func (s *uuidTestSuite) TestGUIDGenerationAndParsing(c *chk.C) { +func TestGUIDGenerationAndParsing(t *testing.T) { + a := assert.New(t) for i := 0; i < 100; i++ { uuid := NewUUID() // no space is allowed containsSpace := strings.Contains(uuid.String(), " ") - c.Assert(containsSpace, chk.Equals, false) + a.False(containsSpace) parsed, err := ParseUUID(uuid.String()) - c.Assert(err, chk.IsNil) - c.Assert(parsed, chk.DeepEquals, uuid) + a.Nil(err) + a.Equal(uuid, parsed) } -} +} \ No newline at end of file diff --git a/common/zt_ProxyLookupCache_test.go b/common/zt_ProxyLookupCache_test.go index efddc5dd2..af822ad98 100644 --- a/common/zt_ProxyLookupCache_test.go +++ b/common/zt_ProxyLookupCache_test.go @@ -21,18 +21,16 @@ package common import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/http" "net/url" "sync" + "testing" "time" ) -type proxyLookupCacheSuite struct{} - -var _ = chk.Suite(&proxyLookupCacheSuite{}) - -func (s *proxyLookupCacheSuite) TestCacheIsUsed(c *chk.C) { +func TestCacheIsUsed(t *testing.T) { + a := assert.New(t) fakeMu := &sync.Mutex{} // avoids race condition in test code var fakeResult *url.URL var fakeError error @@ -54,23 +52,23 @@ func (s *proxyLookupCacheSuite) TestCacheIsUsed(c *chk.C) { fakeMu.Unlock() fooRequest, _ := http.NewRequest("GET", "http://foo.com/a", nil) fooResult1, err := pc.getProxy(fooRequest) - c.Check(err, chk.IsNil) - c.Check(fooResult1.String(), chk.Equals, "http://fooproxy") + a.Nil(err) + a.Equal("http://fooproxy", fooResult1.String()) fakeMu.Lock() fakeResult, fakeError = url.Parse("http://barproxy") fakeMu.Unlock() barRequest, _ := http.NewRequest("GET", "http://bar.com/a", nil) barResult1, err := pc.getProxy(barRequest) - c.Check(err, chk.IsNil) - c.Check(barResult1.String(), chk.Equals, "http://barproxy") + a.Nil(err) + a.Equal("http://barproxy", barResult1.String()) fakeMu.Lock() fakeResult, fakeError = url.Parse("http://this will give a parsing error") fakeMu.Unlock() erroringRequest, _ := http.NewRequest("GET", "http://willerror.com/a", nil) _, expectedErr := pc.getProxy(erroringRequest) - c.Check(expectedErr, chk.NotNil) + a.NotNil(expectedErr) // set 
dummy values for next lookup, so we can be sure that lookups don't happen (i.e. we don't get these values, so we know we hit the cache) fakeMu.Lock() @@ -81,20 +79,21 @@ func (s *proxyLookupCacheSuite) TestCacheIsUsed(c *chk.C) { // lookup URLs with same host portion, but different paths. Expect cache hits. fooRequest, _ = http.NewRequest("GET", "http://foo.com/differentPathFromBefore", nil) fooResult2, err := pc.getProxy(fooRequest) - c.Check(err, chk.IsNil) - c.Check(fooResult2.String(), chk.Equals, fooResult1.String()) + a.Nil(err) + a.Equal(fooResult1.String(), fooResult2.String()) barRequest, _ = http.NewRequest("GET", "http://bar.com/differentPathFromBefore", nil) barResult2, err := pc.getProxy(barRequest) - c.Check(err, chk.IsNil) - c.Check(barResult2.String(), chk.Equals, barResult1.String()) + a.Nil(err) + a.Equal(barResult1.String(), barResult2.String()) erroringRequest, _ = http.NewRequest("GET", "http://willerror.com/differentPathFromBefore", nil) _, expectedErr = pc.getProxy(erroringRequest) - c.Check(expectedErr, chk.NotNil) + a.NotNil(expectedErr) } -func (s *proxyLookupCacheSuite) TestCacheEntriesGetRefreshed(c *chk.C) { +func TestCacheEntriesGetRefreshed(t *testing.T) { + a := assert.New(t) fakeMu := &sync.Mutex{} // avoids race condition in test code var fakeResult *url.URL var fakeError error @@ -117,8 +116,8 @@ func (s *proxyLookupCacheSuite) TestCacheEntriesGetRefreshed(c *chk.C) { fakeMu.Unlock() fooRequest, _ := http.NewRequest("GET", "http://foo.com/a", nil) fooResult1, err := pc.getProxy(fooRequest) - c.Check(err, chk.IsNil) - c.Check(fooResult1.String(), chk.Equals, "http://fooproxy") + a.Nil(err) + a.Equal("http://fooproxy", fooResult1.String()) // prime the refresh to actually produce a change fakeMu.Lock() @@ -130,11 +129,12 @@ func (s *proxyLookupCacheSuite) TestCacheEntriesGetRefreshed(c *chk.C) { // read from cache, and check we get the update result fooResult2, err := pc.getProxy(fooRequest) - c.Check(err, chk.IsNil) - c.Check(fooResult2.String(), chk.Equals, "http://updatedFooProxy") + a.Nil(err) + a.Equal("http://updatedFooProxy", fooResult2.String()) } -func (s *proxyLookupCacheSuite) TestUseOfLookupMethodHasTimout(c *chk.C) { +func TestUseOfLookupMethodHasTimout(t *testing.T) { + a := assert.New(t) pc := &proxyLookupCache{ m: &sync.Map{}, lookupLock: &sync.Mutex{}, @@ -147,5 +147,5 @@ func (s *proxyLookupCacheSuite) TestUseOfLookupMethodHasTimout(c *chk.C) { fooRequest, _ := http.NewRequest("GET", "http://foo.com/a", nil) tuple := pc.getProxyNoCache(fooRequest) - c.Check(tuple.err, chk.Equals, ProxyLookupTimeoutError) -} + a.Equal(ProxyLookupTimeoutError, tuple.err) +} \ No newline at end of file diff --git a/common/zt_credCache_test.go b/common/zt_credCache_test.go index 69cda9efa..55f6f037f 100644 --- a/common/zt_credCache_test.go +++ b/common/zt_credCache_test.go @@ -21,19 +21,12 @@ package common import ( + "github.com/stretchr/testify/assert" "testing" "github.com/Azure/go-autorest/autorest/adal" - chk "gopkg.in/check.v1" ) -// Hookup to the testing framework -func Test(t *testing.T) { chk.TestingT(t) } - -type credCacheTestSuite struct{} - -var _ = chk.Suite(&credCacheTestSuite{}) - var fakeTokenInfo = OAuthTokenInfo{ Token: adal.Token{ AccessToken: 
"aaa0eXAiOiJKz1QiLCJhbGciOibbbbI1NiIsIng1dCcCImlCakwxUmNdddhpeTRmcHhJeGRacW5oTTJZayIsImtpZCI948lCakwxUmNxemhpeTRmcHhJeGRacW9oTTJZayJ9.eyJhdWQiOiJodHRwczovL3N0b3JhZ2UuYXp1cmUuY29tIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTUyODEwNDQ5NywibmJmIjoxNTI4MTA0NDk3LCJleHAiOjE1MjgxMDgzOTcsIl9jbGFpbV9uYW1lcyI6eyJncm91aEHiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy9hOTIzZjhkMC1kNGNlLTQyODAtOTEzNS01ZWE2ODVjMzgwMjYvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhIQUFBQU1nVkUzWE9DMHdQcG9OeGt1b2VsK1haVGNwOEhLekRORlp4NDZkMW5VN2VHUGNmbWdWNGxnUlN0NjUwcndXaHJPaCtaTXlGa3h2S3hVR3QvTHBjanNnPT0iLCJhbXIiOlsid2lhIiwibWZhIl0sImFwcGlkIjoiMTk1MGEyNTgtMjI3Yi00ZTMxLWE5Y2YtNzE3NDk1OTQ1ZmMyIiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIyMjFjZTY3Yy1mYjM3LTQzMjYtYWJjYy0zNTRhZGJmNzk1NWYiLCJmYW1pbHlfbmFtZSI6IkZhbiIsImdpdmVuX25hbWUiOiJKYXNvbiIsImluX2NvcnAiOiJ0cnVlIiwiaXBhZGRyIjoiMTY3LjIyMC4yNTUuNTgiLCJuYW1lIjoiSmFzb24gRmFuIiwib2lkIjoiYTkyM2Y4ZDAtZDRjZS00MjgwLTkxMzUtNWVhNjg1YzM4MDI2Iiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxNDY3NzMwODUtOTAzMzYzMjg1LTcxOTM0NDcwNy0xODI4ODgzIiwicHVpZCI6IjEwMDMwMDAwOEFCNjkzQTUi10JzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJBVVBFWXo1Y0xPd1BYcmRQaUF2OXZRamNGelpDN3dRRWd5dUJhejFfVnBFIiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJqaWFjZmFuQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJqaWFjZmFuQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiJfTlpKdlVQVG4wdTExTVFrTEcwTEFBIiwidmVyIjoiMS4wIn0.J3LZgQ7RTmqZzVcnsiruzLfcuK-vceNja7gp6wJhwwcPN1LzHK9Q1ANRVBKDMRulHiWvPNmavxf493EqkvgjHDkGSSTL3S7elLVF4Hr2SHHhUqyWoiEukY0jX5DT2tg71L4KujV7csJN-7ECqXyU0DSrRSRf3gCbD7c2ne5CFVCi1lEpEK_1lLiRZe45TTuJXmQrxEr4B6fY5MRkBz05lIbhxsUPmUunR02_-coNgQcHBOkdGdLGx4qjbzn58EJO0F2bimDRend3Tjnoia2aFq_kvQslcLU3BxIvYO5TZNfGkZyOlavoKEccPPmAb033zg9AKD_6_7K-R0mu1qmZUA", @@ -48,7 +41,8 @@ var fakeTokenInfo = OAuthTokenInfo{ ActiveDirectoryEndpoint: "https://login.microsoftonline.com", } -func (s *credCacheTestSuite) TestCredCacheSaveLoadDeleteHas(c *chk.C) { +func TestCredCacheSaveLoadDeleteHas(t *testing.T) { + a := assert.New(t) credCache := NewCredCache(CredCacheOptions{ DPAPIFilePath: ".", KeyName: "AzCopyOAuthTokenCache", @@ -68,43 +62,43 @@ func (s *credCacheTestSuite) TestCredCacheSaveLoadDeleteHas(c *chk.C) { hasCachedToken, err := credCache.HasCachedToken() if hasCachedToken { err = credCache.RemoveCachedToken() - c.Assert(err, chk.IsNil) + a.Nil(err) } // Ensure no token cached initially. hasCachedToken, err = credCache.HasCachedToken() - c.Assert(hasCachedToken, chk.Equals, false) + a.False(hasCachedToken) // Test save token. err = credCache.SaveToken(fakeTokenInfo) - c.Assert(err, chk.IsNil) + a.Nil(err) // Test has cached token, and validate save token. hasCachedToken, err = credCache.HasCachedToken() - c.Assert(err, chk.IsNil) - c.Assert(hasCachedToken, chk.Equals, true) + a.Nil(err) + a.True(hasCachedToken) // Test load token. token, err := credCache.LoadToken() - c.Assert(err, chk.IsNil) - c.Assert(token, chk.NotNil) - c.Assert(*token, chk.DeepEquals, fakeTokenInfo) + a.Nil(err) + a.NotNil(token) + a.Equal(fakeTokenInfo, *token) // Test update token. 
cloneTokenWithDiff := fakeTokenInfo // deep copy cloneTokenWithDiff.Tenant = "change the tenant info a little" err = credCache.SaveToken(cloneTokenWithDiff) - c.Assert(err, chk.IsNil) + a.Nil(err) token, err = credCache.LoadToken() - c.Assert(err, chk.IsNil) - c.Assert(token, chk.NotNil) - c.Assert(*token, chk.DeepEquals, cloneTokenWithDiff) + a.Nil(err) + a.NotNil(token) + a.Equal(cloneTokenWithDiff, *token) // Test remove token. err = credCache.RemoveCachedToken() - c.Assert(err, chk.IsNil) + a.Nil(err) // Test has cached token, and validate remove token. hasCachedToken, err = credCache.HasCachedToken() - c.Assert(hasCachedToken, chk.Equals, false) -} + a.False(hasCachedToken) +} \ No newline at end of file diff --git a/common/zt_decompressingWriter_test.go b/common/zt_decompressingWriter_test.go index 048977412..d1712fd4e 100644 --- a/common/zt_decompressingWriter_test.go +++ b/common/zt_decompressingWriter_test.go @@ -24,14 +24,13 @@ import ( "bytes" "compress/gzip" "compress/zlib" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "io" "math/rand" "sync/atomic" + "testing" ) -type decompressingWriterSuite struct{} - type closeableBuffer struct { atomicCloseWasCalled int32 *bytes.Buffer @@ -46,9 +45,8 @@ func (c *closeableBuffer) closeWasCalled() bool { return atomic.LoadInt32(&c.atomicCloseWasCalled) == 1 } -var _ = chk.Suite(&decompressingWriterSuite{}) - -func (d *decompressingWriterSuite) TestDecompressingWriter_SuccessCases(c *chk.C) { +func TestDecompressingWriter_SuccessCases(t *testing.T) { + a := assert.New(t) cases := []struct { desc string tp CompressionType @@ -69,7 +67,7 @@ func (d *decompressingWriterSuite) TestDecompressingWriter_SuccessCases(c *chk.C for _, cs := range cases { // given: - originalData, compressedData := d.getTestData(c, cs.tp, cs.originalSize) + originalData, compressedData := getTestData(a, cs.tp, cs.originalSize) // when: // we decompress using a decompressing writer @@ -77,21 +75,21 @@ func (d *decompressingWriterSuite) TestDecompressingWriter_SuccessCases(c *chk.C decWriter := NewDecompressingWriter(destFile, cs.tp) copyBuf := make([]byte, cs.writeBufferSize) _, err := io.CopyBuffer(decWriter, bytes.NewReader(compressedData), copyBuf) // write compressed data to decWriter - c.Assert(err, chk.IsNil) + a.Nil(err) err = decWriter.Close() - c.Assert(err, chk.IsNil) + a.Nil(err) // then: // the data that was written to the underlying destination is correctly decompressed dataWritten := destFile.Bytes() - c.Assert(dataWritten, chk.DeepEquals, originalData) + a.Equal(originalData, dataWritten) // the dest is closed - c.Assert(destFile.closeWasCalled(), chk.Equals, true) + a.True(destFile.closeWasCalled()) } } -func (d *decompressingWriterSuite) TestDecompressingWriter_EarlyClose(c *chk.C) { - +func TestDecompressingWriter_EarlyClose(t *testing.T) { + a := assert.New(t) cases := []CompressionType{ ECompressionType.GZip(), ECompressionType.ZLib(), @@ -99,7 +97,7 @@ func (d *decompressingWriterSuite) TestDecompressingWriter_EarlyClose(c *chk.C) for _, tp := range cases { // given: dataSize := int(rand.Int31n(1024*1024) + 100) - _, compressedData := d.getTestData(c, tp, dataSize) + _, compressedData := getTestData(a, tp, dataSize) sizeBeforeEarlyClose := int64(len(compressedData) / 2) // when: @@ -107,20 +105,20 @@ func (d *decompressingWriterSuite) TestDecompressingWriter_EarlyClose(c *chk.C) destFile := &closeableBuffer{Buffer: &bytes.Buffer{}} // will be a file in real usage, but just a buffer in this test decWriter := 
NewDecompressingWriter(destFile, tp) n, err := io.CopyN(decWriter, bytes.NewReader(compressedData), sizeBeforeEarlyClose) // process only some of the data - c.Assert(err, chk.IsNil) + a.Nil(err) err = decWriter.Close() // then: // the amount processed was as expected, the dest file is closed, and an error was returned from close (because decompressor never sees the expected footer) - c.Assert(n, chk.Equals, sizeBeforeEarlyClose) - c.Assert(destFile.closeWasCalled(), chk.Equals, true) - c.Assert(err, chk.NotNil) + a.Equal(sizeBeforeEarlyClose, n) + a.True(destFile.closeWasCalled()) + a.NotNil(err) } } -func (d *decompressingWriterSuite) getTestData(c *chk.C, tp CompressionType, originalSize int) (original []byte, compressed []byte) { +func getTestData(a *assert.Assertions, tp CompressionType, originalSize int) (original []byte, compressed []byte) { // we have original uncompressed data - originalData := d.genCompressibleTestData(originalSize) + originalData := genCompressibleTestData(originalSize) // and from that we have original compressed data compBuf := &bytes.Buffer{} var comp io.WriteCloser = zlib.NewWriter(compBuf) @@ -129,8 +127,8 @@ func (d *decompressingWriterSuite) getTestData(c *chk.C, tp CompressionType, ori } _, err := io.Copy(comp, bytes.NewReader(originalData)) // write into buf by way of comp - c.Assert(err, chk.IsNil) - c.Assert(comp.Close(), chk.IsNil) + a.Nil(err) + a.Nil(comp.Close()) compressedData := compBuf.Bytes() return originalData, compressedData } @@ -143,7 +141,7 @@ func (d *decompressingWriterSuite) TestDecompressingWriter_GenTestData(c *chk.C) f.Close() }*/ -func (d *decompressingWriterSuite) genCompressibleTestData(size int) []byte { +func genCompressibleTestData(size int) []byte { phrases := make([][]byte, rand.Intn(50)+1) for i := range phrases { phrases[i] = make([]byte, rand.Intn(100)+1) @@ -155,4 +153,4 @@ func (d *decompressingWriterSuite) genCompressibleTestData(size int) []byte { n += delta } return b.Bytes()[:size] -} +} \ No newline at end of file diff --git a/common/zt_exclusiveStringMap_test.go b/common/zt_exclusiveStringMap_test.go index c30c93570..5a10b6cff 100644 --- a/common/zt_exclusiveStringMap_test.go +++ b/common/zt_exclusiveStringMap_test.go @@ -21,24 +21,22 @@ package common import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type exclusiveStringMapSuite struct{} - -var _ = chk.Suite(&exclusiveStringMapSuite{}) - -func (s *exclusiveStringMapSuite) TestExclusiveStringMap(c *chk.C) { +func TestExclusiveStringMap(t *testing.T) { + a := assert.New(t) var m *ExclusiveStringMap addShouldWork := func(v string) { err := m.Add(v) - c.Assert(err, chk.IsNil) + a.Nil(err) } addShouldErrorOut := func(v string) { err := m.Add(v) - c.Assert(err, chk.Equals, exclusiveStringMapCollisionError) + a.Equal(exclusiveStringMapCollisionError, err) } // case sensitive @@ -60,10 +58,11 @@ func (s *exclusiveStringMapSuite) TestExclusiveStringMap(c *chk.C) { } -func (s *exclusiveStringMapSuite) TestChooseRightCaseSensitivity(c *chk.C) { +func TestChooseRightCaseSensitivity(t *testing.T) { + a := assert.New(t) test := func(fromTo FromTo, goos string, shouldBeSensitive bool) { m := NewExclusiveStringMap(fromTo, goos) - c.Assert(m.caseSensitive, chk.Equals, shouldBeSensitive) + a.Equal(shouldBeSensitive, m.caseSensitive) } test(EFromTo.BlobLocal(), "linux", true) @@ -73,4 +72,4 @@ func (s *exclusiveStringMapSuite) TestChooseRightCaseSensitivity(c *chk.C) { test(EFromTo.LocalFile(), "linux", false) // anything ToFile should be 
INsensitive test(EFromTo.BlobFile(), "linux", false) // anything ToFile should be INsensitive test(EFromTo.BlobBlob(), "windows", true) -} +} \ No newline at end of file diff --git a/common/zt_folderDeletionManager_test.go b/common/zt_folderDeletionManager_test.go index 2f61182de..1c5c5e6a4 100644 --- a/common/zt_folderDeletionManager_test.go +++ b/common/zt_folderDeletionManager_test.go @@ -22,152 +22,155 @@ package common import ( "context" - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "net/url" + "testing" ) -type folderDeletionManagerSuite struct{} - -var _ = chk.Suite(&folderDeletionManagerSuite{}) - -func (s *folderDeletionManagerSuite) u(str string) *url.URL { +func u(str string) *url.URL { u, _ := url.Parse("http://example.com/" + str) return u } -func (s *folderDeletionManagerSuite) TestFolderDeletion_BeforeChildrenSeen(c *chk.C) { +func TestFolderDeletion_BeforeChildrenSeen(t *testing.T) { + a := assert.New(t) f := NewFolderDeletionManager(context.Background(), EFolderPropertiesOption.AllFolders(), nil) deletionCallCount := 0 // ask for deletion of folder first - f.RequestDeletion(s.u("foo/bar"), func(context.Context, ILogger) bool { deletionCallCount++; return false }) - c.Assert(deletionCallCount, chk.Equals, 1) + f.RequestDeletion(u("foo/bar"), func(context.Context, ILogger) bool { deletionCallCount++; return false }) + a.Equal(1, deletionCallCount) // deletion should be attempted again after children seen and processed (if deletion returned false first time) - f.RecordChildExists(s.u("foo/bar/a")) - c.Assert(deletionCallCount, chk.Equals, 1) - f.RecordChildDeleted(s.u("foo/bar/a")) - c.Assert(deletionCallCount, chk.Equals, 2) + f.RecordChildExists(u("foo/bar/a")) + a.Equal(1, deletionCallCount) + f.RecordChildDeleted(u("foo/bar/a")) + a.Equal(2, deletionCallCount) } -func (s *folderDeletionManagerSuite) TestFolderDeletion_WithChildren(c *chk.C) { +func TestFolderDeletion_WithChildren(t *testing.T) { + a := assert.New(t) f := NewFolderDeletionManager(context.Background(), EFolderPropertiesOption.AllFolders(), nil) deletionCallCount := 0 lastDeletionFolder := "" - f.RecordChildExists(s.u("foo/bar/a")) - f.RecordChildExists(s.u("foo/bar/b")) - f.RecordChildExists(s.u("other/x")) + f.RecordChildExists(u("foo/bar/a")) + f.RecordChildExists(u("foo/bar/b")) + f.RecordChildExists(u("other/x")) - f.RequestDeletion(s.u("foo/bar"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "foo/bar"; return true }) - f.RequestDeletion(s.u("other"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "other"; return true }) - c.Assert(deletionCallCount, chk.Equals, 0) // deletion doesn't happen right now + f.RequestDeletion(u("foo/bar"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "foo/bar"; return true }) + f.RequestDeletion(u("other"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "other"; return true }) + a.Equal(0, deletionCallCount) // deletion doesn't happen right now - f.RecordChildDeleted(s.u("other/x")) // this is the last one in this parent, so deletion of that parent should happen now - c.Assert(deletionCallCount, chk.Equals, 1) - c.Assert(lastDeletionFolder, chk.Equals, "other") + f.RecordChildDeleted(u("other/x")) // this is the last one in this parent, so deletion of that parent should happen now + a.Equal(1, deletionCallCount) + a.Equal("other", lastDeletionFolder) - f.RecordChildDeleted(s.u("foo/bar/a")) - c.Assert(deletionCallCount, 
chk.Equals, 1) // no change - f.RecordChildDeleted(s.u("foo/bar/b")) // last one in its parent - c.Assert(deletionCallCount, chk.Equals, 2) // now deletion happens, since last child gone - c.Assert(lastDeletionFolder, chk.Equals, "foo/bar") + f.RecordChildDeleted(u("foo/bar/a")) + a.Equal(1, deletionCallCount) // no change + f.RecordChildDeleted(u("foo/bar/b")) // last one in its parent + a.Equal(2, deletionCallCount) // now deletion happens, since last child gone + a.Equal("foo/bar", lastDeletionFolder) } -func (s *folderDeletionManagerSuite) TestFolderDeletion_IsUnaffectedByQueryStringsAndPathEscaping(c *chk.C) { +func TestFolderDeletion_IsUnaffectedByQueryStringsAndPathEscaping(t *testing.T) { + a := assert.New(t) f := NewFolderDeletionManager(context.Background(), EFolderPropertiesOption.AllFolders(), nil) deletionCallCount := 0 lastDeletionFolder := "" - f.RecordChildExists(s.u("foo/bar%2Fa?SAS")) - f.RecordChildExists(s.u("foo/bar/b")) - f.RecordChildExists(s.u("other/x")) + f.RecordChildExists(u("foo/bar%2Fa?SAS")) + f.RecordChildExists(u("foo/bar/b")) + f.RecordChildExists(u("other/x")) - f.RequestDeletion(s.u("foo%2fbar"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "foo/bar"; return true }) - f.RequestDeletion(s.u("other?SAS"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "other"; return true }) - c.Assert(deletionCallCount, chk.Equals, 0) // deletion doesn't happen right now + f.RequestDeletion(u("foo%2fbar"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "foo/bar"; return true }) + f.RequestDeletion(u("other?SAS"), func(context.Context, ILogger) bool { deletionCallCount++; lastDeletionFolder = "other"; return true }) + a.Equal(0, deletionCallCount) // deletion doesn't happen right now - f.RecordChildDeleted(s.u("other%2fx")) // this is the last one in this parent, so deletion of that parent should happen now - c.Assert(deletionCallCount, chk.Equals, 1) - c.Assert(lastDeletionFolder, chk.Equals, "other") + f.RecordChildDeleted(u("other%2fx")) // this is the last one in this parent, so deletion of that parent should happen now + a.Equal(1, deletionCallCount) + a.Equal("other", lastDeletionFolder) - f.RecordChildDeleted(s.u("foo/bar/a")) - c.Assert(deletionCallCount, chk.Equals, 1) // no change - f.RecordChildDeleted(s.u("foo/bar/b?SAS")) // last one in its parent - c.Assert(deletionCallCount, chk.Equals, 2) // now deletion happens, since last child gone - c.Assert(lastDeletionFolder, chk.Equals, "foo/bar") + f.RecordChildDeleted(u("foo/bar/a")) + a.Equal(1, deletionCallCount) // no change + f.RecordChildDeleted(u("foo/bar/b?SAS")) // last one in its parent + a.Equal(2, deletionCallCount) // now deletion happens, since last child gone + a.Equal("foo/bar", lastDeletionFolder) } -func (s *folderDeletionManagerSuite) TestFolderDeletion_WithMultipleDeletionCallsOnOneFolder(c *chk.C) { +func TestFolderDeletion_WithMultipleDeletionCallsOnOneFolder(t *testing.T) { + a := assert.New(t) f := NewFolderDeletionManager(context.Background(), EFolderPropertiesOption.AllFolders(), nil) deletionResult := false deletionCallCount := 0 // run a deletion that where the deletion func returns false - f.RecordChildExists(s.u("foo/bar/a")) - f.RequestDeletion(s.u("foo/bar"), func(context.Context, ILogger) bool { deletionCallCount++; return deletionResult }) - c.Assert(deletionCallCount, chk.Equals, 0) - f.RecordChildDeleted(s.u("foo/bar/a")) - c.Assert(deletionCallCount, chk.Equals, 1) + 
f.RecordChildExists(u("foo/bar/a")) + f.RequestDeletion(u("foo/bar"), func(context.Context, ILogger) bool { deletionCallCount++; return deletionResult }) + a.Equal(0, deletionCallCount) + f.RecordChildDeleted(u("foo/bar/a")) + a.Equal(1, deletionCallCount) // Now find and process more children. When all are processed, // deletion should be automatically retried, because it didn't // succeed last time. // (May happen in AzCopy due to highly asynchronous nature and // fact that folders may be enumerated well before all their children) - f.RecordChildExists(s.u("foo/bar/b")) - c.Assert(deletionCallCount, chk.Equals, 1) + f.RecordChildExists(u("foo/bar/b")) + a.Equal(1, deletionCallCount) deletionResult = true // our next deletion should work - f.RecordChildDeleted(s.u("foo/bar/b")) - c.Assert(deletionCallCount, chk.Equals, 2) // deletion was called again, when count again dropped to zero + f.RecordChildDeleted(u("foo/bar/b")) + a.Equal(2, deletionCallCount) // deletion was called again, when count again dropped to zero // Now find and process even more children. // This time, here should be no deletion, because the deletion func _succeeded_ last time. // We don't expect ever to find another child after successful deletion, but may as well test it - f.RecordChildExists(s.u("foo/bar/c")) - f.RecordChildDeleted(s.u("foo/bar/c")) - c.Assert(deletionCallCount, chk.Equals, 2) // no change from above + f.RecordChildExists(u("foo/bar/c")) + f.RecordChildDeleted(u("foo/bar/c")) + a.Equal(2, deletionCallCount) // no change from above } -func (s *folderDeletionManagerSuite) TestFolderDeletion_WithMultipleFolderLevels(c *chk.C) { +func TestFolderDeletion_WithMultipleFolderLevels(t *testing.T) { + a := assert.New(t) f := NewFolderDeletionManager(context.Background(), EFolderPropertiesOption.AllFolders(), nil) deletionCallCount := 0 - f.RecordChildExists(s.u("base/a.txt")) - f.RecordChildExists(s.u("base/childfolder")) - f.RecordChildExists(s.u("base/childfolder/grandchildfolder")) - f.RecordChildExists(s.u("base/childfolder/grandchildfolder/ggcf")) - f.RecordChildExists(s.u("base/childfolder/grandchildfolder/ggcf/b.txt")) + f.RecordChildExists(u("base/a.txt")) + f.RecordChildExists(u("base/childfolder")) + f.RecordChildExists(u("base/childfolder/grandchildfolder")) + f.RecordChildExists(u("base/childfolder/grandchildfolder/ggcf")) + f.RecordChildExists(u("base/childfolder/grandchildfolder/ggcf/b.txt")) - f.RequestDeletion(s.u("base"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) - f.RequestDeletion(s.u("base/childfolder"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) - f.RequestDeletion(s.u("base/childfolder/grandchildfolder"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) - f.RequestDeletion(s.u("base/childfolder/grandchildfolder/ggcf"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) + f.RequestDeletion(u("base"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) + f.RequestDeletion(u("base/childfolder"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) + f.RequestDeletion(u("base/childfolder/grandchildfolder"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) + f.RequestDeletion(u("base/childfolder/grandchildfolder/ggcf"), func(context.Context, ILogger) bool { deletionCallCount++; return true }) - f.RecordChildDeleted(s.u("base/childfolder/grandchildfolder/ggcf/b.txt")) - c.Assert(deletionCallCount, chk.Equals, 3) // 
everything except base + f.RecordChildDeleted(u("base/childfolder/grandchildfolder/ggcf/b.txt")) + a.Equal(3, deletionCallCount) // everything except base - f.RecordChildDeleted(s.u("base/a.txt")) - c.Assert(deletionCallCount, chk.Equals, 4) // base is gone now too + f.RecordChildDeleted(u("base/a.txt")) + a.Equal(4, deletionCallCount) // base is gone now too } -func (s *folderDeletionManagerSuite) TestGetParent(c *chk.C) { +func TestGetParent(t *testing.T) { + a := assert.New(t) f := NewFolderDeletionManager(context.Background(), EFolderPropertiesOption.AllFolders(), nil) test := func(child string, expectedParent string) { u, _ := url.Parse(child) p, ok := f.(*standardFolderDeletionManager).getParent(u) if expectedParent == "" { - c.Assert(ok, chk.Equals, false) + a.False(ok) } else { - c.Assert(ok, chk.Equals, true) - c.Assert(p.String(), chk.Equals, expectedParent) + a.True(ok) + a.Equal(expectedParent, p.String()) } } @@ -176,4 +179,4 @@ func (s *folderDeletionManagerSuite) TestGetParent(c *chk.C) { test("http://example.com/foo/bar", "http://example.com/foo") test("http://example.com/foo%2Fbar", "http://example.com/foo") test("http://example.com/foo/bar?ooo", "http://example.com/foo") -} +} \ No newline at end of file diff --git a/common/zt_logSanitizer_test.go b/common/zt_logSanitizer_test.go index aa48aeebf..6f35af9d0 100644 --- a/common/zt_logSanitizer_test.go +++ b/common/zt_logSanitizer_test.go @@ -21,14 +21,12 @@ package common import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type logSanitizerSuite struct{} - -var _ = chk.Suite(&logSanitizerSuite{}) - -func (s *logSanitizerSuite) TestLogSanitizer(c *chk.C) { +func TestLogSanitizer(t *testing.T) { + a := assert.New(t) cases := []struct { raw string @@ -69,7 +67,7 @@ func (s *logSanitizerSuite) TestLogSanitizer(c *chk.C) { san := NewAzCopyLogSanitizer() for _, x := range cases { - c.Assert(san.SanitizeLogMessage(x.raw), chk.Equals, x.expectedSanitized) + a.Equal(x.expectedSanitized, san.SanitizeLogMessage(x.raw)) } -} +} \ No newline at end of file diff --git a/common/zt_multiSliceBytePooler_test.go b/common/zt_multiSliceBytePooler_test.go index ca309d79e..d80ebfbf3 100644 --- a/common/zt_multiSliceBytePooler_test.go +++ b/common/zt_multiSliceBytePooler_test.go @@ -21,16 +21,13 @@ package common import ( + "github.com/stretchr/testify/assert" "math" - - chk "gopkg.in/check.v1" + "testing" ) -type multiSliceBytePoolerSuite struct{} - -var _ = chk.Suite(&multiSliceBytePoolerSuite{}) - -func (s *multiSliceBytePoolerSuite) TestMultiSliceSlotInfo(c *chk.C) { +func TestMultiSliceSlotInfo(t *testing.T) { + a := assert.New(t) eightMB := 8 * 1024 * 1024 cases := []struct { @@ -58,10 +55,10 @@ func (s *multiSliceBytePoolerSuite) TestMultiSliceSlotInfo(c *chk.C) { // now lets see if the pooler is working as we expect slotIndex, maxCap := getSlotInfo(int64(x.size)) - c.Assert(slotIndex, chk.Equals, roundedLogBase2) // this what, mathematically, we expect - c.Assert(slotIndex, chk.Equals, x.expectedSlotIndex) // this what our test case said (should be same) + a.Equal(roundedLogBase2, slotIndex) // this is what, mathematically, we expect + a.Equal(x.expectedSlotIndex, slotIndex) // this is what our test case said (should be same) - c.Assert(maxCap, chk.Equals, x.expectedMaxCapInSlot) + a.Equal(x.expectedMaxCapInSlot, maxCap) } -} +} \ No newline at end of file diff --git a/go.mod b/go.mod index 9f82dcd73..5cf2165c7 100644 --- a/go.mod +++ b/go.mod @@ -27,6 +27,8 @@ require ( gopkg.in/check.v1
v1.0.0-20201130134442-10cb98267c6c ) +require github.com/stretchr/testify v1.8.1 + require ( cloud.google.com/go v0.107.0 // indirect cloud.google.com/go/compute v1.14.0 // indirect @@ -37,6 +39,7 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-ini/ini v1.66.4 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -47,6 +50,7 @@ require ( github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect @@ -58,6 +62,7 @@ require ( google.golang.org/grpc v1.51.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) go 1.19 diff --git a/sddl/parseSddl_test.go b/sddl/parseSddl_test.go index 8204c2c81..2593851d5 100644 --- a/sddl/parseSddl_test.go +++ b/sddl/parseSddl_test.go @@ -21,21 +21,14 @@ package sddl_test import ( + "github.com/stretchr/testify/assert" "testing" - chk "gopkg.in/check.v1" - "github.com/Azure/azure-storage-azcopy/v10/sddl" ) -// Hookup to the testing framework -func Test(t *testing.T) { chk.TestingT(t) } - -type sddlTestSuite struct{} - -var _ = chk.Suite(&sddlTestSuite{}) - -func (*sddlTestSuite) TestSDDLSplitting(c *chk.C) { +func TestSDDLSplitting(t *testing.T) { + a := assert.New(t) tests := []struct { input string result sddl.SDDLString @@ -161,8 +154,8 @@ func (*sddlTestSuite) TestSDDLSplitting(c *chk.C) { for _, v := range tests { res, err := sddl.ParseSDDL(v.input) - c.Assert(err, chk.IsNil) - c.Log("Input: ", v.input, " Expected result: ", v.result.String(), " Actual result: ", res.String()) - c.Assert(res, chk.DeepEquals, v.result) + a.Nil(err) + t.Log("Input: ", v.input, " Expected result: ", v.result.String(), " Actual result: ", res.String()) + a.Equal(v.result, res) } -} +} \ No newline at end of file diff --git a/sddl/sddlPortable_test.go b/sddl/sddlPortable_test.go index 71a57a2d6..e366a141b 100644 --- a/sddl/sddlPortable_test.go +++ b/sddl/sddlPortable_test.go @@ -21,23 +21,16 @@ package sddl import ( + "github.com/stretchr/testify/assert" "regexp" "testing" - - chk "gopkg.in/check.v1" ) -// Hookup to the testing framework -func Test(t *testing.T) { chk.TestingT(t) } - -type sddlPortableSuite struct{} - -var _ = chk.Suite(&sddlPortableSuite{}) - // this test uses "contoso" SIDs (don't want real SIDs here). The RID portion of the SIDs should also be fake here (e.g. 
using 9999x as below) // Contoso SID is from https://docs.microsoft.com/en-us/windows/security/identity-protection/access-control/security-identifiers -func (s *sddlPortableSuite) TestMakingSDDLPortable(c *chk.C) { - translateSID = s.TranslateContosoSID +func TestMakingSDDLPortable(t *testing.T) { + a := assert.New(t) + translateSID = TranslateContosoSID defer func() { translateSID = OSTranslateSID }() tests := []struct { @@ -90,25 +83,25 @@ func (s *sddlPortableSuite) TestMakingSDDLPortable(c *chk.C) { return wsRegex.ReplaceAllString(s, "") } - for _, t := range tests { - t.input = removeEols(t.input) - t.expectedOutput = removeEols(t.expectedOutput) - c.Log(t.input) - c.Log(t.expectedOutput) + for _, test := range tests { + test.input = removeEols(test.input) + test.expectedOutput = removeEols(test.expectedOutput) + t.Log(test.input) + t.Log(test.expectedOutput) - parsed, _ := ParseSDDL(removeEols(t.input)) + parsed, _ := ParseSDDL(removeEols(test.input)) portableVersion := parsed.PortableString() - c.Assert(portableVersion, chk.Equals, removeEols(t.expectedOutput)) + a.Equal(removeEols(test.expectedOutput), portableVersion) } } -func (*sddlPortableSuite) TranslateContosoSID(sid string) (string, error) { +func TranslateContosoSID(sid string) (string, error) { const contosoBase = "S-1-5-21-1004336348-1177238915-682003330" if len(sid) > 2 { // assume its already a full SID return sid, nil } return contosoBase + "-" + sid, nil // unlike real OS function, we leave the BU or whatever on the end instead of making it numeric, but that's OK because we just need to make sure the replacements happen -} +} \ No newline at end of file diff --git a/ste/concurrency_test.go b/ste/concurrency_test.go index fed14001d..08ee0f3ec 100644 --- a/ste/concurrency_test.go +++ b/ste/concurrency_test.go @@ -1,37 +1,35 @@ package ste import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type mainTestSuite struct{} - -var _ = chk.Suite(&mainTestSuite{}) - const ( minConcurrency = 32 maxConcurrency = 300 ) -func (s *mainTestSuite) TestConcurrencyValue(c *chk.C) { +func TestConcurrencyValue(t *testing.T) { + a := assert.New(t) // weak machines for i := 1; i < 5; i++ { min, max := getMainPoolSize(i, false) - c.Assert(min, chk.Equals, minConcurrency) - c.Assert(max.Value, chk.Equals, minConcurrency) + a.Equal(minConcurrency, min) + a.Equal(minConcurrency, max.Value) } // moderately powerful machines for i := 5; i < 19; i++ { min, max := getMainPoolSize(i, false) - c.Assert(min, chk.Equals, 16*i) - c.Assert(max.Value, chk.Equals, 16*i) + a.Equal(16*i, min) + a.Equal(16*i, max.Value) } // powerful machines for i := 19; i < 24; i++ { min, max := getMainPoolSize(i, false) - c.Assert(min, chk.Equals, maxConcurrency) - c.Assert(max.Value, chk.Equals, maxConcurrency) + a.Equal(maxConcurrency, min) + a.Equal(maxConcurrency, max.Value) } -} +} \ No newline at end of file diff --git a/ste/mgr-JobPartMgr_test.go b/ste/mgr-JobPartMgr_test.go index ecdfbae70..ba632d759 100644 --- a/ste/mgr-JobPartMgr_test.go +++ b/ste/mgr-JobPartMgr_test.go @@ -21,19 +21,13 @@ package ste import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "strings" "testing" ) -// Hookup to the testing framework -func Test(t *testing.T) { chk.TestingT(t) } - -type jobPartMgrTestSuite struct{} - -var _ = chk.Suite(&jobPartMgrTestSuite{}) - -func (s *jobPartMgrTestSuite) TestInferContentType(c *chk.C) { +func TestInferContentType(t *testing.T) { + a := assert.New(t) // Arrange partMgr := jobPartMgr{} @@ 
-57,6 +51,6 @@ func (s *jobPartMgrTestSuite) TestInferContentType(c *chk.C) { // make sure the inferred type is correct // we use Contains to check because charset is also in contentType - c.Assert(strings.Contains(contentType, expectedType), chk.Equals, true) + a.True(strings.Contains(contentType, expectedType)) } -} +} \ No newline at end of file diff --git a/ste/sender_blockBlob_test.go b/ste/sender_blockBlob_test.go index 0e0a4d615..19e952963 100644 --- a/ste/sender_blockBlob_test.go +++ b/ste/sender_blockBlob_test.go @@ -22,15 +22,12 @@ package ste import ( "fmt" - - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type blockBlobSuite struct{} - -var _ = chk.Suite(&blockBlobSuite{}) - -func (s *blockBlobSuite) TestGetVerifiedChunkParams(c *chk.C) { +func TestGetVerifiedChunkParams(t *testing.T) { + a := assert.New(t) // Mock required params transferInfo := TransferInfo{ BlockSize: 4195352576, // 4001MiB @@ -42,19 +39,18 @@ func (s *blockBlobSuite) TestGetVerifiedChunkParams(c *chk.C) { memLimit := int64(2097152000) // 2000Mib expectedErr := fmt.Sprintf("Cannot use a block size of 3.91GiB. AzCopy is limited to use only 1.95GiB of memory") _, _, err := getVerifiedChunkParams(transferInfo, memLimit, memLimit) - c.Assert(err.Error(), chk.Equals, expectedErr) + a.Equal(expectedErr, err.Error()) // Verify large block Size memLimit = int64(8388608000) // 8000MiB expectedErr = fmt.Sprintf("block size of 3.91GiB for file tmpSrc of size 7.81GiB exceeds maximum allowed block size for a BlockBlob") _, _, err = getVerifiedChunkParams(transferInfo, memLimit, memLimit) - c.Assert(err.Error(), chk.Equals, expectedErr) + a.Equal(expectedErr, err.Error()) // High block count transferInfo.SourceSize = 2147483648 //16GiB transferInfo.BlockSize = 2048 // 2KiB expectedErr = fmt.Sprintf("Block size 2048 for source of size 2147483648 is not correct. 
Number of blocks will exceed the limit") _, _, err = getVerifiedChunkParams(transferInfo, memLimit, memLimit) - c.Assert(err.Error(), chk.Equals, expectedErr) - + a.Equal(expectedErr, err.Error()) } diff --git a/ste/sender_pageBlobFromURL_test.go b/ste/sender_pageBlobFromURL_test.go index 4ec6fba5d..a0f1c1c91 100644 --- a/ste/sender_pageBlobFromURL_test.go +++ b/ste/sender_pageBlobFromURL_test.go @@ -22,15 +22,12 @@ package ste import ( "github.com/Azure/azure-storage-blob-go/azblob" - - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type pageBlobFromURLSuite struct{} - -var _ = chk.Suite(&pageBlobFromURLSuite{}) - -func (s *pageBlobFromURLSuite) TestRangeWorthTransferring(c *chk.C) { +func TestRangeWorthTransferring(t *testing.T) { + a := assert.New(t) // Arrange copier := pageRangeOptimizer{} copier.srcPageList = &azblob.PageList{ @@ -53,6 +50,6 @@ func (s *pageBlobFromURLSuite) TestRangeWorthTransferring(c *chk.C) { // Action & Assert for testRange, expectedResult := range testCases { doesContainData := copier.doesRangeContainData(testRange) - c.Assert(doesContainData, chk.Equals, expectedResult) + a.Equal(expectedResult, doesContainData) } -} +} \ No newline at end of file diff --git a/ste/zt_concurrencyTuner_test.go b/ste/zt_concurrencyTuner_test.go index 5384d8404..106945395 100644 --- a/ste/zt_concurrencyTuner_test.go +++ b/ste/zt_concurrencyTuner_test.go @@ -21,14 +21,11 @@ package ste import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" "math" + "testing" ) -type concurrencyTunerSuite struct{} - -var _ = chk.Suite(&concurrencyTunerSuite{}) - type tunerStep struct { concurrency int // the concurrency value recommended by the tuner reason string @@ -36,11 +33,12 @@ type tunerStep struct { highCpuObserved bool } -func (s *concurrencyTunerSuite) noMax() int { +func noMax() int { return math.MaxInt32 } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_LowBW(c *chk.C) { +func TestConcurrencyTuner_LowBW(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 40, false}, {16, concurrencyReasonSeeking, 100, false}, @@ -52,11 +50,12 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_LowBW(c *chk.C) { {16, concurrencyReasonAtOptimum, 100, false}, {16, concurrencyReasonFinished, 100, false}} - s.runTest(c, steps, s.noMax(), true, false) + runTest(a, steps, noMax(), true, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_VeryLowBandwidth(c *chk.C) { +func TestConcurrencyTuner_VeryLowBandwidth(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 10, false}, {16, concurrencyReasonSeeking, 11, false}, @@ -67,11 +66,12 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_VeryLowBandwidth(c *chk.C) {4, concurrencyReasonAtOptimum, 10, false}, {4, concurrencyReasonFinished, 10, false}} - s.runTest(c, steps, s.noMax(), true, false) + runTest(a, steps, noMax(), true, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidth_PlentyOfCpu(c *chk.C) { +func TestConcurrencyTuner_HighBandwidth_PlentyOfCpu(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 400, false}, {16, concurrencyReasonSeeking, 1000, false}, @@ -84,10 +84,11 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidth_PlentyOfCpu(c {256, concurrencyReasonFinished, 20000, false}, } - s.runTest(c, steps, s.noMax(), true, false) + runTest(a, steps, noMax(), true, false) } -func (s *concurrencyTunerSuite) 
TestConcurrencyTuner_HighBandwidth_ConstrainedCpu(c *chk.C) { +func TestConcurrencyTuner_HighBandwidth_ConstrainedCpu(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 400, false}, {16, concurrencyReasonSeeking, 1000, false}, @@ -100,10 +101,11 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidth_ConstrainedCp {256, concurrencyReasonFinished, 20000, false}, } - s.runTest(c, steps, s.noMax(), true, false) + runTest(a, steps, noMax(), true, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_CapMaxConcurrency(c *chk.C) { +func TestConcurrencyTuner_CapMaxConcurrency(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 400, false}, {16, concurrencyReasonSeeking, 1000, false}, @@ -112,10 +114,11 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_CapMaxConcurrency(c *chk.C) {100, concurrencyReasonFinished, 8000, false}, } - s.runTest(c, steps, 100, true, false) + runTest(a, steps, 100, true, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_OptimalValueNotNearStandardSteps(c *chk.C) { +func TestConcurrencyTuner_OptimalValueNotNearStandardSteps(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 200, false}, {16, concurrencyReasonSeeking, 800, false}, @@ -131,11 +134,12 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_OptimalValueNotNearStandard {737, concurrencyReasonFinished, 19500, false}, } - s.runTest(c, steps, s.noMax(), true, false) + runTest(a, steps, noMax(), true, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidthWorkaround_AppliesWhenBenchmarking(c *chk.C) { +func TestConcurrencyTuner_HighBandwidthWorkaround_AppliesWhenBenchmarking(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 2000, false}, {16, concurrencyReasonSeeking, 8000, false}, @@ -144,10 +148,11 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidthWorkaround_App {64, concurrencyReasonBackoff, 11500, false}, // ... but, with no retries to prevent it backing off, it backs off from the higher value that it tried } - s.runTest(c, steps, s.noMax(), true, false) + runTest(a, steps, noMax(), true, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidthWorkaround_DoesntApplyWhenNotBenchmarking(c *chk.C) { +func TestConcurrencyTuner_HighBandwidthWorkaround_DoesntApplyWhenNotBenchmarking(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 2000, false}, {16, concurrencyReasonSeeking, 8000, false}, @@ -155,10 +160,11 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner_HighBandwidthWorkaround_Doe {16, concurrencyReasonBackoff, 115000, false}, } - s.runTest(c, steps, s.noMax(), false, false) + runTest(a, steps, noMax(), false, false) } -func (s *concurrencyTunerSuite) TestConcurrencyTuner__HighBandwidthWorkaround_StaysHighIfSeesRetries(c *chk.C) { +func TestConcurrencyTuner__HighBandwidthWorkaround_StaysHighIfSeesRetries(t *testing.T) { + a := assert.New(t) steps := []tunerStep{ {4, concurrencyReasonInitial, 2000, false}, {16, concurrencyReasonSeeking, 8000, false}, @@ -167,10 +173,10 @@ func (s *concurrencyTunerSuite) TestConcurrencyTuner__HighBandwidthWorkaround_St {256, concurrencyReasonAtOptimum, 11500, false}, // ... 
and, because there ARE reties, it does not back off } - s.runTest(c, steps, s.noMax(), true, true) + runTest(a, steps, noMax(), true, true) } -func (s *concurrencyTunerSuite) runTest(c *chk.C, steps []tunerStep, maxConcurrency int, isBenchmarking bool, simulateRetries bool) { +func runTest(a *assert.Assertions, steps []tunerStep, maxConcurrency int, isBenchmarking bool, simulateRetries bool) { t := NewAutoConcurrencyTuner(4, maxConcurrency, isBenchmarking) observedMbps := -1 // there's no observation at first observedHighCpu := false @@ -183,11 +189,11 @@ func (s *concurrencyTunerSuite) runTest(c *chk.C, steps []tunerStep, maxConcurre conc, reason := t.GetRecommendedConcurrency(observedMbps, observedHighCpu) // assert that it told us what we expect in this test - c.Assert(conc, chk.Equals, x.concurrency) - c.Assert(reason, chk.Equals, x.reason) + a.Equal(x.concurrency, conc) + a.Equal(x.reason, reason) // get the "simulated" throughput that results from the new concurrency observedMbps = x.mbpsObserved observedHighCpu = x.highCpuObserved } -} +} \ No newline at end of file diff --git a/ste/zt_performanceAdvisor_test.go b/ste/zt_performanceAdvisor_test.go index 35facea4e..3092b2617 100644 --- a/ste/zt_performanceAdvisor_test.go +++ b/ste/zt_performanceAdvisor_test.go @@ -22,15 +22,13 @@ package ste import ( "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/stretchr/testify/assert" chk "gopkg.in/check.v1" + "testing" ) -type perfAdvisorSuite struct{} - -var _ = chk.Suite(&perfAdvisorSuite{}) - -func (s *perfAdvisorSuite) TestPerfAdvisor(c *chk.C) { - +func TestPerfAdvisor(t *testing.T) { + a := assert.New(t) none := AdviceType{"NoneUnitTestOnly", ""} // abbreviated names for the various advice types, to make the test more concise @@ -109,7 +107,7 @@ func (s *perfAdvisorSuite) TestPerfAdvisor(c *chk.C) { // Run the tests, asserting that for each case, the given inputs produces the expected output for _, cs := range cases { - a := &PerformanceAdvisor{ + pa := &PerformanceAdvisor{ networkErrorPercentage: cs.networkErrorPercentage, serverBusyPercentageIOPS: cs.serverBusyPercentageIOPS, serverBusyPercentageThroughput: cs.serverBusyPercentageThroughput, @@ -124,7 +122,7 @@ func (s *perfAdvisorSuite) TestPerfAdvisor(c *chk.C) { avgBytesPerFile: cs.fileSpec.avgFileSize, isToAzureFiles: cs.fileSpec.isAzFiles, } - obtained := a.GetAdvice() + obtained := pa.GetAdvice() expectedCount := 1 if cs.expectedSecondary1 != none { expectedCount++ @@ -135,24 +133,24 @@ func (s *perfAdvisorSuite) TestPerfAdvisor(c *chk.C) { if cs.expectedSecondary3 != none { expectedCount++ } - c.Assert(len(obtained), chk.Equals, expectedCount, chk.Commentf(cs.caseName)) + a.Equal(expectedCount, len(obtained), chk.Commentf(cs.caseName)) - s.assertAdviceMatches(c, cs.caseName, obtained, 0, cs.expectedPrimaryResult) - s.assertAdviceMatches(c, cs.caseName, obtained, 1, cs.expectedSecondary1) - s.assertAdviceMatches(c, cs.caseName, obtained, 2, cs.expectedSecondary2) - s.assertAdviceMatches(c, cs.caseName, obtained, 3, cs.expectedSecondary3) + assertAdviceMatches(a, cs.caseName, obtained, 0, cs.expectedPrimaryResult) + assertAdviceMatches(a, cs.caseName, obtained, 1, cs.expectedSecondary1) + assertAdviceMatches(a, cs.caseName, obtained, 2, cs.expectedSecondary2) + assertAdviceMatches(a, cs.caseName, obtained, 3, cs.expectedSecondary3) } } -func (s *perfAdvisorSuite) assertAdviceMatches(c *chk.C, caseName string, obtained []common.PerformanceAdvice, index int, expected AdviceType) { +func assertAdviceMatches(a 
*assert.Assertions, caseName string, obtained []common.PerformanceAdvice, index int, expected AdviceType) { if expected.code == "NoneUnitTestOnly" { return } adv := obtained[index] shouldBePrimary := index == 0 - c.Assert(adv.PriorityAdvice, chk.Equals, shouldBePrimary, chk.Commentf(caseName)) - c.Assert(adv.Code, chk.Equals, expected.code, chk.Commentf(caseName)) + a.Equal(shouldBePrimary, adv.PriorityAdvice, chk.Commentf(caseName)) + a.Equal(expected.code, adv.Code, chk.Commentf(caseName)) } // TODO: for conciseness, we don't check the Title or Reason of the advice objects that are generated. -// Should we? +// Should we? \ No newline at end of file diff --git a/ste/zt_ste_misc_windows_test.go b/ste/zt_ste_misc_windows_test.go index ab8e60e99..9c9efb72c 100644 --- a/ste/zt_ste_misc_windows_test.go +++ b/ste/zt_ste_misc_windows_test.go @@ -22,27 +22,25 @@ package ste import ( - chk "gopkg.in/check.v1" + "github.com/stretchr/testify/assert" + "testing" ) -type steMiscSuite struct{} - -var _ = chk.Suite(&steMiscSuite{}) - -func (s *concurrencyTunerSuite) Test_IsParentShareRoot(c *chk.C) { +func Test_IsParentShareRoot(t *testing.T) { + a := assert.New(t) d := azureFilesDownloader{} - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share"), chk.Equals, false) // THIS is the share root, not the parent of this - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/"), chk.Equals, false) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share?aaa/bbb"), chk.Equals, false) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/?aaa/bbb"), chk.Equals, false) + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share")) // THIS is the share root, not the parent of this + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share/")) + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share?aaa/bbb")) + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share/?aaa/bbb")) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo"), chk.Equals, true) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/"), chk.Equals, true) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/?x/y"), chk.Equals, true) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo?x/y"), chk.Equals, true) + a.True(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo")) + a.True(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/")) + a.True(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/?x/y")) + a.True(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo?x/y")) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/bar"), chk.Equals, false) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/bar/"), chk.Equals, false) - c.Assert(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/bar?nethe"), chk.Equals, false) -} + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/bar")) + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/bar/")) + a.False(d.parentIsShareRoot("https://a.file.core.windows.net/share/foo/bar?nethe")) +} \ No newline at end of file