From e6269c7bcda9bda63d8a753bbffa749aab00a29b Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Thu, 8 Aug 2024 11:35:20 -0500 Subject: [PATCH 01/25] Added STG 96 service version (#45387) --- .../tests/ChangeFeedTestBase.cs | 1 + .../api/Azure.Storage.Blobs.net6.0.cs | 1 + .../api/Azure.Storage.Blobs.netstandard2.0.cs | 1 + .../api/Azure.Storage.Blobs.netstandard2.1.cs | 1 + .../src/BlobClientOptions.cs | 7 +++- .../tests/BlobsClientTestFixtureAttribute.cs | 1 + .../api/Azure.Storage.Common.net6.0.cs | 2 +- .../Azure.Storage.Common.netstandard2.0.cs | 2 +- .../src/Shared/Constants.cs | 2 +- .../src/Shared/StorageVersionExtensions.cs | 32 +++---------------- .../tests/CommonTestBase.cs | 5 +-- .../Azure.Storage.Files.DataLake.net6.0.cs | 1 + ...e.Storage.Files.DataLake.netstandard2.0.cs | 1 + .../src/DataLakeClientOptions.cs | 7 +++- .../DataLakeClientTestFixtureAttribute.cs | 1 + .../api/Azure.Storage.Files.Shares.net6.0.cs | 1 + ...ure.Storage.Files.Shares.netstandard2.0.cs | 1 + .../src/ShareClientOptions.cs | 7 +++- .../tests/ShareClientTestFixtureAttribute.cs | 1 + .../api/Azure.Storage.Queues.net6.0.cs | 1 + .../Azure.Storage.Queues.netstandard2.0.cs | 1 + .../Azure.Storage.Queues.netstandard2.1.cs | 1 + .../src/QueueClientOptions.cs | 7 +++- .../tests/QueueClientTestFixtureAttribute.cs | 1 + 24 files changed, 50 insertions(+), 36 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/ChangeFeedTestBase.cs b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/ChangeFeedTestBase.cs index cc5c65ae20673..d65ba1264ce66 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/ChangeFeedTestBase.cs +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/ChangeFeedTestBase.cs @@ -32,6 +32,7 @@ namespace Azure.Storage.Blobs.ChangeFeed.Tests BlobClientOptions.ServiceVersion.V2024_05_04, BlobClientOptions.ServiceVersion.V2024_08_04, BlobClientOptions.ServiceVersion.V2024_11_04, + 
BlobClientOptions.ServiceVersion.V2025_01_05, StorageVersionExtensions.LatestVersion, StorageVersionExtensions.MaxVersion, RecordingServiceVersion = StorageVersionExtensions.MaxVersion, diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index dd55404dfbc93..05cdde6988050 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -87,6 +87,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class BlobContainerClient diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index dd55404dfbc93..05cdde6988050 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -87,6 +87,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class BlobContainerClient diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index dd55404dfbc93..05cdde6988050 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -87,6 +87,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class BlobContainerClient diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs index b9167baec00dd..b16cefc83a535 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs +++ 
b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs @@ -151,7 +151,12 @@ public enum ServiceVersion /// /// The 2024-11-04 service version. /// - V2024_11_04 = 24 + V2024_11_04 = 24, + + /// + /// The 2025-01-05 service version. + /// + V2025_01_05 = 25 #pragma warning restore CA1707 // Identifiers should not contain underscores } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/BlobsClientTestFixtureAttribute.cs b/sdk/storage/Azure.Storage.Blobs/tests/BlobsClientTestFixtureAttribute.cs index bb82aeae55ff2..d0372ab20cf47 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/BlobsClientTestFixtureAttribute.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/BlobsClientTestFixtureAttribute.cs @@ -35,6 +35,7 @@ public BlobsClientTestFixtureAttribute(params object[] additionalParameters) BlobClientOptions.ServiceVersion.V2024_05_04, BlobClientOptions.ServiceVersion.V2024_08_04, BlobClientOptions.ServiceVersion.V2024_11_04, + BlobClientOptions.ServiceVersion.V2025_01_05, StorageVersionExtensions.LatestVersion, StorageVersionExtensions.MaxVersion }, diff --git a/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.net6.0.cs b/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.net6.0.cs index 39ffce6f73614..121838723ee4f 100644 --- a/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.net6.0.cs +++ b/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.net6.0.cs @@ -183,7 +183,7 @@ public enum SasProtocol } public partial class SasQueryParameters { - public const string DefaultSasVersion = "2024-11-04"; + public const string DefaultSasVersion = "2025-01-05"; protected SasQueryParameters() { } protected SasQueryParameters(System.Collections.Generic.IDictionary values) { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] diff --git a/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.netstandard2.0.cs b/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.netstandard2.0.cs index 
55ce1a3aa640e..9b59550e809d0 100644 --- a/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Common/api/Azure.Storage.Common.netstandard2.0.cs @@ -182,7 +182,7 @@ public enum SasProtocol } public partial class SasQueryParameters { - public const string DefaultSasVersion = "2024-11-04"; + public const string DefaultSasVersion = "2025-01-05"; protected SasQueryParameters() { } protected SasQueryParameters(System.Collections.Generic.IDictionary values) { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs index 60c665ffc9baf..17a32b2d46d41 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs @@ -25,7 +25,7 @@ internal static class Constants /// Gets the default service version to use when building shared access /// signatures. /// - public const string DefaultSasVersion = "2024-11-04"; + public const string DefaultSasVersion = "2025-01-05"; /// /// Max download range size while requesting a transactional hash. 
diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs index 979dbbcf20ddc..2a7bd90fb82a1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs @@ -56,7 +56,7 @@ internal static class StorageVersionExtensions /// internal const ServiceVersion MaxVersion = #if BlobSDK || QueueSDK || FileSDK || DataLakeSDK || ChangeFeedSDK || DataMovementSDK || BlobDataMovementSDK || ShareDataMovementSDK - ServiceVersion.V2024_11_04; + ServiceVersion.V2025_01_05; #else ERROR_STORAGE_SERVICE_NOT_DEFINED; #endif @@ -69,32 +69,7 @@ internal static class StorageVersionExtensions public static string ToVersionString(this ServiceVersion version) => version switch { -#if BlobSDK || FileSDK || DataLakeSDK - ServiceVersion.V2019_02_02 => "2019-02-02", - ServiceVersion.V2019_07_07 => "2019-07-07", - ServiceVersion.V2019_12_12 => "2019-12-12", - ServiceVersion.V2020_02_10 => "2020-02-10", - ServiceVersion.V2020_04_08 => "2020-04-08", - ServiceVersion.V2020_06_12 => "2020-06-12", - ServiceVersion.V2020_08_04 => "2020-08-04", - ServiceVersion.V2020_10_02 => "2020-10-02", - ServiceVersion.V2020_12_06 => "2020-12-06", - ServiceVersion.V2021_02_12 => "2021-02-12", - ServiceVersion.V2021_04_10 => "2021-04-10", - ServiceVersion.V2021_06_08 => "2021-06-08", - ServiceVersion.V2021_08_06 => "2021-08-06", - ServiceVersion.V2021_10_04 => "2021-10-04", - ServiceVersion.V2021_12_02 => "2021-12-02", - ServiceVersion.V2022_11_02 => "2022-11-02", - ServiceVersion.V2023_01_03 => "2023-01-03", - ServiceVersion.V2023_05_03 => "2023-05-03", - ServiceVersion.V2023_08_03 => "2023-08-03", - ServiceVersion.V2023_11_03 => "2023-11-03", - ServiceVersion.V2024_02_04 => "2024-02-04", - ServiceVersion.V2024_05_04 => "2024-05-04", - ServiceVersion.V2024_08_04 => "2024-08-04", - ServiceVersion.V2024_11_04 => 
"2024-11-04", -#elif QueueSDK +#if BlobSDK || FileSDK || DataLakeSDK || QueueSDK ServiceVersion.V2019_02_02 => "2019-02-02", ServiceVersion.V2019_07_07 => "2019-07-07", ServiceVersion.V2019_12_12 => "2019-12-12", @@ -119,6 +94,7 @@ public static string ToVersionString(this ServiceVersion version) => ServiceVersion.V2024_05_04 => "2024-05-04", ServiceVersion.V2024_08_04 => "2024-08-04", ServiceVersion.V2024_11_04 => "2024-11-04", + ServiceVersion.V2025_01_05 => "2025-01-05", #endif _ => throw Errors.VersionNotSupported(nameof(version)) }; @@ -180,6 +156,8 @@ public static Azure.Storage.Blobs.BlobClientOptions.ServiceVersion AsBlobsVersio Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_08_04, Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04 => Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04, + Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05 => + Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05, _ => throw Errors.VersionNotSupported(nameof(version)) }; #endif diff --git a/sdk/storage/Azure.Storage.Common/tests/CommonTestBase.cs b/sdk/storage/Azure.Storage.Common/tests/CommonTestBase.cs index 42d3ed10c84ba..5694c805d3550 100644 --- a/sdk/storage/Azure.Storage.Common/tests/CommonTestBase.cs +++ b/sdk/storage/Azure.Storage.Common/tests/CommonTestBase.cs @@ -34,8 +34,9 @@ namespace Azure.Storage.Test BlobClientOptions.ServiceVersion.V2024_05_04, BlobClientOptions.ServiceVersion.V2024_08_04, BlobClientOptions.ServiceVersion.V2024_11_04, - RecordingServiceVersion = BlobClientOptions.ServiceVersion.V2024_11_04, - LiveServiceVersions = new object[] { BlobClientOptions.ServiceVersion.V2024_08_04, })] + BlobClientOptions.ServiceVersion.V2025_01_05, + RecordingServiceVersion = BlobClientOptions.ServiceVersion.V2025_01_05, + LiveServiceVersions = new object[] { BlobClientOptions.ServiceVersion.V2024_11_04, })] public abstract class CommonTestBase : StorageTestBase { protected 
readonly BlobClientOptions.ServiceVersion _serviceVersion; diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index efd9e87cdaeff..d2ced44d996eb 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -35,6 +35,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class DataLakeDirectoryClient : Azure.Storage.Files.DataLake.DataLakePathClient diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index efd9e87cdaeff..d2ced44d996eb 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -35,6 +35,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class DataLakeDirectoryClient : Azure.Storage.Files.DataLake.DataLakePathClient diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs index 6fa6ec5166bbd..5f8fd0849ba0f 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs @@ -151,7 +151,12 @@ public enum ServiceVersion /// /// The 2024-11-04 service version. /// - V2024_11_04 = 24 + V2024_11_04 = 24, + + /// + /// The 2025-01-05 service version. 
+ /// + V2025_01_05 = 25 #pragma warning restore CA1707 // Identifiers should not contain underscores } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeClientTestFixtureAttribute.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeClientTestFixtureAttribute.cs index 1a3cee805c368..eab0498c5dfcc 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeClientTestFixtureAttribute.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeClientTestFixtureAttribute.cs @@ -33,6 +33,7 @@ public DataLakeClientTestFixtureAttribute() DataLakeClientOptions.ServiceVersion.V2024_05_04, DataLakeClientOptions.ServiceVersion.V2024_08_04, DataLakeClientOptions.ServiceVersion.V2024_11_04, + DataLakeClientOptions.ServiceVersion.V2025_01_05, StorageVersionExtensions.LatestVersion, StorageVersionExtensions.MaxVersion) { diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index d76d0c71300fd..88fbd1326e018 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -147,6 +147,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class ShareDirectoryClient diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index d76d0c71300fd..88fbd1326e018 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -147,6 +147,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class ShareDirectoryClient 
diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareClientOptions.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareClientOptions.cs index 787f0c299080b..30c5ab3b05155 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareClientOptions.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareClientOptions.cs @@ -148,7 +148,12 @@ public enum ServiceVersion /// /// The 2024-11-04 service version. /// - V2024_11_04 = 24 + V2024_11_04 = 24, + + /// + /// The 2025-01-05 service version. + /// + V2025_01_05 = 25 #pragma warning restore CA1707 // Identifiers should not contain underscores } diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs index eb312bb15a31c..eb73ce7ea2859 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs @@ -37,6 +37,7 @@ public ShareClientTestFixtureAttribute(params object[] additionalParameters) ShareClientOptions.ServiceVersion.V2024_05_04, ShareClientOptions.ServiceVersion.V2024_08_04, ShareClientOptions.ServiceVersion.V2024_11_04, + ShareClientOptions.ServiceVersion.V2025_01_05, StorageVersionExtensions.LatestVersion, StorageVersionExtensions.MaxVersion }, diff --git a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs index 08034009108be..25839b91776ca 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs @@ -107,6 +107,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class QueueMessageDecodingFailedEventArgs : Azure.SyncAsyncEventArgs diff --git 
a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.0.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.0.cs index 08034009108be..25839b91776ca 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.0.cs @@ -107,6 +107,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class QueueMessageDecodingFailedEventArgs : Azure.SyncAsyncEventArgs diff --git a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.1.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.1.cs index 08034009108be..25839b91776ca 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.netstandard2.1.cs @@ -107,6 +107,7 @@ public enum ServiceVersion V2024_05_04 = 22, V2024_08_04 = 23, V2024_11_04 = 24, + V2025_01_05 = 25, } } public partial class QueueMessageDecodingFailedEventArgs : Azure.SyncAsyncEventArgs diff --git a/sdk/storage/Azure.Storage.Queues/src/QueueClientOptions.cs b/sdk/storage/Azure.Storage.Queues/src/QueueClientOptions.cs index 53b5c5bb342a9..9468a41cc15e5 100644 --- a/sdk/storage/Azure.Storage.Queues/src/QueueClientOptions.cs +++ b/sdk/storage/Azure.Storage.Queues/src/QueueClientOptions.cs @@ -154,7 +154,12 @@ public enum ServiceVersion /// /// The 2024-11-04 service version. /// - V2024_11_04 = 24 + V2024_11_04 = 24, + + /// + /// The 2025-01-05 service version. 
+ /// + V2025_01_05 = 25 #pragma warning restore CA1707 // Identifiers should not contain underscores } diff --git a/sdk/storage/Azure.Storage.Queues/tests/QueueClientTestFixtureAttribute.cs b/sdk/storage/Azure.Storage.Queues/tests/QueueClientTestFixtureAttribute.cs index 0f2a81dbf9e52..b053e71cdf051 100644 --- a/sdk/storage/Azure.Storage.Queues/tests/QueueClientTestFixtureAttribute.cs +++ b/sdk/storage/Azure.Storage.Queues/tests/QueueClientTestFixtureAttribute.cs @@ -36,6 +36,7 @@ public QueueClientTestFixtureAttribute(params object[] additionalParameters) QueueClientOptions.ServiceVersion.V2024_05_04, QueueClientOptions.ServiceVersion.V2024_08_04, QueueClientOptions.ServiceVersion.V2024_11_04, + QueueClientOptions.ServiceVersion.V2025_01_05, StorageVersionExtensions.LatestVersion, StorageVersionExtensions.MaxVersion }, From 8e9715845d0453e9eb092de45becc1d4856a7224 Mon Sep 17 00:00:00 2001 From: Jocelyn <41338290+jaschrep-msft@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:32:10 -0400 Subject: [PATCH 02/25] Structured message cherrypick stg96 (#45496) * Structured Message Decode Stream (#42079) * Initial implementation and basic test * seek/write tests * fix test param * fix exceptions * Content validation update spec (#42191) * enum rename and footer read/write methods * align encode/decode tests | update encoding stream * decode stream footer * rename * decode tests & bugfixes (#42256) * decode tests & bugfixes * roundtrip tests * more tests * better errors | remove duplicate test * test coverage | exception message (#42363) * Structured Message Decode: Validate Content Length (#42370) * validate stream length * tests * stageblock | appendblock | putpages StructuredMessage (#42699) * regenerate and stage block uses structured message * page and append * testproxy * cleanup * fix datalake/share tests * testproxy * testproxy * re-add null-safe access * GET Blob Structured Message (#42959) * download range structured message * testproxy * throw when service 
fails to give back structured message * test * testproxy * PUT Blob Structured Message (#43130) * putblob structured message * testproxy * fixes * Structured Message: DataLake Append (#43275) * datalake append * null fix * fixes * Retriable decode (#44155) * retriable decode * rewind mock test * bugfix * bugfix * tests * Download retriable stream structured message (#44176) * blobs retriable structured message download * test proxy * testproxy * remove commented code * CRC: Always Structured Message (#44955) * blockblob working * revert testing change * page/append * datalake file * testfix * bug fixes | test fixes * disable new API for presenting CRC from structured message * fix nunit * whitespace * fix/test-proxy * csproj * more csproj removeals This is building fine locally idk what's up * Trigger Fresh Build * fileshare testproxy * fix mock * Update macos image from 11 to latest (#44607) * Update macos image from 11 to latest * Update eng/pipelines/templates/jobs/ci.mgmt.yml Co-authored-by: Ben Broderick Phillips --------- Co-authored-by: Ben Broderick Phillips * Revert "Update macos image from 11 to latest (#44607)" this is causing too many problems. skipping macos tests for now. They'll run when this feature branch merges into main. This reverts commit 29e87b496fa2b60d53849afaa926a3bd0fc23529. 
--------- Co-authored-by: Wes Haggard Co-authored-by: Ben Broderick Phillips * Enable fileshare crc (#45124) * impl * testproxy * shares fix * testproxy * block blob fix (#45129) * block blob fix * testproxy * rename and validation (#45160) * rename and validation * fix * crc tracking converted to longs (#45307) * Crc reporting pt2 (#45447) * expose crc from structured message * testproxy * undo typo * exportapi * testproxy * remove unused parameter * add `ExpectTrailingDetails` to download response * fix test inconsistency * fix auto --------- Co-authored-by: Wes Haggard Co-authored-by: Ben Broderick Phillips --- .../Azure.Storage.Blobs.Batch.Tests.csproj | 3 +- ...zure.Storage.Blobs.ChangeFeed.Tests.csproj | 3 +- .../api/Azure.Storage.Blobs.net6.0.cs | 1 + .../api/Azure.Storage.Blobs.netstandard2.0.cs | 1 + .../api/Azure.Storage.Blobs.netstandard2.1.cs | 1 + sdk/storage/Azure.Storage.Blobs/assets.json | 2 +- .../src/AppendBlobClient.cs | 45 +- .../src/Azure.Storage.Blobs.csproj | 6 + .../Azure.Storage.Blobs/src/BlobBaseClient.cs | 110 +++- .../src/BlobClientOptions.cs | 2 + .../src/BlockBlobClient.cs | 92 ++- .../Generated/AppendBlobAppendBlockHeaders.cs | 2 + .../src/Generated/AppendBlobRestClient.cs | 24 +- .../src/Generated/BlobDownloadHeaders.cs | 4 + .../src/Generated/BlobRestClient.cs | 18 +- .../src/Generated/BlockBlobRestClient.cs | 46 +- .../Generated/BlockBlobStageBlockHeaders.cs | 2 + .../src/Generated/BlockBlobUploadHeaders.cs | 2 + .../src/Generated/ContainerRestClient.cs | 2 +- .../src/Generated/PageBlobRestClient.cs | 24 +- .../Generated/PageBlobUploadPagesHeaders.cs | 2 + .../src/Generated/ServiceRestClient.cs | 2 +- .../src/Models/BlobDownloadDetails.cs | 8 + .../src/Models/BlobDownloadInfo.cs | 10 + .../src/Models/BlobDownloadStreamingResult.cs | 8 + .../Azure.Storage.Blobs/src/PageBlobClient.cs | 49 +- .../src/PartitionedDownloader.cs | 50 +- .../Azure.Storage.Blobs/src/autorest.md | 6 +- .../tests/Azure.Storage.Blobs.Tests.csproj | 3 + 
.../BlobBaseClientTransferValidationTests.cs | 113 ++-- .../tests/PartitionedDownloaderTests.cs | 2 +- .../src/Shared/ChecksumExtensions.cs | 22 + .../src/Shared/Constants.cs | 9 + .../src/Shared/Errors.Clients.cs | 10 + .../Azure.Storage.Common/src/Shared/Errors.cs | 19 + .../src/Shared/LazyLoadingReadOnlyStream.cs | 40 +- .../src/Shared/PooledMemoryStream.cs | 2 +- .../src/Shared/StorageCrc64Composer.cs | 48 +- .../StorageRequestValidationPipelinePolicy.cs | 29 + .../src/Shared/StorageVersionExtensions.cs | 2 +- .../src/Shared/StreamExtensions.cs | 22 +- .../src/Shared/StructuredMessage.cs | 244 ++++++++ ...tructuredMessageDecodingRetriableStream.cs | 264 +++++++++ .../Shared/StructuredMessageDecodingStream.cs | 542 +++++++++++++++++ .../Shared/StructuredMessageEncodingStream.cs | 545 ++++++++++++++++++ ...redMessagePrecalculatedCrcWrapperStream.cs | 451 +++++++++++++++ .../TransferValidationOptionsExtensions.cs | 7 - .../tests/Azure.Storage.Common.Tests.csproj | 9 + .../tests/Shared/FaultyStream.cs | 13 +- .../Shared/ObserveStructuredMessagePolicy.cs | 85 +++ .../tests/Shared/RequestExtensions.cs | 27 + .../Shared/TamperStreamContentsPolicy.cs | 11 +- .../Shared/TransferValidationTestBase.cs | 325 ++++++++--- ...uredMessageDecodingRetriableStreamTests.cs | 246 ++++++++ .../StructuredMessageDecodingStreamTests.cs | 323 +++++++++++ .../StructuredMessageEncodingStreamTests.cs | 271 +++++++++ .../tests/StructuredMessageHelper.cs | 68 +++ .../StructuredMessageStreamRoundtripTests.cs | 127 ++++ .../tests/StructuredMessageTests.cs | 114 ++++ .../Azure.Storage.DataMovement.Blobs.csproj | 1 + ...re.Storage.DataMovement.Blobs.Tests.csproj | 5 + ...taMovement.Blobs.Files.Shares.Tests.csproj | 1 + ...age.DataMovement.Files.Shares.Tests.csproj | 1 + .../src/Azure.Storage.DataMovement.csproj | 2 +- .../Azure.Storage.DataMovement.Tests.csproj | 1 + .../Azure.Storage.Files.DataLake/assets.json | 2 +- .../src/Azure.Storage.Files.DataLake.csproj | 5 + 
.../src/DataLakeFileClient.cs | 43 +- .../src/Generated/FileSystemRestClient.cs | 2 +- .../src/Generated/PathAppendDataHeaders.cs | 2 + .../src/Generated/PathRestClient.cs | 46 +- .../src/Generated/PathUpdateHeaders.cs | 2 + .../src/Generated/ServiceRestClient.cs | 2 +- .../src/autorest.md | 6 +- .../Azure.Storage.Files.DataLake.Tests.csproj | 3 + ...taLakeFileClientTransferValidationTests.cs | 5 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 1 + ...ure.Storage.Files.Shares.netstandard2.0.cs | 1 + .../Azure.Storage.Files.Shares/assets.json | 2 +- .../src/Azure.Storage.Files.Shares.csproj | 8 +- .../src/Generated/DirectoryRestClient.cs | 2 +- .../src/Generated/FileDownloadHeaders.cs | 4 + .../src/Generated/FileRestClient.cs | 40 +- .../src/Generated/FileUploadRangeHeaders.cs | 2 + .../src/Generated/ServiceRestClient.cs | 2 +- .../src/Generated/ShareRestClient.cs | 2 +- .../src/Models/ShareFileDownloadInfo.cs | 6 + .../src/ShareErrors.cs | 15 - .../src/ShareFileClient.cs | 165 ++++-- .../src/autorest.md | 6 +- .../Azure.Storage.Files.Shares.Tests.csproj | 1 + .../ShareFileClientTransferValidationTests.cs | 42 +- .../tests/Azure.Storage.Queues.Tests.csproj | 1 + 93 files changed, 4533 insertions(+), 414 deletions(-) create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs create mode 100644 
sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj index 2b77907e9aaac..286ab317256bf 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj @@ -23,6 +23,7 @@ + PreserveNewest @@ -42,4 +43,4 @@ - \ No newline at end of file + diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj index 9682ab15ecd60..8cf13cd60744f 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj @@ -17,6 +17,7 @@ + @@ -28,4 +29,4 @@ PreserveNewest - \ No newline at end of file + diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index 05cdde6988050..fb52e93f85a56 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -516,6 +516,7 
@@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index 05cdde6988050..fb52e93f85a56 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -516,6 +516,7 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index 05cdde6988050..fb52e93f85a56 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -516,6 +516,7 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; 
} } public byte[] ContentHash { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/assets.json b/sdk/storage/Azure.Storage.Blobs/assets.json index e0cc7497a2f22..bf650c1112c2f 100644 --- a/sdk/storage/Azure.Storage.Blobs/assets.json +++ b/sdk/storage/Azure.Storage.Blobs/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Blobs", - "Tag": "net/storage/Azure.Storage.Blobs_14eb1d6279" + "Tag": "net/storage/Azure.Storage.Blobs_d0e3597ddc" } diff --git a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs index e70d5e02c82d7..9a110cf8eb13a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs @@ -1242,14 +1242,39 @@ internal async Task> AppendBlockInternal( BlobErrors.VerifyHttpsCustomerProvidedKey(Uri, ClientConfiguration.CustomerProvidedKey); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 
0; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -1267,6 +1292,8 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), @@ -1289,6 +1316,8 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), diff --git a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj index 32b8511ab6ab4..731c7468bb7b2 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj +++ b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj @@ -52,6 +52,7 @@ + @@ -91,6 +92,11 @@ + + + + + diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs index c1416524f0221..b48da27583a98 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs @@ -1031,6 +1031,7 @@ private async Task> DownloadInternal( ContentHash = blobDownloadDetails.ContentHash, ContentLength = blobDownloadDetails.ContentLength, ContentType = blobDownloadDetails.ContentType, + ExpectTrailingDetails = blobDownloadStreamingResult.ExpectTrailingDetails, }, response.GetRawResponse()); } #endregion @@ -1547,30 +1548,52 @@ internal virtual async ValueTask> Download // Wrap the response Content in a RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. 
- Stream stream = RetriableStream.Create( - response.Value.Content, - startOffset => - StartDownloadAsync( - range, - conditionsWithEtag, - validationOptions, - startOffset, - async, - cancellationToken) - .EnsureCompleted() - .Value.Content, - async startOffset => - (await StartDownloadAsync( - range, - conditionsWithEtag, - validationOptions, - startOffset, - async, - cancellationToken) - .ConfigureAwait(false)) - .Value.Content, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); + ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) + => StartDownloadAsync( + range, + conditionsWithEtag, + validationOptions, + offset, + async, + cancellationToken); + async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( + long offset, bool async, CancellationToken cancellationToken) + { + Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); + return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.Details.ContentLength); + } + Stream stream; + if (response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( + response.Value.Content, response.Value.Details.ContentLength); + stream = new StructuredMessageDecodingRetriableStream( + decodingStream, + decodedData, + StructuredMessage.Flags.StorageCrc64, + startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) + .EnsureCompleted(), + async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false), + decodedData => + { + response.Value.Details.ContentCrc = new byte[StructuredMessage.Crc64Length]; + decodedData.Crc.WriteCrc64(response.Value.Details.ContentCrc); + }, + 
ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } + else + { + stream = RetriableStream.Create( + response.Value.Content, + startOffset => Factory(startOffset, async: false, cancellationToken) + .EnsureCompleted().Value.Content, + async startOffset => (await Factory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false)).Value.Content, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } stream = stream.WithNoDispose().WithProgress(progressHandler); @@ -1578,7 +1601,11 @@ internal virtual async ValueTask> Download * Buffer response stream and ensure it matches the transactional checksum if any. * Storage will not return a checksum for payload >4MB, so this buffer is capped similarly. * Checksum validation is opt-in, so this buffer is part of that opt-in. */ - if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) + if (validationOptions != default && + validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && + validationOptions.AutoValidateChecksum && + // structured message decoding does the validation for us + !response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)response.Value.Details.ContentLength); @@ -1649,8 +1676,8 @@ await ContentHasher.AssertResponseHashMatchInternal( /// notifications that the operation should be cancelled. /// /// - /// A describing the - /// downloaded blob. contains + /// A describing the + /// downloaded blob. contains /// the blob's data. /// /// @@ -1689,13 +1716,29 @@ private async ValueTask> StartDownloadAsyn operationName: nameof(BlobBaseClient.Download), parameterName: nameof(conditions)); + bool? rangeGetContentMD5 = null; + bool? 
rangeGetContentCRC64 = null; + string structuredBodyType = null; + switch (validationOptions?.ChecksumAlgorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + rangeGetContentMD5 = true; + break; + case StorageChecksumAlgorithm.StorageCrc64: + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + break; + default: + break; + } + if (async) { response = await BlobRestClient.DownloadAsync( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, - rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? true : null, + rangeGetContentMD5: rangeGetContentMD5, + rangeGetContentCRC64: rangeGetContentCRC64, + structuredBodyType: structuredBodyType, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, @@ -1712,8 +1755,9 @@ private async ValueTask> StartDownloadAsyn response = BlobRestClient.Download( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, - rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? true : null, + rangeGetContentMD5: rangeGetContentMD5, + rangeGetContentCRC64: rangeGetContentCRC64, + structuredBodyType: structuredBodyType, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, @@ -1729,9 +1773,11 @@ private async ValueTask> StartDownloadAsyn long length = response.IsUnavailable() ? 0 : response.Headers.ContentLength ?? 0; ClientConfiguration.Pipeline.LogTrace($"Response: {response.GetRawResponse().Status}, ContentLength: {length}"); - return Response.FromValue( + Response result = Response.FromValue( response.ToBlobDownloadStreamingResult(), response.GetRawResponse()); + result.Value.ExpectTrailingDetails = structuredBodyType != null; + return result; } #endregion diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs index b16cefc83a535..f312e621bffc4 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs @@ -318,6 +318,8 @@ private void AddHeadersAndQueryParameters() Diagnostics.LoggedHeaderNames.Add("x-ms-encryption-key-sha256"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-error-code"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-status-code"); + Diagnostics.LoggedHeaderNames.Add("x-ms-structured-body"); + Diagnostics.LoggedHeaderNames.Add("x-ms-structured-content-length"); Diagnostics.LoggedQueryParameters.Add("comp"); Diagnostics.LoggedQueryParameters.Add("maxresults"); diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs index cd6bc3788fc26..5e5ec82e96dca 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs @@ -875,14 +875,35 @@ internal virtual async Task> UploadInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content 
= content?.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (content != null && + validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64); + contentLength = content.Length - content.Position; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -921,6 +942,8 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -953,6 +976,8 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -1305,14 +1330,39 @@ internal virtual async Task> 
StageBlockInternal( Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -1320,7 +1370,7 @@ internal virtual async Task> StageBlockInternal( { response = await BlockBlobRestClient.StageBlockAsync( blockId: base64BlockId, - contentLength: (content?.Length - content?.Position) ?? 
0, + contentLength: contentLength, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1329,6 +1379,8 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -1336,7 +1388,7 @@ internal virtual async Task> StageBlockInternal( { response = BlockBlobRestClient.StageBlock( blockId: base64BlockId, - contentLength: (content?.Length - content?.Position) ?? 0, + contentLength: contentLength, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1345,6 +1397,8 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -2791,7 +2845,7 @@ internal async Task OpenWriteInternal( immutabilityPolicy: default, legalHold: default, progressHandler: default, - transferValidationOverride: default, + transferValidationOverride: new() { ChecksumAlgorithm = StorageChecksumAlgorithm.None }, operationName: default, async: async, cancellationToken: cancellationToken) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs index 9303ec3a3d653..48139cc16a682 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs @@ -35,5 +35,7 @@ public AppendBlobAppendBlockHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs index 88104aa95bb00..a3d0eca1ec405 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs @@ -29,7 +29,7 @@ internal partial class AppendBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public AppendBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -219,7 +219,7 @@ public ResponseWithHeaders Create(long contentLength, i } } - internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, string leaseId, long? maxSize, long? appendPosition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, string leaseId, long? maxSize, long? appendPosition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, DateTimeOffset? ifModifiedSince, DateTimeOffset? 
ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -285,6 +285,14 @@ internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, i request.Headers.Add("x-ms-if-tags", ifTags); } request.Headers.Add("x-ms-version", _version); + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -314,16 +322,18 @@ internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, i /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> AppendBlockAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> AppendBlockAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new AppendBlobAppendBlockHeaders(message.Response); switch (message.Response.Status) @@ -353,16 +363,18 @@ public async Task> AppendBlock /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. 
/// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders AppendBlock(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders AppendBlock(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new AppendBlobAppendBlockHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs index ad17079901a72..1897117cb01d8 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs @@ -96,6 +96,10 @@ public BlobDownloadHeaders(Response response) public BlobImmutabilityPolicyMode? ImmutabilityPolicyMode => _response.Headers.TryGetValue("x-ms-immutability-policy-mode", out string value) ? value.ToBlobImmutabilityPolicyMode() : null; /// Indicates if a legal hold is present on the blob. public bool? LegalHold => _response.Headers.TryGetValue("x-ms-legal-hold", out bool? value) ? value : null; + /// Indicates the response body contains a structured message and specifies the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; + /// The length of the blob/file content inside the message body when the response body is returned as a structured message. Will always be smaller than Content-Length. + public long? StructuredContentLength => _response.Headers.TryGetValue("x-ms-structured-content-length", out long? value) ? value : null; /// If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to true, then the request returns a crc64 for the range, as long as the range size is less than or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is specified in the same request, it will fail with 400(Bad Request). public byte[] ContentCrc64 => _response.Headers.TryGetValue("x-ms-content-crc64", out byte[] value) ? value : null; public string ErrorCode => _response.Headers.TryGetValue("x-ms-error-code", out string value) ? value : null; diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs index 615257741b781..4f891a0a14684 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class BlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public BlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -40,7 +40,7 @@ public BlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline _version = version ?? 
throw new ArgumentNullException(nameof(version)); } - internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, int? timeout, string range, string leaseId, bool? rangeGetContentMD5, bool? rangeGetContentCRC64, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, int? timeout, string range, string leaseId, bool? rangeGetContentMD5, bool? rangeGetContentCRC64, string structuredBodyType, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -77,6 +77,10 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in { request.Headers.Add("x-ms-range-get-content-crc64", rangeGetContentCRC64.Value); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } if (encryptionKey != null) { request.Headers.Add("x-ms-encryption-key", encryptionKey); @@ -122,6 +126,7 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in /// If specified, the operation only succeeds if the resource's lease is active and matches this ID. /// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. /// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size. 
+ /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. @@ -131,9 +136,9 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. /// The cancellation token to use. - public async Task> DownloadAsync(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> DownloadAsync(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string structuredBodyType = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, structuredBodyType, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlobDownloadHeaders(message.Response); switch (message.Response.Status) @@ -159,6 +164,7 @@ public async Task> DownloadAsyn /// If specified, the operation only succeeds if the resource's lease is active and matches this ID. /// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. /// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size. + /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". 
Must be provided if the x-ms-encryption-key header is provided. @@ -168,9 +174,9 @@ public async Task> DownloadAsyn /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. /// The cancellation token to use. - public ResponseWithHeaders Download(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Download(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string structuredBodyType = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, structuredBodyType, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); _pipeline.Send(message, cancellationToken); var headers = new BlobDownloadHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs index 0723c07204ac2..78ef424f66b13 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class BlockBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public BlockBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -40,7 +40,7 @@ public BlockBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pip _version = version ?? 
throw new ArgumentNullException(nameof(version)); } - internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, string blobContentType, string blobContentEncoding, string blobContentLanguage, byte[] blobContentMD5, string blobCacheControl, IDictionary metadata, string leaseId, string blobContentDisposition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, AccessTier? tier, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string blobTagsString, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, bool? legalHold, byte[] transactionalContentCrc64) + internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, string blobContentType, string blobContentEncoding, string blobContentLanguage, byte[] blobContentMD5, string blobCacheControl, IDictionary metadata, string leaseId, string blobContentDisposition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, AccessTier? tier, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string blobTagsString, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, bool? legalHold, byte[] transactionalContentCrc64, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -146,6 +146,14 @@ internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? 
t { request.Headers.Add("x-ms-content-crc64", transactionalContentCrc64, "D"); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); if (transactionalContentMD5 != null) { @@ -185,16 +193,18 @@ internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? t /// Specifies the immutability policy mode to set on the blob. /// Specified if a legal hold should be set on the blob. /// Specify the transactional crc64 for the body, to be validated by the service. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> UploadAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? 
legalHold = null, byte[] transactionalContentCrc64 = null, CancellationToken cancellationToken = default) + public async Task> UploadAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64); + using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlockBlobUploadHeaders(message.Response); switch (message.Response.Status) @@ -234,16 +244,18 @@ public async Task> UploadAsync(long /// Specifies the immutability policy mode to set on the blob. /// Specified if a legal hold should be set on the blob. /// Specify the transactional crc64 for the body, to be validated by the service. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. 
- public ResponseWithHeaders Upload(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Upload(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64); + using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new BlockBlobUploadHeaders(message.Response); switch (message.Response.Status) @@ -494,7 +506,7 @@ public ResponseWithHeaders PutBlobFromUrl(long c } } - internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope) + internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? 
encryptionAlgorithm, string encryptionScope, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -533,6 +545,14 @@ internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, request.Headers.Add("x-ms-encryption-scope", encryptionScope); } request.Headers.Add("x-ms-version", _version); + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -556,9 +576,11 @@ internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// or is null. - public async Task> StageBlockAsync(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? 
timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, CancellationToken cancellationToken = default) + public async Task> StageBlockAsync(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (blockId == null) { @@ -569,7 +591,7 @@ public async Task> StageBlockAsy throw new ArgumentNullException(nameof(body)); } - using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope); + using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlockBlobStageBlockHeaders(message.Response); switch (message.Response.Status) @@ -593,9 +615,11 @@ public async Task> StageBlockAsy /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. 
If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// or is null. - public ResponseWithHeaders StageBlock(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders StageBlock(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (blockId == null) { @@ -606,7 +630,7 @@ public ResponseWithHeaders StageBlock(string blockId throw new ArgumentNullException(nameof(body)); } - using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope); + using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new BlockBlobStageBlockHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs index 7888b27dd7383..b13a3b7d1609a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs @@ -29,5 +29,7 @@ public BlockBlobStageBlockHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs index 1cfbd3924fa55..ca024b1fb5d84 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs @@ -31,5 +31,7 @@ public BlockBlobUploadHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs index 024bfecd4e90b..9dd20ee7e1811 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs @@ -31,7 +31,7 @@ internal partial class ContainerRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". 
/// , , or is null. public ContainerRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs index 260d8021543e2..68a9e85b00d1b 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class PageBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public PageBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -235,7 +235,7 @@ public ResponseWithHeaders Create(long contentLength, lon } } - internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string range, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, long? ifSequenceNumberLessThanOrEqualTo, long? ifSequenceNumberLessThan, long? ifSequenceNumberEqualTo, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? 
timeout, string range, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, long? ifSequenceNumberLessThanOrEqualTo, long? ifSequenceNumberLessThan, long? ifSequenceNumberEqualTo, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -310,6 +310,14 @@ internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, b request.Headers.Add("x-ms-if-tags", ifTags); } request.Headers.Add("x-ms-version", _version); + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -341,16 +349,18 @@ internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, b /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> UploadPagesAsync(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? 
timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> UploadPagesAsync(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PageBlobUploadPagesHeaders(message.Response); switch (message.Response.Status) @@ -382,16 +392,18 @@ public async Task> UploadPagesAs /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders UploadPages(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? 
timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders UploadPages(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new PageBlobUploadPagesHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs index 77d37d90027aa..c04659bc43322 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs @@ -33,5 +33,7 @@ public PageBlobUploadPagesHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? 
value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs index e274940f81e8d..2abac369c0cae 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs @@ -31,7 +31,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs index bc119822cdc12..0490ec239798e 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs @@ -34,6 +34,14 @@ public class BlobDownloadDetails public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays + /// + /// When requested using , this value contains the CRC for the download blob range. + /// This value may only become populated once the network stream is fully consumed. 
If this instance is accessed through + /// , the network stream has already been consumed. Otherwise, consume the content stream before + /// checking this value. + /// + public byte[] ContentCrc { get; internal set; } + /// /// Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs index e034573b54b3a..b42801e36ab55 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs @@ -4,6 +4,8 @@ using System; using System.ComponentModel; using System.IO; +using System.Threading.Tasks; +using Azure.Core; using Azure.Storage.Shared; namespace Azure.Storage.Blobs.Models @@ -49,6 +51,14 @@ public class BlobDownloadInfo : IDisposable, IDownloadedContent /// public BlobDownloadDetails Details { get; internal set; } + /// + /// Indicates some contents of are mixed into the response stream. + /// They will not be set until has been fully consumed. These details + /// will be extracted from the content stream by the library before the calling code can + /// encounter them. + /// + public bool ExpectTrailingDetails { get; internal set; } + /// /// Constructor. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs index 4fbada6e67aad..9b7d4d4e00dad 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs @@ -24,6 +24,14 @@ internal BlobDownloadStreamingResult() { } /// public Stream Content { get; internal set; } + /// + /// Indicates some contents of are mixed into the response stream. 
+ /// They will not be set until has been fully consumed. These details + /// will be extracted from the content stream by the library before the calling code can + /// encounter them. + /// + public bool ExpectTrailingDetails { get; internal set; } + /// /// Disposes the by calling Dispose on the underlying stream. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs index fa575e41b8ebe..7038897531fbb 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs @@ -1363,15 +1363,42 @@ internal async Task> UploadPagesInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content?.WithNoDispose().WithProgress(progressHandler); - HttpRange range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + HttpRange range; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content?.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? 
new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content?.WithNoDispose().WithProgress(progressHandler); + range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); + } ResponseWithHeaders response; @@ -1388,6 +1415,8 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, @@ -1412,6 +1441,8 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, diff --git a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs index 2c52d0c256e34..08a1090716f2b 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs @@ -48,7 +48,8 @@ internal class PartitionedDownloader /// private readonly StorageChecksumAlgorithm _validationAlgorithm; private readonly int _checksumSize; - private bool UseMasterCrc => _validationAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; + // TODO disabling master crc temporarily. segment CRCs still handled. 
+ private bool UseMasterCrc => false; // _validationAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; private StorageCrc64HashAlgorithm _masterCrcCalculator = null; /// @@ -212,8 +213,20 @@ public async Task DownloadToInternal( // If the first segment was the entire blob, we'll copy that to // the output stream and finish now - long initialLength = initialResponse.Value.Details.ContentLength; - long totalLength = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange); + long initialLength; + long totalLength; + // Get blob content length downloaded from content range when available to handle transit encoding + if (string.IsNullOrWhiteSpace(initialResponse.Value.Details.ContentRange)) + { + initialLength = initialResponse.Value.Details.ContentLength; + totalLength = 0; + } + else + { + ContentRange recievedRange = ContentRange.Parse(initialResponse.Value.Details.ContentRange); + initialLength = recievedRange.End.Value - recievedRange.Start.Value + 1; + totalLength = recievedRange.Size.Value; + } if (initialLength == totalLength) { await HandleOneShotDownload(initialResponse, destination, async, cancellationToken) @@ -395,20 +408,6 @@ private async Task FinalizeDownloadInternal( } } - private static long ParseRangeTotalLength(string range) - { - if (range == null) - { - return 0; - } - int lengthSeparator = range.IndexOf("/", StringComparison.InvariantCultureIgnoreCase); - if (lengthSeparator == -1) - { - throw BlobErrors.ParsingFullHttpRangeFailed(range); - } - return long.Parse(range.Substring(lengthSeparator + 1), CultureInfo.InvariantCulture); - } - private async Task CopyToInternal( Response response, Stream destination, @@ -417,7 +416,10 @@ private async Task CopyToInternal( CancellationToken cancellationToken) { CancellationHelper.ThrowIfCancellationRequested(cancellationToken); - using IHasher hasher = ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); + // if structured message, this crc is validated in the decoding 
process. don't decode it here. + using IHasher hasher = response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) + ? null + : ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); using Stream rawSource = response.Value.Content; using Stream source = hasher != null ? ChecksumCalculatingStream.GetReadStream(rawSource, hasher.AppendHash) @@ -432,13 +434,13 @@ await source.CopyToInternal( if (hasher != null) { hasher.GetFinalHash(checksumBuffer.Span); - (ReadOnlyMemory checksum, StorageChecksumAlgorithm _) - = ContentHasher.GetResponseChecksumOrDefault(response.GetRawResponse()); - if (!checksumBuffer.Span.SequenceEqual(checksum.Span)) - { - throw Errors.HashMismatchOnStreamedDownload(response.Value.Details.ContentRange); + (ReadOnlyMemory checksum, StorageChecksumAlgorithm _) + = ContentHasher.GetResponseChecksumOrDefault(response.GetRawResponse()); + if (!checksumBuffer.Span.SequenceEqual(checksum.Span)) + { + throw Errors.HashMismatchOnStreamedDownload(response.Value.Details.ContentRange); + } } - } } private IEnumerable GetRanges(long initialLength, long totalLength) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 85fb92c2349cd..34efb5857c4a4 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/f6f50c6388fd5836fa142384641b8353a99874ef/specification/storage/data-plane/Microsoft.BlobStorage/stable/2024-08-04/blob.json + - https://github.com/Azure/azure-rest-api-specs/blob/794c6178bc06c6c9dceb139e9f9d1b35b1a99701/specification/storage/data-plane/Microsoft.BlobStorage/preview/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true @@ -34,7 +34,7 @@ directive: if (property.includes('/{containerName}/{blob}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } + } else if (property.includes('/{containerName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); @@ -158,7 +158,7 @@ directive: var newName = property.replace('/{containerName}/{blob}', ''); $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{containerName}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj index 62c7b6d17e63e..1c3856c83b64e 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj @@ -6,6 +6,9 @@ Microsoft Azure.Storage.Blobs client library tests false + + BlobSDK + diff --git a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs index 73d11612f1d8c..c502231087ed6 100644 --- 
a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +using System; using System.IO; using System.Threading.Tasks; using Azure.Core.TestFramework; @@ -37,7 +38,10 @@ protected override async Task> GetDispo StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingContainer = await ClientBuilder.GetTestContainerAsync(service: service, containerName: containerName); + var disposingContainer = await ClientBuilder.GetTestContainerAsync( + service: service, + containerName: containerName, + publicAccessType: PublicAccessType.None); disposingContainer.Container.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingContainer.Container.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; @@ -91,57 +95,96 @@ public override void TestAutoResolve() } #region Added Tests - [TestCaseSource("GetValidationAlgorithms")] - public async Task ExpectedDownloadStreamingStreamTypeReturned(StorageChecksumAlgorithm algorithm) + [Test] + public virtual async Task OlderServiceVersionThrowsOnStructuredMessage() { - await using var test = await GetDisposingContainerAsync(); + // use service version before structured message was introduced + await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( + service: ClientBuilder.GetServiceClient_SharedKey( + InstrumentClientOptions(new BlobClientOptions(BlobClientOptions.ServiceVersion.V2024_11_04))), + publicAccessType: PublicAccessType.None); // Arrange - var data = GetRandomBuffer(Constants.KB); - BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); - using (var stream = new 
MemoryStream(data)) + const int dataLength = Constants.KB; + var data = GetRandomBuffer(dataLength); + + var resourceName = GetNewResourceName(); + var blob = InstrumentClient(disposingContainer.Container.GetBlobClient(GetNewResourceName())); + await blob.UploadAsync(BinaryData.FromBytes(data)); + + var validationOptions = new DownloadTransferValidationOptions { - await blob.UploadAsync(stream); - } - // don't make options instance at all for no hash request - DownloadTransferValidationOptions transferValidation = algorithm == StorageChecksumAlgorithm.None - ? default - : new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + }; + AsyncTestDelegate operation = async () => await (await blob.DownloadStreamingAsync( + new BlobDownloadOptions + { + Range = new HttpRange(length: Constants.StructuredMessage.MaxDownloadCrcWithHeader + 1), + TransferValidation = validationOptions, + })).Value.Content.CopyToAsync(Stream.Null); + Assert.That(operation, Throws.TypeOf()); + } + + [Test] + public async Task StructuredMessagePopulatesCrcDownloadStreaming() + { + await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( + publicAccessType: PublicAccessType.None); + + const int dataLength = Constants.KB; + byte[] data = GetRandomBuffer(dataLength); + byte[] dataCrc = new byte[8]; + StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); + + var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); + await blob.UploadAsync(BinaryData.FromBytes(data)); - // Act - Response response = await blob.DownloadStreamingAsync(new BlobDownloadOptions + Response response = await blob.DownloadStreamingAsync(new() { - TransferValidation = transferValidation, - Range = new HttpRange(length: data.Length) + TransferValidation = new DownloadTransferValidationOptions + { + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + } }); - // Assert - 
// validated stream is buffered - Assert.AreEqual(typeof(MemoryStream), response.Value.Content.GetType()); + // crc is not present until response stream is consumed + Assert.That(response.Value.Details.ContentCrc, Is.Null); + + byte[] downloadedData; + using (MemoryStream ms = new()) + { + await response.Value.Content.CopyToAsync(ms); + downloadedData = ms.ToArray(); + } + + Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); + Assert.That(downloadedData, Is.EqualTo(data)); } [Test] - public async Task ExpectedDownloadStreamingStreamTypeReturned_None() + public async Task StructuredMessagePopulatesCrcDownloadContent() { - await using var test = await GetDisposingContainerAsync(); + await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( + publicAccessType: PublicAccessType.None); - // Arrange - var data = GetRandomBuffer(Constants.KB); - BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); - using (var stream = new MemoryStream(data)) - { - await blob.UploadAsync(stream); - } + const int dataLength = Constants.KB; + byte[] data = GetRandomBuffer(dataLength); + byte[] dataCrc = new byte[8]; + StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); + + var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); + await blob.UploadAsync(BinaryData.FromBytes(data)); - // Act - Response response = await blob.DownloadStreamingAsync(new BlobDownloadOptions + Response response = await blob.DownloadContentAsync(new BlobDownloadOptions() { - Range = new HttpRange(length: data.Length) + TransferValidation = new DownloadTransferValidationOptions + { + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + } }); - // Assert - // unvalidated stream type is private; just check we didn't get back a buffered stream - Assert.AreNotEqual(typeof(MemoryStream), response.Value.Content.GetType()); + Assert.That(response.Value.Details.ContentCrc, 
Is.EqualTo(dataCrc)); + Assert.That(response.Value.Content.ToArray(), Is.EqualTo(data)); } #endregion } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs index d8d4756a510c1..af408264c5bfa 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs @@ -305,7 +305,7 @@ public Response GetStream(HttpRange range, BlobRequ ContentHash = new byte[] { 1, 2, 3 }, LastModified = DateTimeOffset.Now, Metadata = new Dictionary() { { "meta", "data" } }, - ContentRange = $"bytes {range.Offset}-{range.Offset + contentLength}/{_length}", + ContentRange = $"bytes {range.Offset}-{Math.Max(1, range.Offset + contentLength - 1)}/{_length}", ETag = s_etag, ContentEncoding = "test", CacheControl = "test", diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs new file mode 100644 index 0000000000000..48304640eee43 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers.Binary; + +namespace Azure.Storage; + +internal static class ChecksumExtensions +{ + public static void WriteCrc64(this ulong crc, Span dest) + => BinaryPrimitives.WriteUInt64LittleEndian(dest, crc); + + public static bool TryWriteCrc64(this ulong crc, Span dest) + => BinaryPrimitives.TryWriteUInt64LittleEndian(dest, crc); + + public static ulong ReadCrc64(this ReadOnlySpan crc) + => BinaryPrimitives.ReadUInt64LittleEndian(crc); + + public static bool TryReadCrc64(this ReadOnlySpan crc, out ulong value) + => BinaryPrimitives.TryReadUInt64LittleEndian(crc, out value); +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs index 17a32b2d46d41..4893b971d6529 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs @@ -665,6 +665,15 @@ internal static class AccountResources internal static readonly int[] PathStylePorts = { 10000, 10001, 10002, 10003, 10004, 10100, 10101, 10102, 10103, 10104, 11000, 11001, 11002, 11003, 11004, 11100, 11101, 11102, 11103, 11104 }; } + internal static class StructuredMessage + { + public const string StructuredMessageHeader = "x-ms-structured-body"; + public const string StructuredContentLength = "x-ms-structured-content-length"; + public const string CrcStructuredMessage = "XSM/1.0; properties=crc64"; + public const int DefaultSegmentContentLength = 4 * MB; + public const int MaxDownloadCrcWithHeader = 4 * MB; + } + internal static class ClientSideEncryption { public const string HttpMessagePropertyKeyV1 = "Azure.Storage.StorageTelemetryPolicy.ClientSideEncryption.V1"; diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs index 4e5464fa17e6e..4d49edeb72ecf 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs +++ 
b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs @@ -3,6 +3,7 @@ using System; using System.Globalization; +using System.IO; using System.Linq; using System.Security.Authentication; using System.Xml.Serialization; @@ -105,9 +106,18 @@ public static ArgumentException VersionNotSupported(string paramName) public static RequestFailedException ClientRequestIdMismatch(Response response, string echo, string original) => new RequestFailedException(response.Status, $"Response x-ms-client-request-id '{echo}' does not match the original expected request id, '{original}'.", null); + public static InvalidDataException StructuredMessageNotAcknowledgedGET(Response response) + => new InvalidDataException($"Response does not acknowledge structured message was requested. Unknown data structure in response body."); + + public static InvalidDataException StructuredMessageNotAcknowledgedPUT(Response response) + => new InvalidDataException($"Response does not acknowledge structured message was sent. 
Unexpected data may have been persisted to storage."); + public static ArgumentException TransactionalHashingNotSupportedWithClientSideEncryption() => new ArgumentException("Client-side encryption and transactional hashing are not supported at the same time."); + public static InvalidDataException ExpectedStructuredMessage() + => new InvalidDataException($"Expected {Constants.StructuredMessage.StructuredMessageHeader} in response, but found none."); + public static void VerifyHttpsTokenAuth(Uri uri) { if (uri.Scheme != Constants.Https) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs index 6b89a59011d51..e3372665928c1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs @@ -72,6 +72,9 @@ public static ArgumentException CannotDeferTransactionalHashVerification() public static ArgumentException CannotInitializeWriteStreamWithData() => new ArgumentException("Initialized buffer for StorageWriteStream must be empty."); + public static InvalidDataException InvalidStructuredMessage(string optionalMessage = default) + => new InvalidDataException(("Invalid structured message data. " + optionalMessage ?? "").Trim()); + internal static void VerifyStreamPosition(Stream stream, string streamName) { if (stream != null && stream.CanSeek && stream.Length > 0 && stream.Position >= stream.Length) @@ -80,6 +83,22 @@ internal static void VerifyStreamPosition(Stream stream, string streamName) } } + internal static void AssertBufferMinimumSize(ReadOnlySpan buffer, int minSize, string paramName) + { + if (buffer.Length < minSize) + { + throw new ArgumentException($"Expected buffer Length of at least {minSize} bytes. 
Got {buffer.Length}.", paramName); + } + } + + internal static void AssertBufferExactSize(ReadOnlySpan buffer, int size, string paramName) + { + if (buffer.Length != size) + { + throw new ArgumentException($"Expected buffer Length of exactly {size} bytes. Got {buffer.Length}.", paramName); + } + } + public static void ThrowIfParamNull(object obj, string paramName) { if (obj == null) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs index c3e9c641c3fea..fe2db427bef02 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs @@ -249,41 +249,9 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat response = await _downloadInternalFunc(range, _validationOptions, async, cancellationToken).ConfigureAwait(false); using Stream networkStream = response.Value.Content; - - // The number of bytes we just downloaded. - long downloadSize = GetResponseRange(response.GetRawResponse()).Length.Value; - - // The number of bytes we copied in the last loop. - int copiedBytes; - - // Bytes we have copied so far. - int totalCopiedBytes = 0; - - // Bytes remaining to copy. It is save to truncate the long because we asked for a max of int _buffer size bytes. - int remainingBytes = (int)downloadSize; - - do - { - if (async) - { - copiedBytes = await networkStream.ReadAsync( - buffer: _buffer, - offset: totalCopiedBytes, - count: remainingBytes, - cancellationToken: cancellationToken).ConfigureAwait(false); - } - else - { - copiedBytes = networkStream.Read( - buffer: _buffer, - offset: totalCopiedBytes, - count: remainingBytes); - } - - totalCopiedBytes += copiedBytes; - remainingBytes -= copiedBytes; - } - while (copiedBytes != 0); + // use stream copy to ensure consumption of any trailing metadata (e.g. 
structured message) + // allow buffer limits to catch the error of data size mismatch + int totalCopiedBytes = (int) await networkStream.CopyToInternal(new MemoryStream(_buffer), async, cancellationToken).ConfigureAwait((false)); _bufferPosition = 0; _bufferLength = totalCopiedBytes; @@ -291,7 +259,7 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat // if we deferred transactional hash validation on download, validate now // currently we always defer but that may change - if (_validationOptions != default && _validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && !_validationOptions.AutoValidateChecksum) + if (_validationOptions != default && _validationOptions.ChecksumAlgorithm == StorageChecksumAlgorithm.MD5 && !_validationOptions.AutoValidateChecksum) // TODO better condition { ContentHasher.AssertResponseHashMatch(_buffer, _bufferPosition, _bufferLength, _validationOptions.ChecksumAlgorithm, response.GetRawResponse()); } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs index 3e218d18a90af..6070329d10d3d 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs @@ -251,7 +251,7 @@ public override int Read(byte[] buffer, int offset, int count) Length - Position, bufferCount - (Position - offsetOfBuffer), count - read); - Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, read, toCopy); + Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, offset + read, toCopy); read += toCopy; Position += toCopy; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs index ab6b76d78a87e..307ff23b21144 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs +++ 
b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs @@ -12,22 +12,52 @@ namespace Azure.Storage /// internal static class StorageCrc64Composer { - public static Memory Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) + public static byte[] Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) + => Compose(partitions.AsEnumerable()); + + public static byte[] Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) { - return Compose(partitions.AsEnumerable()); + ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); + return BitConverter.GetBytes(result); } - public static Memory Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) + public static byte[] Compose(params (ReadOnlyMemory Crc64, long OriginalDataLength)[] partitions) + => Compose(partitions.AsEnumerable()); + + public static byte[] Compose(IEnumerable<(ReadOnlyMemory Crc64, long OriginalDataLength)> partitions) { - ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); - return new Memory(BitConverter.GetBytes(result)); +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64.Span), tup.OriginalDataLength))); +#else + ulong result = Compose(partitions.Select(tup => (System.BitConverter.ToUInt64(tup.Crc64.ToArray(), 0), tup.OriginalDataLength))); +#endif + return BitConverter.GetBytes(result); } + public static byte[] Compose( + ReadOnlySpan leftCrc64, long leftOriginalDataLength, + ReadOnlySpan rightCrc64, long rightOriginalDataLength) + { +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + ulong result = Compose( + (BitConverter.ToUInt64(leftCrc64), leftOriginalDataLength), + (BitConverter.ToUInt64(rightCrc64), rightOriginalDataLength)); +#else + ulong result = Compose( + 
(BitConverter.ToUInt64(leftCrc64.ToArray(), 0), leftOriginalDataLength), + (BitConverter.ToUInt64(rightCrc64.ToArray(), 0), rightOriginalDataLength)); +#endif + return BitConverter.GetBytes(result); + } + + public static ulong Compose(params (ulong Crc64, long OriginalDataLength)[] partitions) + => Compose(partitions.AsEnumerable()); + public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> partitions) { ulong composedCrc = 0; long composedDataLength = 0; - foreach (var tup in partitions) + foreach ((ulong crc64, long originalDataLength) in partitions) { composedCrc = StorageCrc64Calculator.Concatenate( uInitialCrcAB: 0, @@ -35,9 +65,9 @@ public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> uFinalCrcA: composedCrc, uSizeA: (ulong) composedDataLength, uInitialCrcB: 0, - uFinalCrcB: tup.Crc64, - uSizeB: (ulong)tup.OriginalDataLength); - composedDataLength += tup.OriginalDataLength; + uFinalCrcB: crc64, + uSizeB: (ulong)originalDataLength); + composedDataLength += originalDataLength; } return composedCrc; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs index 0cef4f4d8d4ed..9f4ddb5249e82 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs @@ -33,6 +33,35 @@ public override void OnReceivedResponse(HttpMessage message) { throw Errors.ClientRequestIdMismatch(message.Response, echo.First(), original); } + + if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && + message.Request.Headers.Contains(Constants.StructuredMessage.StructuredContentLength)) + { + AssertStructuredMessageAcknowledgedPUT(message); + } + else if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + 
AssertStructuredMessageAcknowledgedGET(message); + } + } + + private static void AssertStructuredMessageAcknowledgedPUT(HttpMessage message) + { + if (!message.Response.IsError && + !message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + throw Errors.StructuredMessageNotAcknowledgedPUT(message.Response); + } + } + + private static void AssertStructuredMessageAcknowledgedGET(HttpMessage message) + { + if (!message.Response.IsError && + !(message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && + message.Response.Headers.Contains(Constants.StructuredMessage.StructuredContentLength))) + { + throw Errors.StructuredMessageNotAcknowledgedGET(message.Response); + } } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs index 2a7bd90fb82a1..44c0973ea9be1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs @@ -46,7 +46,7 @@ internal static class StorageVersionExtensions /// public const ServiceVersion LatestVersion = #if BlobSDK || QueueSDK || FileSDK || DataLakeSDK || ChangeFeedSDK || DataMovementSDK || BlobDataMovementSDK || ShareDataMovementSDK - ServiceVersion.V2024_11_04; + ServiceVersion.V2025_01_05; #else ERROR_STORAGE_SERVICE_NOT_DEFINED; #endif diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs index 31f121d414ea4..c8803ecf421e7 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+using System; +using System.Buffers; using System.IO; using System.Threading; using System.Threading.Tasks; @@ -48,7 +50,7 @@ public static async Task WriteInternal( } } - public static Task CopyToInternal( + public static Task CopyToInternal( this Stream src, Stream dest, bool async, @@ -79,21 +81,33 @@ public static Task CopyToInternal( /// Cancellation token for the operation. /// /// - public static async Task CopyToInternal( + public static async Task CopyToInternal( this Stream src, Stream dest, int bufferSize, bool async, CancellationToken cancellationToken) { + using IDisposable _ = ArrayPool.Shared.RentDisposable(bufferSize, out byte[] buffer); + long totalRead = 0; + int read; if (async) { - await src.CopyToAsync(dest, bufferSize, cancellationToken).ConfigureAwait(false); + while (0 < (read = await src.ReadAsync(buffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false))) + { + totalRead += read; + await dest.WriteAsync(buffer, 0, read, cancellationToken).ConfigureAwait(false); + } } else { - src.CopyTo(dest, bufferSize); + while (0 < (read = src.Read(buffer, 0, buffer.Length))) + { + totalRead += read; + dest.Write(buffer, 0, read); + } } + return totalRead; } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs new file mode 100644 index 0000000000000..a0a46837797b9 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs @@ -0,0 +1,244 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.IO; +using Azure.Storage.Common; + +namespace Azure.Storage.Shared; + +internal static class StructuredMessage +{ + public const int Crc64Length = 8; + + [Flags] + public enum Flags + { + None = 0, + StorageCrc64 = 1, + } + + public static class V1_0 + { + public const byte MessageVersionByte = 1; + + public const int StreamHeaderLength = 13; + public const int StreamHeaderVersionOffset = 0; + public const int StreamHeaderMessageLengthOffset = 1; + public const int StreamHeaderFlagsOffset = 9; + public const int StreamHeaderSegmentCountOffset = 11; + + public const int SegmentHeaderLength = 10; + public const int SegmentHeaderNumOffset = 0; + public const int SegmentHeaderContentLengthOffset = 2; + + #region Stream Header + public static void ReadStreamHeader( + ReadOnlySpan buffer, + out long messageLength, + out Flags flags, + out int totalSegments) + { + Errors.AssertBufferExactSize(buffer, 13, nameof(buffer)); + if (buffer[StreamHeaderVersionOffset] != 1) + { + throw new InvalidDataException("Unrecognized version of structured message."); + } + messageLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(StreamHeaderMessageLengthOffset, 8)); + flags = (Flags)BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderFlagsOffset, 2)); + totalSegments = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderSegmentCountOffset, 2)); + } + + public static int WriteStreamHeader( + Span buffer, + long messageLength, + Flags flags, + int totalSegments) + { + const int versionOffset = 0; + const int messageLengthOffset = 1; + const int flagsOffset = 9; + const int numSegmentsOffset = 11; + + Errors.AssertBufferMinimumSize(buffer, StreamHeaderLength, nameof(buffer)); + + buffer[versionOffset] = MessageVersionByte; + BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(messageLengthOffset, 8), (ulong)messageLength); + 
BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(flagsOffset, 2), (ushort)flags); + BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(numSegmentsOffset, 2), (ushort)totalSegments); + + return StreamHeaderLength; + } + + /// + /// Gets stream header in a buffer rented from the provided ArrayPool. + /// + /// + /// Disposable to return the buffer to the pool. + /// + public static IDisposable GetStreamHeaderBytes( + ArrayPool pool, + out Memory bytes, + long messageLength, + Flags flags, + int totalSegments) + { + Argument.AssertNotNull(pool, nameof(pool)); + IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); + WriteStreamHeader(bytes.Span, messageLength, flags, totalSegments); + return disposable; + } + #endregion + + #region StreamFooter + public static int GetStreamFooterSize(Flags flags) + => flags.HasFlag(Flags.StorageCrc64) ? Crc64Length : 0; + + public static void ReadStreamFooter( + ReadOnlySpan buffer, + Flags flags, + out ulong crc64) + { + int expectedBufferSize = GetSegmentFooterSize(flags); + Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer)); + + crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default; + } + + public static int WriteStreamFooter(Span buffer, ReadOnlySpan crc64 = default) + { + int requiredSpace = 0; + if (!crc64.IsEmpty) + { + Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64)); + requiredSpace += Crc64Length; + } + + Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer)); + int offset = 0; + if (!crc64.IsEmpty) + { + crc64.CopyTo(buffer.Slice(offset, Crc64Length)); + offset += Crc64Length; + } + + return offset; + } + + /// + /// Gets stream header in a buffer rented from the provided ArrayPool. + /// + /// + /// Disposable to return the buffer to the pool. 
+ /// + public static IDisposable GetStreamFooterBytes( + ArrayPool pool, + out Memory bytes, + ReadOnlySpan crc64 = default) + { + Argument.AssertNotNull(pool, nameof(pool)); + IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); + WriteStreamFooter(bytes.Span, crc64); + return disposable; + } + #endregion + + #region SegmentHeader + public static void ReadSegmentHeader( + ReadOnlySpan buffer, + out int segmentNum, + out long contentLength) + { + Errors.AssertBufferExactSize(buffer, 10, nameof(buffer)); + segmentNum = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(0, 2)); + contentLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(2, 8)); + } + + public static int WriteSegmentHeader(Span buffer, int segmentNum, long segmentLength) + { + const int segmentNumOffset = 0; + const int segmentLengthOffset = 2; + + Errors.AssertBufferMinimumSize(buffer, SegmentHeaderLength, nameof(buffer)); + + BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(segmentNumOffset, 2), (ushort)segmentNum); + BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(segmentLengthOffset, 8), (ulong)segmentLength); + + return SegmentHeaderLength; + } + + /// + /// Gets segment header in a buffer rented from the provided ArrayPool. + /// + /// + /// Disposable to return the buffer to the pool. + /// + public static IDisposable GetSegmentHeaderBytes( + ArrayPool pool, + out Memory bytes, + int segmentNum, + long segmentLength) + { + Argument.AssertNotNull(pool, nameof(pool)); + IDisposable disposable = pool.RentAsMemoryDisposable(SegmentHeaderLength, out bytes); + WriteSegmentHeader(bytes.Span, segmentNum, segmentLength); + return disposable; + } + #endregion + + #region SegmentFooter + public static int GetSegmentFooterSize(Flags flags) + => flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; + + public static void ReadSegmentFooter( + ReadOnlySpan buffer, + Flags flags, + out ulong crc64) + { + int expectedBufferSize = GetSegmentFooterSize(flags); + Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer)); + + crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default; + } + + public static int WriteSegmentFooter(Span buffer, ReadOnlySpan crc64 = default) + { + int requiredSpace = 0; + if (!crc64.IsEmpty) + { + Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64)); + requiredSpace += Crc64Length; + } + + Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer)); + int offset = 0; + if (!crc64.IsEmpty) + { + crc64.CopyTo(buffer.Slice(offset, Crc64Length)); + offset += Crc64Length; + } + + return offset; + } + + /// + /// Gets stream header in a buffer rented from the provided ArrayPool. + /// + /// + /// Disposable to return the buffer to the pool. + /// + public static IDisposable GetSegmentFooterBytes( + ArrayPool pool, + out Memory bytes, + ReadOnlySpan crc64 = default) + { + Argument.AssertNotNull(pool, nameof(pool)); + IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); + WriteSegmentFooter(bytes.Span, crc64); + return disposable; + } + #endregion + } +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs new file mode 100644 index 0000000000000..22dfaef259972 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs @@ -0,0 +1,264 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core; +using Azure.Core.Pipeline; + +namespace Azure.Storage.Shared; + +internal class StructuredMessageDecodingRetriableStream : Stream +{ + public class DecodedData + { + public ulong Crc { get; set; } + } + + private readonly Stream _innerRetriable; + private long _decodedBytesRead; + + private readonly StructuredMessage.Flags _expectedFlags; + private readonly List _decodedDatas; + private readonly Action _onComplete; + + private StorageCrc64HashAlgorithm _totalContentCrc; + + private readonly Func _decodingStreamFactory; + private readonly Func> _decodingAsyncStreamFactory; + + public StructuredMessageDecodingRetriableStream( + Stream initialDecodingStream, + StructuredMessageDecodingStream.RawDecodedData initialDecodedData, + StructuredMessage.Flags expectedFlags, + Func decodingStreamFactory, + Func> decodingAsyncStreamFactory, + Action onComplete, + ResponseClassifier responseClassifier, + int maxRetries) + { + _decodingStreamFactory = decodingStreamFactory; + _decodingAsyncStreamFactory = decodingAsyncStreamFactory; + _innerRetriable = RetriableStream.Create(initialDecodingStream, StreamFactory, StreamFactoryAsync, responseClassifier, maxRetries); + _decodedDatas = new() { initialDecodedData }; + _expectedFlags = expectedFlags; + _onComplete = onComplete; + + if (expectedFlags.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + _totalContentCrc = StorageCrc64HashAlgorithm.Create(); + } + } + + private Stream StreamFactory(long _) + { + long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum(); + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = _decodingStreamFactory(offset); + _decodedDatas.Add(decodedData); + FastForwardInternal(decodingStream, _decodedBytesRead - offset, 
false).EnsureCompleted(); + return decodingStream; + } + + private async ValueTask StreamFactoryAsync(long _) + { + long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum(); + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = await _decodingAsyncStreamFactory(offset).ConfigureAwait(false); + _decodedDatas.Add(decodedData); + await FastForwardInternal(decodingStream, _decodedBytesRead - offset, true).ConfigureAwait(false); + return decodingStream; + } + + private static async ValueTask FastForwardInternal(Stream stream, long bytes, bool async) + { + using (ArrayPool.Shared.RentDisposable(4 * Constants.KB, out byte[] buffer)) + { + if (async) + { + while (bytes > 0) + { + bytes -= await stream.ReadAsync(buffer, 0, (int)Math.Min(bytes, buffer.Length)).ConfigureAwait(false); + } + } + else + { + while (bytes > 0) + { + bytes -= stream.Read(buffer, 0, (int)Math.Min(bytes, buffer.Length)); + } + } + } + } + + protected override void Dispose(bool disposing) + { + _decodedDatas.Clear(); + _innerRetriable.Dispose(); + } + + private void OnCompleted() + { + DecodedData final = new(); + if (_totalContentCrc != null) + { + final.Crc = ValidateCrc(); + } + _onComplete?.Invoke(final); + } + + private ulong ValidateCrc() + { + using IDisposable _ = ArrayPool.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf); + Span calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length); + _totalContentCrc.GetCurrentHash(calculatedBytes); + ulong calculated = BinaryPrimitives.ReadUInt64LittleEndian(calculatedBytes); + + ulong reported = _decodedDatas.Count == 1 + ? 
_decodedDatas.First().TotalCrc.Value + : StorageCrc64Composer.Compose(_decodedDatas.SelectMany(d => d.SegmentCrcs)); + + if (calculated != reported) + { + Span reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length); + BinaryPrimitives.WriteUInt64LittleEndian(reportedBytes, reported); + throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes); + } + + return calculated; + } + + #region Read + public override int Read(byte[] buffer, int offset, int count) + { + int read = _innerRetriable.Read(buffer, offset, count); + _decodedBytesRead += read; + if (read == 0) + { + OnCompleted(); + } + else + { + _totalContentCrc?.Append(new ReadOnlySpan(buffer, offset, read)); + } + return read; + } + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + int read = await _innerRetriable.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); + _decodedBytesRead += read; + if (read == 0) + { + OnCompleted(); + } + else + { + _totalContentCrc?.Append(new ReadOnlySpan(buffer, offset, read)); + } + return read; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override int Read(Span buffer) + { + int read = _innerRetriable.Read(buffer); + _decodedBytesRead += read; + if (read == 0) + { + OnCompleted(); + } + else + { + _totalContentCrc?.Append(buffer.Slice(0, read)); + } + return read; + } + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + int read = await _innerRetriable.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _decodedBytesRead += read; + if (read == 0) + { + OnCompleted(); + } + else + { + _totalContentCrc?.Append(buffer.Span.Slice(0, read)); + } + return read; + } +#endif + + public override int ReadByte() + { + int val = _innerRetriable.ReadByte(); + _decodedBytesRead += 1; + if (val == -1) + { + OnCompleted(); + } + return val; + } + + public override int 
EndRead(IAsyncResult asyncResult) + { + int read = _innerRetriable.EndRead(asyncResult); + _decodedBytesRead += read; + if (read == 0) + { + OnCompleted(); + } + return read; + } + #endregion + + #region Passthru + public override bool CanRead => _innerRetriable.CanRead; + + public override bool CanSeek => _innerRetriable.CanSeek; + + public override bool CanWrite => _innerRetriable.CanWrite; + + public override bool CanTimeout => _innerRetriable.CanTimeout; + + public override long Length => _innerRetriable.Length; + + public override long Position { get => _innerRetriable.Position; set => _innerRetriable.Position = value; } + + public override void Flush() => _innerRetriable.Flush(); + + public override Task FlushAsync(CancellationToken cancellationToken) => _innerRetriable.FlushAsync(cancellationToken); + + public override long Seek(long offset, SeekOrigin origin) => _innerRetriable.Seek(offset, origin); + + public override void SetLength(long value) => _innerRetriable.SetLength(value); + + public override void Write(byte[] buffer, int offset, int count) => _innerRetriable.Write(buffer, offset, count); + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _innerRetriable.WriteAsync(buffer, offset, count, cancellationToken); + + public override void WriteByte(byte value) => _innerRetriable.WriteByte(value); + + public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginWrite(buffer, offset, count, callback, state); + + public override void EndWrite(IAsyncResult asyncResult) => _innerRetriable.EndWrite(asyncResult); + + public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginRead(buffer, offset, count, callback, state); + + public override int ReadTimeout { get => _innerRetriable.ReadTimeout; set => _innerRetriable.ReadTimeout = value; } + + 
public override int WriteTimeout { get => _innerRetriable.WriteTimeout; set => _innerRetriable.WriteTimeout = value; } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override void Write(ReadOnlySpan buffer) => _innerRetriable.Write(buffer); + + public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) => _innerRetriable.WriteAsync(buffer, cancellationToken); +#endif + #endregion +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs new file mode 100644 index 0000000000000..e6b193ae18260 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs @@ -0,0 +1,542 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Azure.Storage.Common; + +namespace Azure.Storage.Shared; + +/// +/// Decodes a structured message stream as the data is read. +/// +/// +/// Wraps the inner stream in a , which avoids using its internal +/// buffer if individual Read() calls are larger than it. This ensures one of the three scenarios +/// +/// +/// Read buffer >= stream buffer: +/// There is enough space in the read buffer for inline metadata to be safely +/// extracted in only one read to the true inner stream. +/// +/// +/// Read buffer < next inline metadata: +/// The stream buffer has been activated, and we can read multiple small times from the inner stream +/// without multi-reading the real stream, even when partway through an existing stream buffer. 
+/// +/// +/// Else: +/// Same as #1, but also the already-allocated stream buffer has been used to slightly improve +/// resource churn when reading inner stream. +/// +/// +/// +internal class StructuredMessageDecodingStream : Stream +{ + internal class RawDecodedData + { + public long? InnerStreamLength { get; set; } + public int? TotalSegments { get; set; } + public StructuredMessage.Flags? Flags { get; set; } + public List<(ulong SegmentCrc, long SegmentLen)> SegmentCrcs { get; } = new(); + public ulong? TotalCrc { get; set; } + public bool DecodeCompleted { get; set; } + } + + private enum SMRegion + { + StreamHeader, + StreamFooter, + SegmentHeader, + SegmentFooter, + SegmentContent, + } + + private readonly Stream _innerBufferedStream; + + private byte[] _metadataBuffer = ArrayPool.Shared.Rent(Constants.KB); + private int _metadataBufferOffset = 0; + private int _metadataBufferLength = 0; + + private int _streamHeaderLength; + private int _streamFooterLength; + private int _segmentHeaderLength; + private int _segmentFooterLength; + + private long? _expectedInnerStreamLength; + + private bool _disposed; + + private readonly RawDecodedData _decodedData; + private StorageCrc64HashAlgorithm _totalContentCrc; + private StorageCrc64HashAlgorithm _segmentCrc; + + private readonly bool _validateChecksums; + + public override bool CanRead => true; + + public override bool CanWrite => false; + + public override bool CanSeek => false; + + public override bool CanTimeout => _innerBufferedStream.CanTimeout; + + public override int ReadTimeout => _innerBufferedStream.ReadTimeout; + + public override int WriteTimeout => _innerBufferedStream.WriteTimeout; + + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + + public static (Stream DecodedStream, RawDecodedData DecodedData) WrapStream( + Stream innerStream, + long? 
expextedStreamLength = default) + { + RawDecodedData data = new(); + return (new StructuredMessageDecodingStream(innerStream, data, expextedStreamLength), data); + } + + private StructuredMessageDecodingStream( + Stream innerStream, + RawDecodedData decodedData, + long? expectedStreamLength) + { + Argument.AssertNotNull(innerStream, nameof(innerStream)); + Argument.AssertNotNull(decodedData, nameof(decodedData)); + + _expectedInnerStreamLength = expectedStreamLength; + _innerBufferedStream = new BufferedStream(innerStream); + _decodedData = decodedData; + + // Assumes stream will be structured message 1.0. Will validate this when consuming stream. + _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; + _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; + + _validateChecksums = true; + } + + #region Write + public override void Flush() => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotSupportedException(); + #endregion + + #region Read + public override int Read(byte[] buf, int offset, int count) + { + int decodedRead; + int read; + do + { + read = _innerBufferedStream.Read(buf, offset, count); + _innerStreamConsumed += read; + decodedRead = Decode(new Span(buf, offset, read)); + } while (decodedRead <= 0 && read > 0); + + if (read <= 0) + { + AssertDecodeFinished(); + } + + return decodedRead; + } + + public override async Task ReadAsync(byte[] buf, int offset, int count, CancellationToken cancellationToken) + { + int decodedRead; + int read; + do + { + read = await _innerBufferedStream.ReadAsync(buf, offset, count, cancellationToken).ConfigureAwait(false); + _innerStreamConsumed += read; + decodedRead = Decode(new Span(buf, offset, read)); + } while (decodedRead <= 0 && read > 0); + + if (read <= 0) + { + AssertDecodeFinished(); + } + + return decodedRead; + } + +#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override int Read(Span buf) + { + int decodedRead; + int read; + do + { + read = _innerBufferedStream.Read(buf); + _innerStreamConsumed += read; + decodedRead = Decode(buf.Slice(0, read)); + } while (decodedRead <= 0 && read > 0); + + if (read <= 0) + { + AssertDecodeFinished(); + } + + return decodedRead; + } + + public override async ValueTask ReadAsync(Memory buf, CancellationToken cancellationToken = default) + { + int decodedRead; + int read; + do + { + read = await _innerBufferedStream.ReadAsync(buf).ConfigureAwait(false); + _innerStreamConsumed += read; + decodedRead = Decode(buf.Slice(0, read).Span); + } while (decodedRead <= 0 && read > 0); + + if (read <= 0) + { + AssertDecodeFinished(); + } + + return decodedRead; + } +#endif + + private void AssertDecodeFinished() + { + if (_streamFooterLength > 0 && !_decodedData.DecodeCompleted) + { + throw Errors.InvalidStructuredMessage("Premature end of stream."); + } + _decodedData.DecodeCompleted = true; + } + + private long _innerStreamConsumed = 0; + private long _decodedContentConsumed = 0; + private SMRegion _currentRegion = SMRegion.StreamHeader; + private int _currentSegmentNum = 0; + private long _currentSegmentContentLength; + private long _currentSegmentContentRemaining; + private long CurrentRegionLength => _currentRegion switch + { + SMRegion.StreamHeader => _streamHeaderLength, + SMRegion.StreamFooter => _streamFooterLength, + SMRegion.SegmentHeader => _segmentHeaderLength, + SMRegion.SegmentFooter => _segmentFooterLength, + SMRegion.SegmentContent => _currentSegmentContentLength, + _ => 0, + }; + + /// + /// Decodes given bytes in place. Decoding based on internal stream position info. + /// Decoded data size will be less than or equal to encoded data length. + /// + /// + /// Length of the decoded data in . 
+ /// + private int Decode(Span buffer) + { + if (buffer.IsEmpty) + { + return 0; + } + List<(int Offset, int Count)> gaps = new(); + + int bufferConsumed = ProcessMetadataBuffer(buffer); + + if (bufferConsumed > 0) + { + gaps.Add((0, bufferConsumed)); + } + + while (bufferConsumed < buffer.Length) + { + if (_currentRegion == SMRegion.SegmentContent) + { + int read = (int)Math.Min(buffer.Length - bufferConsumed, _currentSegmentContentRemaining); + _totalContentCrc?.Append(buffer.Slice(bufferConsumed, read)); + _segmentCrc?.Append(buffer.Slice(bufferConsumed, read)); + bufferConsumed += read; + _decodedContentConsumed += read; + _currentSegmentContentRemaining -= read; + if (_currentSegmentContentRemaining == 0) + { + _currentRegion = SMRegion.SegmentFooter; + } + } + else if (buffer.Length - bufferConsumed < CurrentRegionLength) + { + SavePartialMetadata(buffer.Slice(bufferConsumed)); + gaps.Add((bufferConsumed, buffer.Length - bufferConsumed)); + bufferConsumed = buffer.Length; + } + else + { + int processed = _currentRegion switch + { + SMRegion.StreamHeader => ProcessStreamHeader(buffer.Slice(bufferConsumed)), + SMRegion.StreamFooter => ProcessStreamFooter(buffer.Slice(bufferConsumed)), + SMRegion.SegmentHeader => ProcessSegmentHeader(buffer.Slice(bufferConsumed)), + SMRegion.SegmentFooter => ProcessSegmentFooter(buffer.Slice(bufferConsumed)), + _ => 0, + }; + // TODO surface error if processed is 0 + gaps.Add((bufferConsumed, processed)); + bufferConsumed += processed; + } + } + + if (gaps.Count == 0) + { + return buffer.Length; + } + + // gaps is already sorted by offset due to how it was assembled + int gap = 0; + for (int i = gaps.First().Offset; i < buffer.Length; i++) + { + if (gaps.Count > 0 && gaps.First().Offset == i) + { + int count = gaps.First().Count; + gap += count; + i += count - 1; + gaps.RemoveAt(0); + } + else + { + buffer[i - gap] = buffer[i]; + } + } + return buffer.Length - gap; + } + + /// + /// Processes metadata in the internal buffer, if 
any. Appends any necessary data + /// from the append buffer to complete metadata. + /// + /// + /// Bytes consumed from . + /// + private int ProcessMetadataBuffer(ReadOnlySpan append) + { + if (_metadataBufferLength == 0) + { + return 0; + } + if (_currentRegion == SMRegion.SegmentContent) + { + return 0; + } + int appended = 0; + if (_metadataBufferLength < CurrentRegionLength && append.Length > 0) + { + appended = Math.Min((int)CurrentRegionLength - _metadataBufferLength, append.Length); + SavePartialMetadata(append.Slice(0, appended)); + } + if (_metadataBufferLength == CurrentRegionLength) + { + Span metadata = new(_metadataBuffer, _metadataBufferOffset, (int)CurrentRegionLength); + switch (_currentRegion) + { + case SMRegion.StreamHeader: + ProcessStreamHeader(metadata); + break; + case SMRegion.StreamFooter: + ProcessStreamFooter(metadata); + break; + case SMRegion.SegmentHeader: + ProcessSegmentHeader(metadata); + break; + case SMRegion.SegmentFooter: + ProcessSegmentFooter(metadata); + break; + } + _metadataBufferOffset = 0; + _metadataBufferLength = 0; + } + return appended; + } + + private void SavePartialMetadata(ReadOnlySpan span) + { + // safety array resize w/ArrayPool + if (_metadataBufferLength + span.Length > _metadataBuffer.Length) + { + ResizeMetadataBuffer(2 * (_metadataBufferLength + span.Length)); + } + + // realign any existing content if necessary + if (_metadataBufferLength != 0 && _metadataBufferOffset != 0) + { + // don't use Array.Copy() to move elements in the same array + for (int i = 0; i < _metadataBufferLength; i++) + { + _metadataBuffer[i] = _metadataBuffer[i + _metadataBufferOffset]; + } + _metadataBufferOffset = 0; + } + + span.CopyTo(new Span(_metadataBuffer, _metadataBufferOffset + _metadataBufferLength, span.Length)); + _metadataBufferLength += span.Length; + } + + private int ProcessStreamHeader(ReadOnlySpan span) + { + StructuredMessage.V1_0.ReadStreamHeader( + span.Slice(0, _streamHeaderLength), + out long streamLength, + 
out StructuredMessage.Flags flags, + out int totalSegments); + + _decodedData.InnerStreamLength = streamLength; + _decodedData.Flags = flags; + _decodedData.TotalSegments = totalSegments; + + if (_expectedInnerStreamLength.HasValue && _expectedInnerStreamLength.Value != streamLength) + { + throw Errors.InvalidStructuredMessage("Unexpected message size."); + } + + if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + _segmentFooterLength = StructuredMessage.Crc64Length; + _streamFooterLength = StructuredMessage.Crc64Length; + if (_validateChecksums) + { + _segmentCrc = StorageCrc64HashAlgorithm.Create(); + _totalContentCrc = StorageCrc64HashAlgorithm.Create(); + } + } + _currentRegion = SMRegion.SegmentHeader; + return _streamHeaderLength; + } + + private int ProcessStreamFooter(ReadOnlySpan span) + { + int footerLen = StructuredMessage.V1_0.GetStreamFooterSize(_decodedData.Flags.Value); + StructuredMessage.V1_0.ReadStreamFooter( + span.Slice(0, footerLen), + _decodedData.Flags.Value, + out ulong reportedCrc); + if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + if (_validateChecksums) + { + ValidateCrc64(_totalContentCrc, reportedCrc); + } + _decodedData.TotalCrc = reportedCrc; + } + + if (_innerStreamConsumed != _decodedData.InnerStreamLength) + { + throw Errors.InvalidStructuredMessage("Unexpected message size."); + } + if (_currentSegmentNum != _decodedData.TotalSegments) + { + throw Errors.InvalidStructuredMessage("Missing expected message segments."); + } + + _decodedData.DecodeCompleted = true; + return footerLen; + } + + private int ProcessSegmentHeader(ReadOnlySpan span) + { + StructuredMessage.V1_0.ReadSegmentHeader( + span.Slice(0, _segmentHeaderLength), + out int newSegNum, + out _currentSegmentContentLength); + _currentSegmentContentRemaining = _currentSegmentContentLength; + if (newSegNum != _currentSegmentNum + 1) + { + throw Errors.InvalidStructuredMessage("Unexpected segment number in 
structured message."); + } + _currentSegmentNum = newSegNum; + _currentRegion = SMRegion.SegmentContent; + return _segmentHeaderLength; + } + + private int ProcessSegmentFooter(ReadOnlySpan span) + { + int footerLen = StructuredMessage.V1_0.GetSegmentFooterSize(_decodedData.Flags.Value); + StructuredMessage.V1_0.ReadSegmentFooter( + span.Slice(0, footerLen), + _decodedData.Flags.Value, + out ulong reportedCrc); + if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + if (_validateChecksums) + { + ValidateCrc64(_segmentCrc, reportedCrc); + _segmentCrc = StorageCrc64HashAlgorithm.Create(); + } + _decodedData.SegmentCrcs.Add((reportedCrc, _currentSegmentContentLength)); + } + _currentRegion = _currentSegmentNum == _decodedData.TotalSegments ? SMRegion.StreamFooter : SMRegion.SegmentHeader; + return footerLen; + } + + private static void ValidateCrc64(StorageCrc64HashAlgorithm calculation, ulong reported) + { + using IDisposable _ = ArrayPool.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf); + Span calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length); + Span reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length); + calculation.GetCurrentHash(calculatedBytes); + reported.WriteCrc64(reportedBytes); + if (!calculatedBytes.SequenceEqual(reportedBytes)) + { + throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes); + } + } + #endregion + + public override long Seek(long offset, SeekOrigin origin) + => throw new NotSupportedException(); + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + if (_disposed) + { + return; + } + + if (disposing) + { + _innerBufferedStream.Dispose(); + _disposed = true; + } + } + + private void ResizeMetadataBuffer(int newSize) + { + byte[] newBuf = ArrayPool.Shared.Rent(newSize); + Array.Copy(_metadataBuffer, _metadataBufferOffset, newBuf, 0, _metadataBufferLength); + ArrayPool.Shared.Return(_metadataBuffer); + 
_metadataBuffer = newBuf; + } + + private void AlignMetadataBuffer() + { + if (_metadataBufferOffset != 0 && _metadataBufferLength != 0) + { + for (int i = 0; i < _metadataBufferLength; i++) + { + _metadataBuffer[i] = _metadataBuffer[_metadataBufferOffset + i]; + } + _metadataBufferOffset = 0; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs new file mode 100644 index 0000000000000..cb0ef340155ec --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs @@ -0,0 +1,545 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core.Pipeline; +using Azure.Storage.Common; + +namespace Azure.Storage.Shared; + +internal class StructuredMessageEncodingStream : Stream +{ + private readonly Stream _innerStream; + + private readonly int _streamHeaderLength; + private readonly int _streamFooterLength; + private readonly int _segmentHeaderLength; + private readonly int _segmentFooterLength; + private readonly int _segmentContentLength; + + private readonly StructuredMessage.Flags _flags; + private bool _disposed; + + private bool UseCrcSegment => _flags.HasFlag(StructuredMessage.Flags.StorageCrc64); + private readonly StorageCrc64HashAlgorithm _totalCrc; + private StorageCrc64HashAlgorithm _segmentCrc; + private readonly byte[] _segmentCrcs; + private int _latestSegmentCrcd = 0; + + #region Segments + /// + /// Gets the 1-indexed segment number the underlying stream is currently positioned in. + /// 1-indexed to match segment labelling as specified by SM spec. 
+ /// + private int CurrentInnerSegment => (int)Math.Floor(_innerStream.Position / (float)_segmentContentLength) + 1; + + /// + /// Gets the 1-indexed segment number the encoded data stream is currently positioned in. + /// 1-indexed to match segment labelling as specified by SM spec. + /// + private int CurrentEncodingSegment + { + get + { + // edge case: always on final segment when at end of inner stream + if (_innerStream.Position == _innerStream.Length) + { + return TotalSegments; + } + // when writing footer, inner stream is positioned at next segment, + // but this stream is still writing the previous one + if (_currentRegion == SMRegion.SegmentFooter) + { + return CurrentInnerSegment - 1; + } + return CurrentInnerSegment; + } + } + + /// + /// Segment length including header and footer. + /// + private int SegmentTotalLength => _segmentHeaderLength + _segmentContentLength + _segmentFooterLength; + + private int TotalSegments => GetTotalSegments(_innerStream, _segmentContentLength); + private static int GetTotalSegments(Stream innerStream, long segmentContentLength) + { + return (int)Math.Ceiling(innerStream.Length / (float)segmentContentLength); + } + #endregion + + public override bool CanRead => true; + + public override bool CanWrite => false; + + public override bool CanSeek => _innerStream.CanSeek; + + public override bool CanTimeout => _innerStream.CanTimeout; + + public override int ReadTimeout => _innerStream.ReadTimeout; + + public override int WriteTimeout => _innerStream.WriteTimeout; + + public override long Length => + _streamHeaderLength + _streamFooterLength + + (_segmentHeaderLength + _segmentFooterLength) * TotalSegments + + _innerStream.Length; + + #region Position + private enum SMRegion + { + StreamHeader, + StreamFooter, + SegmentHeader, + SegmentFooter, + SegmentContent, + } + + private SMRegion _currentRegion = SMRegion.StreamHeader; + private int _currentRegionPosition = 0; + + private long _maxSeekPosition = 0; + + public override 
long Position + { + get + { + return _currentRegion switch + { + SMRegion.StreamHeader => _currentRegionPosition, + SMRegion.StreamFooter => _streamHeaderLength + + TotalSegments * (_segmentHeaderLength + _segmentFooterLength) + + _innerStream.Length + + _currentRegionPosition, + SMRegion.SegmentHeader => _innerStream.Position + + _streamHeaderLength + + (CurrentEncodingSegment - 1) * (_segmentHeaderLength + _segmentFooterLength) + + _currentRegionPosition, + SMRegion.SegmentFooter => _innerStream.Position + + _streamHeaderLength + + // Inner stream has moved to next segment but we're still writing the previous segment footer + CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - + _segmentFooterLength + _currentRegionPosition, + SMRegion.SegmentContent => _innerStream.Position + + _streamHeaderLength + + CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - + _segmentFooterLength, + _ => throw new InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), + }; + } + set + { + Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); + if (value < _streamHeaderLength) + { + _currentRegion = SMRegion.StreamHeader; + _currentRegionPosition = (int)value; + _innerStream.Position = 0; + return; + } + if (value >= Length - _streamFooterLength) + { + _currentRegion = SMRegion.StreamFooter; + _currentRegionPosition = (int)(value - (Length - _streamFooterLength)); + _innerStream.Position = _innerStream.Length; + return; + } + int newSegmentNum = 1 + (int)Math.Floor((value - _streamHeaderLength) / (double)(_segmentHeaderLength + _segmentFooterLength + _segmentContentLength)); + int segmentPosition = (int)(value - _streamHeaderLength - + ((newSegmentNum - 1) * (_segmentHeaderLength + _segmentFooterLength + _segmentContentLength))); + + if (segmentPosition < _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = (int)((value - _streamHeaderLength) % 
SegmentTotalLength); + _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength; + return; + } + if (segmentPosition < _segmentHeaderLength + _segmentContentLength) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - + _segmentHeaderLength; + _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength + _currentRegionPosition; + return; + } + + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - + _segmentHeaderLength - _segmentContentLength; + _innerStream.Position = newSegmentNum * _segmentContentLength; + } + } + #endregion + + public StructuredMessageEncodingStream( + Stream innerStream, + int segmentContentLength, + StructuredMessage.Flags flags) + { + Argument.AssertNotNull(innerStream, nameof(innerStream)); + if (innerStream.GetLengthOrDefault() == default) + { + throw new ArgumentException("Stream must have known length.", nameof(innerStream)); + } + if (innerStream.Position != 0) + { + throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); + } + // stream logic likely breaks down with segment length of 1; enforce >=2 rather than just positive number + // real world scenarios will probably use a minimum of tens of KB + Argument.AssertInRange(segmentContentLength, 2, int.MaxValue, nameof(segmentContentLength)); + + _flags = flags; + _segmentContentLength = segmentContentLength; + + _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; + _streamFooterLength = UseCrcSegment ? StructuredMessage.Crc64Length : 0; + _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; + _segmentFooterLength = UseCrcSegment ? 
StructuredMessage.Crc64Length : 0; + + if (UseCrcSegment) + { + _totalCrc = StorageCrc64HashAlgorithm.Create(); + _segmentCrc = StorageCrc64HashAlgorithm.Create(); + _segmentCrcs = ArrayPool.Shared.Rent( + GetTotalSegments(innerStream, segmentContentLength) * StructuredMessage.Crc64Length); + innerStream = ChecksumCalculatingStream.GetReadStream(innerStream, span => + { + _totalCrc.Append(span); + _segmentCrc.Append(span); + }); + } + + _innerStream = innerStream; + } + + #region Write + public override void Flush() => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotSupportedException(); + #endregion + + #region Read + public override int Read(byte[] buffer, int offset, int count) + => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); + + private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < count && Position < Length) + { + int subreadOffset = offset + totalRead; + int subreadCount = count - totalRead; + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); + 
break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. + if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamInternal( + buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override int Read(Span buffer) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } +#endif + + #region Read Headers/Footers + private int ReadFromStreamHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( + ArrayPool.Shared, out Memory headerBytes, Length, _flags, TotalSegments); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _streamHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromStreamFooter(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read <= 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( + ArrayPool.Shared, + out Memory footerBytes, + crc64: UseCrcSegment + ? 
_totalCrc.GetCurrentHash() // TODO array pooling + : default); + footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + return read; + } + + private int ReadFromSegmentHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( + ArrayPool.Shared, + out Memory headerBytes, + CurrentInnerSegment, + Math.Min(_segmentContentLength, _innerStream.Length - _innerStream.Position)); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromSegmentFooter(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read < 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( + ArrayPool.Shared, + out Memory headerBytes, + crc64: UseCrcSegment + ? new Span( + _segmentCrcs, + (CurrentEncodingSegment-1) * _totalCrc.HashLengthInBytes, + _totalCrc.HashLengthInBytes) + : default); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentFooterLength) + { + _currentRegion = _innerStream.Position == _innerStream.Length + ? 
SMRegion.StreamFooter : SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + #endregion + + #region ReadUnderlyingStream + private int MaxInnerStreamRead => _segmentContentLength - _currentRegionPosition; + + private void CleanupContentSegment() + { + if (_currentRegionPosition == _segmentContentLength || _innerStream.Position >= _innerStream.Length) + { + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = 0; + if (UseCrcSegment && CurrentEncodingSegment - 1 == _latestSegmentCrcd) + { + _segmentCrc.GetCurrentHash(new Span( + _segmentCrcs, + _latestSegmentCrcd * _segmentCrc.HashLengthInBytes, + _segmentCrc.HashLengthInBytes)); + _latestSegmentCrcd++; + _segmentCrc = StorageCrc64HashAlgorithm.Create(); + } + } + } + + private async ValueTask ReadFromInnerStreamInternal( + byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int read = async + ? await _innerStream.ReadAsync(buffer, offset, Math.Min(count, MaxInnerStreamRead)).ConfigureAwait(false) + : _innerStream.Read(buffer, offset, Math.Min(count, MaxInnerStreamRead)); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + private int ReadFromInnerStream(Span buffer) + { + if (MaxInnerStreamRead < buffer.Length) + { + buffer = buffer.Slice(0, MaxInnerStreamRead); + } + int read = _innerStream.Read(buffer); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + + private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) + { + if (MaxInnerStreamRead < buffer.Length) + { + buffer = buffer.Slice(0, MaxInnerStreamRead); + } + int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } +#endif + #endregion + + // don't allow stream to seek too far forward. 
track how far the stream has been naturally read. + private void UpdateLatestPosition() + { + if (_maxSeekPosition < Position) + { + _maxSeekPosition = Position; + } + } + #endregion + + public override long Seek(long offset, SeekOrigin origin) + { + switch (origin) + { + case SeekOrigin.Begin: + Position = offset; + break; + case SeekOrigin.Current: + Position += offset; + break; + case SeekOrigin.End: + Position = Length + offset; + break; + } + return Position; + } + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + if (_disposed) + { + return; + } + + if (disposing) + { + _innerStream.Dispose(); + _disposed = true; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs new file mode 100644 index 0000000000000..3569ef4339735 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs @@ -0,0 +1,451 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core.Pipeline; +using Azure.Storage.Common; + +namespace Azure.Storage.Shared; + +internal class StructuredMessagePrecalculatedCrcWrapperStream : Stream +{ + private readonly Stream _innerStream; + + private readonly int _streamHeaderLength; + private readonly int _streamFooterLength; + private readonly int _segmentHeaderLength; + private readonly int _segmentFooterLength; + + private bool _disposed; + + private readonly byte[] _crc; + + public override bool CanRead => true; + + public override bool CanWrite => false; + + public override bool CanSeek => _innerStream.CanSeek; + + public override bool CanTimeout => _innerStream.CanTimeout; + + public override int ReadTimeout => _innerStream.ReadTimeout; + + public override int WriteTimeout => _innerStream.WriteTimeout; + + public override long Length => + _streamHeaderLength + _streamFooterLength + + _segmentHeaderLength + _segmentFooterLength + + _innerStream.Length; + + #region Position + private enum SMRegion + { + StreamHeader, + StreamFooter, + SegmentHeader, + SegmentFooter, + SegmentContent, + } + + private SMRegion _currentRegion = SMRegion.StreamHeader; + private int _currentRegionPosition = 0; + + private long _maxSeekPosition = 0; + + public override long Position + { + get + { + return _currentRegion switch + { + SMRegion.StreamHeader => _currentRegionPosition, + SMRegion.SegmentHeader => _innerStream.Position + + _streamHeaderLength + + _currentRegionPosition, + SMRegion.SegmentContent => _streamHeaderLength + + _segmentHeaderLength + + _innerStream.Position, + SMRegion.SegmentFooter => _streamHeaderLength + + _segmentHeaderLength + + _innerStream.Length + + _currentRegionPosition, + SMRegion.StreamFooter => _streamHeaderLength + + _segmentHeaderLength + + _innerStream.Length + + _segmentFooterLength + + _currentRegionPosition, + _ => throw new 
InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), + }; + } + set + { + Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); + if (value < _streamHeaderLength) + { + _currentRegion = SMRegion.StreamHeader; + _currentRegionPosition = (int)value; + _innerStream.Position = 0; + return; + } + if (value < _streamHeaderLength + _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = (int)(value - _streamHeaderLength); + _innerStream.Position = 0; + return; + } + if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength); + _innerStream.Position = value - _streamHeaderLength - _segmentHeaderLength; + return; + } + if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length + _segmentFooterLength) + { + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length); + _innerStream.Position = _innerStream.Length; + return; + } + + _currentRegion = SMRegion.StreamFooter; + _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length - _segmentFooterLength); + _innerStream.Position = _innerStream.Length; + } + } + #endregion + + public StructuredMessagePrecalculatedCrcWrapperStream( + Stream innerStream, + ReadOnlySpan precalculatedCrc) + { + Argument.AssertNotNull(innerStream, nameof(innerStream)); + if (innerStream.GetLengthOrDefault() == default) + { + throw new ArgumentException("Stream must have known length.", nameof(innerStream)); + } + if (innerStream.Position != 0) + { + throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); + } + + _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; + _streamFooterLength = StructuredMessage.Crc64Length; + 
_segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; + _segmentFooterLength = StructuredMessage.Crc64Length; + + _crc = ArrayPool.Shared.Rent(StructuredMessage.Crc64Length); + precalculatedCrc.CopyTo(_crc); + + _innerStream = innerStream; + } + + #region Write + public override void Flush() => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotSupportedException(); + #endregion + + #region Read + public override int Read(byte[] buffer, int offset, int count) + => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); + + private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < count && Position < Length) + { + int subreadOffset = offset + totalRead; + int subreadCount = count - totalRead; + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamInternal( + buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override int Read(Span buffer) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } +#endif + + #region Read Headers/Footers + private int ReadFromStreamHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( + ArrayPool.Shared, + out Memory headerBytes, + Length, + StructuredMessage.Flags.StorageCrc64, + totalSegments: 1); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _streamHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromStreamFooter(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read <= 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( + ArrayPool.Shared, + out Memory footerBytes, + new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); + footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + return read; + } + + private int ReadFromSegmentHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( + ArrayPool.Shared, + out Memory headerBytes, + segmentNum: 1, + _innerStream.Length); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromSegmentFooter(Span 
buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read < 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( + ArrayPool.Shared, + out Memory headerBytes, + new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentFooterLength) + { + _currentRegion = _innerStream.Position == _innerStream.Length + ? SMRegion.StreamFooter : SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + #endregion + + #region ReadUnderlyingStream + private void CleanupContentSegment() + { + if (_innerStream.Position >= _innerStream.Length) + { + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = 0; + } + } + + private async ValueTask ReadFromInnerStreamInternal( + byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int read = async + ? await _innerStream.ReadAsync(buffer, offset, count).ConfigureAwait(false) + : _innerStream.Read(buffer, offset, count); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + private int ReadFromInnerStream(Span buffer) + { + int read = _innerStream.Read(buffer); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + + private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) + { + int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } +#endif + #endregion + + // don't allow stream to seek too far forward. track how far the stream has been naturally read. 
+ private void UpdateLatestPosition() + { + if (_maxSeekPosition < Position) + { + _maxSeekPosition = Position; + } + } + #endregion + + public override long Seek(long offset, SeekOrigin origin) + { + switch (origin) + { + case SeekOrigin.Begin: + Position = offset; + break; + case SeekOrigin.Current: + Position += offset; + break; + case SeekOrigin.End: + Position = Length + offset; + break; + } + return Position; + } + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + if (_disposed) + { + return; + } + + if (disposing) + { + ArrayPool.Shared.Return(_crc); + _innerStream.Dispose(); + _disposed = true; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs index af21588b4ae09..763d385240383 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs @@ -9,14 +9,7 @@ public static StorageChecksumAlgorithm ResolveAuto(this StorageChecksumAlgorithm { if (checksumAlgorithm == StorageChecksumAlgorithm.Auto) { -#if BlobSDK || DataLakeSDK || CommonSDK return StorageChecksumAlgorithm.StorageCrc64; -#elif FileSDK // file shares don't support crc64 - return StorageChecksumAlgorithm.MD5; -#else - throw new System.NotSupportedException( - $"{typeof(TransferValidationOptionsExtensions).FullName}.{nameof(ResolveAuto)} is not supported."); -#endif } return checksumAlgorithm; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj index 5db86ebee984b..2863b85f6feb2 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj @@ -13,9 +13,12 @@ + + + @@ -28,6 +31,7 @@ + @@ -46,6 
+50,11 @@ + + + + + diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs index 7411eb1499312..f4e4b92ed73c4 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs @@ -15,6 +15,7 @@ internal class FaultyStream : Stream private readonly Exception _exceptionToRaise; private int _remainingExceptions; private Action _onFault; + private long _position = 0; public FaultyStream( Stream innerStream, @@ -40,7 +41,7 @@ public FaultyStream( public override long Position { - get => _innerStream.Position; + get => CanSeek ? _innerStream.Position : _position; set => _innerStream.Position = value; } @@ -53,7 +54,9 @@ public override int Read(byte[] buffer, int offset, int count) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - return _innerStream.Read(buffer, offset, count); + int read = _innerStream.Read(buffer, offset, count); + _position += read; + return read; } else { @@ -61,11 +64,13 @@ public override int Read(byte[] buffer, int offset, int count) } } - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - return _innerStream.ReadAsync(buffer, offset, count, cancellationToken); + int read = await _innerStream.ReadAsync(buffer, offset, count, cancellationToken); + _position += read; + return read; } else { diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs new file mode 100644 index 0000000000000..828c41179bba3 --- /dev/null +++ 
b/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.IO; +using Azure.Core; +using Azure.Core.Pipeline; +using Azure.Storage.Shared; + +namespace Azure.Storage.Test.Shared +{ + internal class ObserveStructuredMessagePolicy : HttpPipelineSynchronousPolicy + { + private readonly HashSet _requestScopes = new(); + + private readonly HashSet _responseScopes = new(); + + public ObserveStructuredMessagePolicy() + { + } + + public override void OnSendingRequest(HttpMessage message) + { + if (_requestScopes.Count > 0) + { + byte[] encodedContent; + byte[] underlyingContent; + StructuredMessageDecodingStream.RawDecodedData decodedData; + using (MemoryStream ms = new()) + { + message.Request.Content.WriteTo(ms, default); + encodedContent = ms.ToArray(); + using (MemoryStream ms2 = new()) + { + (Stream s, decodedData) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedContent)); + s.CopyTo(ms2); + underlyingContent = ms2.ToArray(); + } + } + } + } + + public override void OnReceivedResponse(HttpMessage message) + { + } + + public IDisposable CheckRequestScope() => CheckMessageScope.CheckRequestScope(this); + + public IDisposable CheckResponseScope() => CheckMessageScope.CheckResponseScope(this); + + private class CheckMessageScope : IDisposable + { + private bool _isRequestScope; + private ObserveStructuredMessagePolicy _policy; + + public static CheckMessageScope CheckRequestScope(ObserveStructuredMessagePolicy policy) + { + CheckMessageScope result = new() + { + _isRequestScope = true, + _policy = policy + }; + result._policy._requestScopes.Add(result); + return result; + } + + public static CheckMessageScope CheckResponseScope(ObserveStructuredMessagePolicy policy) + { + CheckMessageScope result = new() + { + _isRequestScope = false, + _policy = 
policy + }; + result._policy._responseScopes.Add(result); + return result; + } + + public void Dispose() + { + (_isRequestScope ? _policy._requestScopes : _policy._responseScopes).Remove(this); + } + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs new file mode 100644 index 0000000000000..ad395e862f827 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Linq; +using System.Text; +using Azure.Core; +using NUnit.Framework; + +namespace Azure.Storage; + +public static partial class RequestExtensions +{ + public static string AssertHeaderPresent(this Request request, string headerName) + { + if (request.Headers.TryGetValue(headerName, out string value)) + { + return headerName == Constants.StructuredMessage.StructuredMessageHeader ? null : value; + } + StringBuilder sb = new StringBuilder() + .AppendLine($"`{headerName}` expected on request but was not found.") + .AppendLine($"{request.Method} {request.Uri}") + .AppendLine(string.Join("\n", request.Headers.Select(h => $"{h.Name}: {h.Value}s"))) + ; + Assert.Fail(sb.ToString()); + return null; + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs index f4198e9dfd532..7e6c78117f53b 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs @@ -14,7 +14,7 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy /// /// Default tampering that changes the first byte of the stream. 
/// - private static readonly Func _defaultStreamTransform = stream => + private static Func GetTamperByteStreamTransform(long position) => stream => { if (stream is not MemoryStream) { @@ -23,10 +23,10 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy stream = buffer; } - stream.Position = 0; + stream.Position = position; var firstByte = stream.ReadByte(); - stream.Position = 0; + stream.Position = position; stream.WriteByte((byte)((firstByte + 1) % byte.MaxValue)); stream.Position = 0; @@ -37,9 +37,12 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy public TamperStreamContentsPolicy(Func streamTransform = default) { - _streamTransform = streamTransform ?? _defaultStreamTransform; + _streamTransform = streamTransform ?? GetTamperByteStreamTransform(0); } + public static TamperStreamContentsPolicy TamperByteAt(long position) + => new(GetTamperByteStreamTransform(position)); + public bool TransformRequestBody { get; set; } public bool TransformResponseBody { get; set; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs index c18492d2fb4dd..248acf8811960 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs @@ -5,10 +5,13 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Security.Cryptography; using System.Threading.Tasks; using Azure.Core; +using Azure.Core.Diagnostics; +using Azure.Core.Pipeline; using Azure.Core.TestFramework; -using FastSerialization; +using Azure.Storage.Shared; using NUnit.Framework; namespace Azure.Storage.Test.Shared @@ -190,21 +193,15 @@ protected string GetNewResourceName() /// The actual checksum value expected to be on the request, if known. Defaults to no specific value expected or checked. 
/// /// An assertion to put into a pipeline policy. - internal static Action GetRequestChecksumAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) + internal static Action GetRequestChecksumHeaderAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) { // action to assert a request header is as expected - void AssertChecksum(RequestHeaders headers, string headerName) + void AssertChecksum(Request req, string headerName) { - if (headers.TryGetValue(headerName, out string checksum)) + string checksum = req.AssertHeaderPresent(headerName); + if (expectedChecksum != default) { - if (expectedChecksum != default) - { - Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); - } - } - else - { - Assert.Fail($"{headerName} expected on request but was not found."); + Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); } }; @@ -219,14 +216,39 @@ void AssertChecksum(RequestHeaders headers, string headerName) switch (algorithm.ResolveAuto()) { case StorageChecksumAlgorithm.MD5: - AssertChecksum(request.Headers, "Content-MD5"); + AssertChecksum(request, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(request.Headers, "x-ms-content-crc64"); + AssertChecksum(request, Constants.StructuredMessage.StructuredMessageHeader); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); + } + }; + } + + internal static Action GetRequestStructuredMessageAssertion( + StructuredMessage.Flags flags, + Func isStructuredMessageExpected = default, + long? 
structuredContentSegmentLength = default) + { + return request => + { + // filter some requests out with predicate + if (isStructuredMessageExpected != default && !isStructuredMessageExpected(request)) + { + return; } + + Assert.That(request.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); + Assert.That(structuredBody, Does.Contain("XSM/1.0")); + if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + Assert.That(structuredBody, Does.Contain("crc64")); + } + + Assert.That(request.Headers.TryGetValue("Content-Length", out string contentLength)); + Assert.That(request.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); }; } @@ -278,32 +300,66 @@ void AssertChecksum(ResponseHeaders headers, string headerName) AssertChecksum(response.Headers, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(response.Headers, "x-ms-content-crc64"); + AssertChecksum(response.Headers, Constants.StructuredMessage.StructuredMessageHeader); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); } }; } + internal static Action GetResponseStructuredMessageAssertion( + StructuredMessage.Flags flags, + Func isStructuredMessageExpected = default) + { + return response => + { + // filter some requests out with predicate + if (isStructuredMessageExpected != default && !isStructuredMessageExpected(response)) + { + return; + } + + Assert.That(response.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); + Assert.That(structuredBody, Does.Contain("XSM/1.0")); + if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + Assert.That(structuredBody, Does.Contain("crc64")); + } + + Assert.That(response.Headers.TryGetValue("Content-Length", out string contentLength)); + 
Assert.That(response.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); + }; + } + /// /// Asserts the service returned an error that expected checksum did not match checksum on upload. /// /// Async action to upload data to service. /// Checksum algorithm used. - internal static void AssertWriteChecksumMismatch(AsyncTestDelegate writeAction, StorageChecksumAlgorithm algorithm) + internal static void AssertWriteChecksumMismatch( + AsyncTestDelegate writeAction, + StorageChecksumAlgorithm algorithm, + bool expectStructuredMessage = false) { var exception = ThrowsOrInconclusiveAsync(writeAction); - switch (algorithm.ResolveAuto()) + if (expectStructuredMessage) { - case StorageChecksumAlgorithm.MD5: - Assert.AreEqual("Md5Mismatch", exception.ErrorCode); - break; - case StorageChecksumAlgorithm.StorageCrc64: - Assert.AreEqual("Crc64Mismatch", exception.ErrorCode); - break; - default: - throw new ArgumentException("Test arguments contain bad algorithm specifier."); + Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); + } + else + { + switch (algorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + Assert.That(exception.ErrorCode, Is.EqualTo("Md5Mismatch")); + break; + case StorageChecksumAlgorithm.StorageCrc64: + Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); + break; + default: + throw new ArgumentException("Test arguments contain bad algorithm specifier."); + } } } #endregion @@ -348,6 +404,7 @@ public virtual async Task UploadPartitionSuccessfulHashComputation(StorageChecks await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); // Arrange + bool expectStructuredMessage = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; const int dataLength = Constants.KB; var data = GetRandomBuffer(dataLength); var validationOptions = new UploadTransferValidationOptions @@ -356,7 +413,10 @@ public virtual async Task 
UploadPartitionSuccessfulHashComputation(StorageChecks }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); + var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) + : GetRequestChecksumHeaderAssertion(algorithm); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -406,7 +466,11 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg }; // make pipeline assertion for checking precalculated checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm, expectedChecksum: precalculatedChecksum)); + // precalculated partition upload will never use structured message. always check header + var assertion = GetRequestChecksumHeaderAssertion( + algorithm, + expectedChecksum: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? 
default : precalculatedChecksum); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -423,12 +487,12 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); // Assert - AssertWriteChecksumMismatch(operation, algorithm); + AssertWriteChecksumMismatch(operation, algorithm, algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); } } [TestCaseSource(nameof(GetValidationAlgorithms))] - public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlgorithm algorithm) + public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlgorithm algorithm) { await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); @@ -441,7 +505,7 @@ public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlg }; // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer - var streamTamperPolicy = new TamperStreamContentsPolicy(); + var streamTamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(streamTamperPolicy, HttpPipelinePosition.PerCall); @@ -456,9 +520,10 @@ public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlg // Act streamTamperPolicy.TransformRequestBody = true; AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); - + using var listener = AzureEventSourceListener.CreateConsoleLogger(); // Assert - AssertWriteChecksumMismatch(operation, algorithm); + AssertWriteChecksumMismatch(operation, algorithm, + expectStructuredMessage: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); } } @@ 
-473,7 +538,10 @@ public virtual async Task UploadPartitionUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); + var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) + : GetRequestChecksumHeaderAssertion(clientAlgorithm); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -512,7 +580,10 @@ public virtual async Task UploadPartitionOverwritesDefaultClientValidationOption }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); + var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) + : GetRequestChecksumHeaderAssertion(overrideAlgorithm); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -555,10 +626,14 @@ public virtual async Task UploadPartitionDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-content-crc64")) + if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } + if (request.Headers.Contains("x-ms-structured-body")) + { + Assert.Fail($"Structured body used when none expected."); + } }); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -601,9 +676,11 @@ public virtual async Task OpenWriteSuccessfulHashComputation( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(algorithm)); var clientOptions = ClientBuilder.GetOptions(); + //ObserveStructuredMessagePolicy observe = new(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); + //clientOptions.AddPolicy(observe, HttpPipelinePosition.BeforeTransport); var client = await GetResourceClientAsync( disposingContainer.Container, @@ -616,6 +693,7 @@ public virtual async Task OpenWriteSuccessfulHashComputation( using var writeStream = await OpenWriteAsync(client, validationOptions, streamBufferSize); // Assert + //using var obsv = observe.CheckRequestScope(); using (checksumPipelineAssertion.CheckRequestScope()) { 
foreach (var _ in Enumerable.Range(0, streamWrites)) @@ -644,7 +722,7 @@ public virtual async Task OpenWriteMismatchedHashThrows(StorageChecksumAlgorithm // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer var clientOptions = ClientBuilder.GetOptions(); - var tamperPolicy = new TamperStreamContentsPolicy(); + var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); var client = await GetResourceClientAsync( @@ -682,7 +760,7 @@ public virtual async Task OpenWriteUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(clientAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -726,7 +804,7 @@ public virtual async Task OpenWriteOverwritesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(overrideAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -774,7 +852,7 @@ public virtual async Task OpenWriteDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-content-crc64")) + if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found 
when none expected."); } @@ -886,7 +964,7 @@ public virtual async Task ParallelUploadSplitSuccessfulHashComputation(StorageCh // make pipeline assertion for checking checksum was present on upload var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + checkRequest: GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -923,8 +1001,10 @@ public virtual async Task ParallelUploadOneShotSuccessfulHashComputation(Storage }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) + : GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -981,7 +1061,7 @@ public virtual async Task ParallelUploadPrecalculatedComposableHashAccepted(Stor PrecalculatedChecksum = hash }; - var client = await GetResourceClientAsync(disposingContainer.Container, dataLength); + var client = await GetResourceClientAsync(disposingContainer.Container, dataLength, createResource: true); // Act await DoesNotThrowOrInconclusiveAsync( @@ -1011,8 +1091,10 @@ public virtual async Task ParallelUploadUsesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( - clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split + ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) + : GetRequestChecksumHeaderAssertion(clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1063,8 +1145,10 @@ public virtual async Task ParallelUploadOverwritesDefaultClientValidationOptions }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( - overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split + ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) + : GetRequestChecksumHeaderAssertion(overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1119,7 +1203,7 @@ public virtual async Task ParallelUploadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-content-crc64")) + if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1184,15 +1268,17 @@ public virtual async Task ParallelDownloadSuccessfulHashVerification( }; // Act - var dest = new MemoryStream(); + byte[] dest; + using (MemoryStream ms = new()) using (checksumPipelineAssertion.CheckRequestScope()) { - await ParallelDownloadAsync(client, 
dest, validationOptions, transferOptions); + await ParallelDownloadAsync(client, ms, validationOptions, transferOptions); + dest = ms.ToArray(); } // Assert // Assertion was in the pipeline and the SDK not throwing means the checksum was validated - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + Assert.IsTrue(dest.SequenceEqual(data)); } [Test] @@ -1357,7 +1443,7 @@ public virtual async Task ParallelDownloadDisablesDefaultClientValidationOptions { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains("x-ms-content-crc64")) + if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1565,7 +1651,7 @@ public virtual async Task OpenReadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains("x-ms-content-crc64")) + if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1615,7 +1701,7 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; // Act - var dest = new MemoryStream(); + using var dest = new MemoryStream(); var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert @@ -1626,13 +1712,71 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains("x-ms-content-crc64")); + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); break; default: Assert.Fail("Test can't validate given algorithm type."); break; } - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + var result = dest.ToArray(); + 
Assert.IsTrue(result.SequenceEqual(data)); + } + + [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader, false, false)] + [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader-1, false, false)] + [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, true, false)] + [TestCase(StorageChecksumAlgorithm.MD5, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, false, true)] + public virtual async Task DownloadApporpriatelyUsesStructuredMessage( + StorageChecksumAlgorithm algorithm, + int? downloadLen, + bool expectStructuredMessage, + bool expectThrow) + { + await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); + + // Arrange + const int dataLength = Constants.KB; + var data = GetRandomBuffer(dataLength); + + var resourceName = GetNewResourceName(); + var client = await GetResourceClientAsync( + disposingContainer.Container, + resourceLength: dataLength, + createResource: true, + resourceName: resourceName); + await SetupDataAsync(client, new MemoryStream(data)); + + // make pipeline assertion for checking checksum was present on download + HttpPipelinePolicy checksumPipelineAssertion = new AssertMessageContentsPolicy(checkResponse: expectStructuredMessage + ? 
GetResponseStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64) + : GetResponseChecksumAssertion(algorithm)); + TClientOptions clientOptions = ClientBuilder.GetOptions(); + clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); + + client = await GetResourceClientAsync( + disposingContainer.Container, + resourceLength: dataLength, + resourceName: resourceName, + createResource: false, + downloadAlgorithm: algorithm, + options: clientOptions); + + var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; + + // Act + var dest = new MemoryStream(); + AsyncTestDelegate operation = async () => await DownloadPartitionAsync( + client, dest, validationOptions, downloadLen.HasValue ? new HttpRange(length: downloadLen.Value) : default); + // Assert (policies checked use of content validation) + if (expectThrow) + { + Assert.That(operation, Throws.TypeOf()); + } + else + { + Assert.That(operation, Throws.Nothing); + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + } } [Test, Combinatorial] @@ -1658,7 +1802,9 @@ public virtual async Task DownloadHashMismatchThrows( // alter response contents in pipeline, forcing a checksum mismatch on verification step var clientOptions = ClientBuilder.GetOptions(); - clientOptions.AddPolicy(new TamperStreamContentsPolicy() { TransformResponseBody = true }, HttpPipelinePosition.PerCall); + var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(50); + tamperPolicy.TransformResponseBody = true; + clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); client = await GetResourceClientAsync( disposingContainer.Container, createResource: false, @@ -1670,7 +1816,7 @@ public virtual async Task DownloadHashMismatchThrows( AsyncTestDelegate operation = async () => await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert - if (validate) + if (validate || algorithm.ResolveAuto() == 
StorageChecksumAlgorithm.StorageCrc64) { // SDK responsible for finding bad checksum. Throw. ThrowsOrInconclusiveAsync(operation); @@ -1728,7 +1874,7 @@ public virtual async Task DownloadUsesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains("x-ms-content-crc64")); + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1788,7 +1934,7 @@ public virtual async Task DownloadOverwritesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains("x-ms-content-crc64")); + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1827,7 +1973,7 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains("x-ms-content-crc64")) + if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1850,7 +1996,54 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( // Assert // no policies this time; just check response headers Assert.False(response.Headers.Contains("Content-MD5")); - Assert.False(response.Headers.Contains("x-ms-content-crc64")); + Assert.False(response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)); + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + } + + [Test] + public virtual async Task DownloadRecoversFromInterruptWithValidation( + [ValueSource(nameof(GetValidationAlgorithms))] StorageChecksumAlgorithm algorithm) + { + using var _ = 
AzureEventSourceListener.CreateConsoleLogger(); + int dataLen = algorithm.ResolveAuto() switch { + StorageChecksumAlgorithm.StorageCrc64 => 5 * Constants.MB, // >4MB for multisegment + _ => Constants.KB, + }; + + await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); + + // Arrange + var data = GetRandomBuffer(dataLen); + + TClientOptions options = ClientBuilder.GetOptions(); + options.AddPolicy(new FaultyDownloadPipelinePolicy(dataLen - 512, new IOException(), () => { }), HttpPipelinePosition.BeforeTransport); + var client = await GetResourceClientAsync( + disposingContainer.Container, + resourceLength: dataLen, + createResource: true, + options: options); + await SetupDataAsync(client, new MemoryStream(data)); + + var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; + + // Act + var dest = new MemoryStream(); + var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); + + // Assert + // no policies this time; just check response headers + switch (algorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + Assert.True(response.Headers.Contains("Content-MD5")); + break; + case StorageChecksumAlgorithm.StorageCrc64: + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + break; + default: + Assert.Fail("Test can't validate given algorithm type."); + break; + } Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } #endregion @@ -1891,7 +2084,7 @@ public async Task RoundtripWIthDefaults() // make pipeline assertion for checking checksum was present on upload AND download var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), + checkRequest: GetRequestChecksumHeaderAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), checkResponse: 
GetResponseChecksumAssertion(expectedAlgorithm)); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs new file mode 100644 index 0000000000000..a0f9158040b11 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs @@ -0,0 +1,246 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers.Binary; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core; +using Azure.Storage.Shared; +using Azure.Storage.Test.Shared; +using Microsoft.Diagnostics.Tracing.Parsers.AspNet; +using Moq; +using NUnit.Framework; + +namespace Azure.Storage.Tests; + +[TestFixture(true)] +[TestFixture(false)] +public class StructuredMessageDecodingRetriableStreamTests +{ + public bool Async { get; } + + public StructuredMessageDecodingRetriableStreamTests(bool async) + { + Async = async; + } + + private Mock AllExceptionsRetry() + { + Mock mock = new(MockBehavior.Strict); + mock.Setup(rc => rc.IsRetriableException(It.IsAny())).Returns(true); + return mock; + } + + [Test] + public async ValueTask UninterruptedStream() + { + byte[] data = new Random().NextBytesInline(4 * Constants.KB).ToArray(); + byte[] dest = new byte[data.Length]; + + // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream + using (Stream src = new MemoryStream(data)) + using (Stream retriableSrc = new StructuredMessageDecodingRetriableStream(src, new(), default, default, default, default, default, 1)) + using (Stream dst = new MemoryStream(dest)) + { + await retriableSrc.CopyToInternal(dst, Async, default); + } + + Assert.AreEqual(data, dest); + } + + [Test] + public async Task Interrupt_DataIntact([Values(true, 
false)] bool multipleInterrupts) + { + const int segments = 4; + const int segmentLen = Constants.KB; + const int readLen = 128; + const int interruptPos = segmentLen + (3 * readLen) + 10; + + Random r = new(); + byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); + byte[] dest = new byte[data.Length]; + + // Mock a decoded data for the mocked StructuredMessageDecodingStream + StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() + { + TotalSegments = segments, + InnerStreamLength = data.Length, + Flags = StructuredMessage.Flags.StorageCrc64 + }; + // for test purposes, initialize a DecodedData, since we are not actively decoding in this test + initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); + + (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) + { + Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); + if (faulty) + { + stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); + } + // Mock a decoded data for the mocked StructuredMessageDecodingStream + StructuredMessageDecodingStream.RawDecodedData decodedData = new() + { + TotalSegments = segments, + InnerStreamLength = data.Length, + Flags = StructuredMessage.Flags.StorageCrc64, + }; + // for test purposes, initialize a DecodedData, since we are not actively decoding in this test + initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); + return (stream, decodedData); + } + + // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream + using (Stream src = new MemoryStream(data)) + using (Stream faultySrc = new FaultyStream(src, interruptPos, 1, new Exception(), () => { })) + using (Stream retriableSrc = new StructuredMessageDecodingRetriableStream( + 
faultySrc, + initialDecodedData, + default, + offset => Factory(offset, multipleInterrupts), + offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), + null, + AllExceptionsRetry().Object, + int.MaxValue)) + using (Stream dst = new MemoryStream(dest)) + { + await retriableSrc.CopyToInternal(dst, readLen, Async, default); + } + + Assert.AreEqual(data, dest); + } + + [Test] + public async Task Interrupt_AppropriateRewind() + { + const int segments = 2; + const int segmentLen = Constants.KB; + const int dataLen = segments * segmentLen; + const int readLen = segmentLen / 4; + const int interruptOffset = 10; + const int interruptPos = segmentLen + (2 * readLen) + interruptOffset; + Random r = new(); + + // Mock a decoded data for the mocked StructuredMessageDecodingStream + StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() + { + TotalSegments = segments, + InnerStreamLength = segments * segmentLen, + Flags = StructuredMessage.Flags.StorageCrc64, + }; + // By the time of interrupt, there will be one segment reported + initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); + + Mock mock = new(MockBehavior.Strict); + mock.SetupGet(s => s.CanRead).Returns(true); + mock.SetupGet(s => s.CanSeek).Returns(false); + if (Async) + { + mock.SetupSequence(s => s.ReadAsync(It.IsAny(), It.IsAny(), It.IsAny(), default)) + .Returns(Task.FromResult(readLen)) // start first segment + .Returns(Task.FromResult(readLen)) + .Returns(Task.FromResult(readLen)) + .Returns(Task.FromResult(readLen)) // finish first segment + .Returns(Task.FromResult(readLen)) // start second segment + .Returns(Task.FromResult(readLen)) + // faulty stream interrupt + .Returns(Task.FromResult(readLen * 2)) // restart second segment. 
fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once + .Returns(Task.FromResult(readLen)) + .Returns(Task.FromResult(readLen)) // end second segment + .Returns(Task.FromResult(0)) // signal end of stream + .Returns(Task.FromResult(0)) // second signal needed for stream wrapping reasons + ; + } + else + { + mock.SetupSequence(s => s.Read(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(readLen) // start first segment + .Returns(readLen) + .Returns(readLen) + .Returns(readLen) // finish first segment + .Returns(readLen) // start second segment + .Returns(readLen) + // faulty stream interrupt + .Returns(readLen * 2) // restart second segment. fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once + .Returns(readLen) + .Returns(readLen) // end second segment + .Returns(0) // signal end of stream + .Returns(0) // second signal needed for stream wrapping reasons + ; + } + Stream faultySrc = new FaultyStream(mock.Object, interruptPos, 1, new Exception(), default); + Stream retriableSrc = new StructuredMessageDecodingRetriableStream( + faultySrc, + initialDecodedData, + default, + offset => (mock.Object, new()), + offset => new(Task.FromResult((mock.Object, new StructuredMessageDecodingStream.RawDecodedData()))), + null, + AllExceptionsRetry().Object, + 1); + + int totalRead = 0; + int read = 0; + byte[] buf = new byte[readLen]; + if (Async) + { + while ((read = await retriableSrc.ReadAsync(buf, 0, buf.Length)) > 0) + { + totalRead += read; + } + } + else + { + while ((read = retriableSrc.Read(buf, 0, buf.Length)) > 0) + { + totalRead += read; + } + } + await retriableSrc.CopyToInternal(Stream.Null, readLen, Async, default); + + // Asserts we read exactly the data length, excluding the fastforward of the inner stream + Assert.That(totalRead, Is.EqualTo(dataLen)); + } + + [Test] + public async Task Interrupt_ProperDecode([Values(true, false)] bool multipleInterrupts) + { + // decoding stream inserts a 
buffered layer of 4 KB. use larger sizes to avoid interference from it. + const int segments = 4; + const int segmentLen = 128 * Constants.KB; + const int readLen = 8 * Constants.KB; + const int interruptPos = segmentLen + (3 * readLen) + 10; + + Random r = new(); + byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); + byte[] dest = new byte[data.Length]; + + (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) + { + Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); + stream = new StructuredMessageEncodingStream(stream, segmentLen, StructuredMessage.Flags.StorageCrc64); + if (faulty) + { + stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); + } + return StructuredMessageDecodingStream.WrapStream(stream); + } + + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = Factory(0, true); + using Stream retriableSrc = new StructuredMessageDecodingRetriableStream( + decodingStream, + decodedData, + default, + offset => Factory(offset, multipleInterrupts), + offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), + null, + AllExceptionsRetry().Object, + int.MaxValue); + using Stream dst = new MemoryStream(dest); + + await retriableSrc.CopyToInternal(dst, readLen, Async, default); + + Assert.AreEqual(data, dest); + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs new file mode 100644 index 0000000000000..2789672df4976 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs @@ -0,0 +1,323 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers.Binary; +using System.Dynamic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Azure.Storage.Blobs.Tests; +using Azure.Storage.Shared; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + [TestFixture(ReadMethod.SyncArray)] + [TestFixture(ReadMethod.AsyncArray)] +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + [TestFixture(ReadMethod.SyncSpan)] + [TestFixture(ReadMethod.AsyncMemory)] +#endif + public class StructuredMessageDecodingStreamTests + { + // Cannot just implement as passthru in the stream + // Must test each one + public enum ReadMethod + { + SyncArray, + AsyncArray, +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + SyncSpan, + AsyncMemory +#endif + } + + public ReadMethod Method { get; } + + public StructuredMessageDecodingStreamTests(ReadMethod method) + { + Method = method; + } + + private class CopyStreamException : Exception + { + public long TotalCopied { get; } + + public CopyStreamException(Exception inner, long totalCopied) + : base($"Failed read after {totalCopied}-many bytes.", inner) + { + TotalCopied = totalCopied; + } + } + private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl + { + byte[] buf = new byte[bufferSize]; + int read; + long totalRead = 0; + try + { + switch (Method) + { + case ReadMethod.SyncArray: + while ((read = source.Read(buf, 0, bufferSize)) > 0) + { + totalRead += read; + destination.Write(buf, 0, read); + } + break; + case ReadMethod.AsyncArray: + while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) + { + totalRead += read; + await destination.WriteAsync(buf, 0, read); + } + break; +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + case ReadMethod.SyncSpan: + while ((read = source.Read(new Span(buf))) > 0) + { + totalRead += read; + destination.Write(new 
Span(buf, 0, read)); + } + break; + case ReadMethod.AsyncMemory: + while ((read = await source.ReadAsync(new Memory(buf))) > 0) + { + totalRead += read; + await destination.WriteAsync(new Memory(buf, 0, read)); + } + break; +#endif + } + destination.Flush(); + } + catch (Exception ex) + { + throw new CopyStreamException(ex, totalRead); + } + return totalRead; + } + + [Test] + [Pairwise] + public async Task DecodesData( + [Values(2048, 2005)] int dataLength, + [Values(default, 512)] int? seglen, + [Values(8*Constants.KB, 512, 530, 3)] int readLen, + [Values(true, false)] bool useCrc) + { + int segmentContentLength = seglen ?? int.MaxValue; + Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + byte[] decodedData; + using (MemoryStream dest = new()) + { + await CopyStream(decodingStream, dest, readLen); + decodedData = dest.ToArray(); + } + + Assert.That(new Span(decodedData).SequenceEqual(originalData)); + } + + [Test] + public void BadStreamBadVersion() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + encodedData[0] = byte.MaxValue; + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public async Task BadSegmentCrcThrows() + { + const int segmentLength = 256; + Random r = new(); + + byte[] originalData = new byte[2048]; + r.NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, 
Flags.StorageCrc64); + + const int badBytePos = 1024; + encodedData[badBytePos] = (byte)~encodedData[badBytePos]; + + MemoryStream encodedDataStream = new(encodedData); + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(encodedDataStream); + + // manual try/catch to validate the proccess failed mid-stream rather than the end + const int copyBufferSize = 4; + bool caught = false; + try + { + await CopyStream(decodingStream, Stream.Null, copyBufferSize); + } + catch (CopyStreamException ex) + { + caught = true; + Assert.That(ex.TotalCopied, Is.LessThanOrEqualTo(badBytePos)); + } + Assert.That(caught); + } + + [Test] + public void BadStreamCrcThrows() + { + const int segmentLength = 256; + Random r = new(); + + byte[] originalData = new byte[2048]; + r.NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, Flags.StorageCrc64); + + encodedData[originalData.Length - 1] = (byte)~encodedData[originalData.Length - 1]; + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public void BadStreamWrongContentLength() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + BinaryPrimitives.WriteInt64LittleEndian(new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), 123456789L); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [TestCase(-1)] + [TestCase(1)] + public void BadStreamWrongSegmentCount(int difference) + { + const int dataSize = 1024; + const int segmentSize = 256; + const int numSegments = 4; + + 
byte[] originalData = new byte[dataSize]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentSize, Flags.StorageCrc64); + + // rewrite the segment count to be different than the actual number of segments + BinaryPrimitives.WriteInt16LittleEndian( + new Span(encodedData, V1_0.StreamHeaderSegmentCountOffset, 2), (short)(numSegments + difference)); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public void BadStreamWrongSegmentNum() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + BinaryPrimitives.WriteInt16LittleEndian( + new Span(encodedData, V1_0.StreamHeaderLength + V1_0.SegmentHeaderNumOffset, 2), 123); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + [Combinatorial] + public async Task BadStreamWrongContentLength( + [Values(-1, 1)] int difference, + [Values(true, false)] bool lengthProvided) + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + BinaryPrimitives.WriteInt64LittleEndian( + new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), + encodedData.Length + difference); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream( + new MemoryStream(encodedData), + lengthProvided ? 
(long?)encodedData.Length : default); + + // manual try/catch with tiny buffer to validate the proccess failed mid-stream rather than the end + const int copyBufferSize = 4; + bool caught = false; + try + { + await CopyStream(decodingStream, Stream.Null, copyBufferSize); + } + catch (CopyStreamException ex) + { + caught = true; + if (lengthProvided) + { + Assert.That(ex.TotalCopied, Is.EqualTo(0)); + } + else + { + Assert.That(ex.TotalCopied, Is.EqualTo(originalData.Length)); + } + } + Assert.That(caught); + } + + [Test] + public void BadStreamMissingExpectedStreamFooter() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + byte[] brokenData = new byte[encodedData.Length - Crc64Length]; + new Span(encodedData, 0, encodedData.Length - Crc64Length).CopyTo(brokenData); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(brokenData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public void NoSeek() + { + (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); + + Assert.That(stream.CanSeek, Is.False); + Assert.That(() => stream.Length, Throws.TypeOf()); + Assert.That(() => stream.Position, Throws.TypeOf()); + Assert.That(() => stream.Position = 0, Throws.TypeOf()); + Assert.That(() => stream.Seek(0, SeekOrigin.Begin), Throws.TypeOf()); + } + + [Test] + public void NoWrite() + { + (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); + byte[] data = new byte[1024]; + new Random().NextBytes(data); + + Assert.That(stream.CanWrite, Is.False); + Assert.That(() => stream.Write(data, 0, data.Length), + Throws.TypeOf()); + Assert.That(async () => await stream.WriteAsync(data, 0, data.Length, CancellationToken.None), + Throws.TypeOf()); +#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + Assert.That(() => stream.Write(new Span(data)), + Throws.TypeOf()); + Assert.That(async () => await stream.WriteAsync(new Memory(data), CancellationToken.None), + Throws.TypeOf()); +#endif + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs new file mode 100644 index 0000000000000..e0f91dee7de3a --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs @@ -0,0 +1,271 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers.Binary; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Azure.Storage.Blobs.Tests; +using Azure.Storage.Shared; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + [TestFixture(ReadMethod.SyncArray)] + [TestFixture(ReadMethod.AsyncArray)] +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + [TestFixture(ReadMethod.SyncSpan)] + [TestFixture(ReadMethod.AsyncMemory)] +#endif + public class StructuredMessageEncodingStreamTests + { + // Cannot just implement as passthru in the stream + // Must test each one + public enum ReadMethod + { + SyncArray, + AsyncArray, +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + SyncSpan, + AsyncMemory +#endif + } + + public ReadMethod Method { get; } + + public StructuredMessageEncodingStreamTests(ReadMethod method) + { + Method = method; + } + + private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl + { + byte[] buf = new byte[bufferSize]; + int read; + switch (Method) + { + case ReadMethod.SyncArray: + while ((read = source.Read(buf, 0, bufferSize)) > 0) + { + destination.Write(buf, 0, read); + } + break; + case 
ReadMethod.AsyncArray: + while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) + { + await destination.WriteAsync(buf, 0, read); + } + break; +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + case ReadMethod.SyncSpan: + while ((read = source.Read(new Span(buf))) > 0) + { + destination.Write(new Span(buf, 0, read)); + } + break; + case ReadMethod.AsyncMemory: + while ((read = await source.ReadAsync(new Memory(buf))) > 0) + { + await destination.WriteAsync(new Memory(buf, 0, read)); + } + break; +#endif + } + destination.Flush(); + } + + [Test] + [Pairwise] + public async Task EncodesData( + [Values(2048, 2005)] int dataLength, + [Values(default, 512)] int? seglen, + [Values(8 * Constants.KB, 512, 530, 3)] int readLen, + [Values(true, false)] bool useCrc) + { + int segmentContentLength = seglen ?? int.MaxValue; + Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); + + Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(originalData), segmentContentLength, flags); + byte[] encodedData; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest, readLen); + encodedData = dest.ToArray(); + } + + Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); + } + + [TestCase(0, 0)] // start + [TestCase(5, 0)] // partway through stream header + [TestCase(V1_0.StreamHeaderLength, 0)] // start of segment + [TestCase(V1_0.StreamHeaderLength + 3, 0)] // partway through segment header + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength, 0)] // start of segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123, 123)] // partway through segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512, 512)] // start of segment footer + 
[TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515, 512)] // partway through segment footer + [TestCase(V1_0.StreamHeaderLength + 3*V1_0.SegmentHeaderLength + 2*Crc64Length + 1500, 1500)] // partway through not first segment content + public async Task Seek(int targetRewindOffset, int expectedInnerStreamPosition) + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + MemoryStream dataStream = new(data); + StructuredMessageEncodingStream encodingStream = new(dataStream, segmentLength, Flags.StorageCrc64); + + // no support for seeking past existing read, need to consume whole stream before seeking + await CopyStream(encodingStream, Stream.Null); + + encodingStream.Position = targetRewindOffset; + Assert.That(encodingStream.Position, Is.EqualTo(targetRewindOffset)); + Assert.That(dataStream.Position, Is.EqualTo(expectedInnerStreamPosition)); + } + + [TestCase(0)] // start + [TestCase(5)] // partway through stream header + [TestCase(V1_0.StreamHeaderLength)] // start of segment + [TestCase(V1_0.StreamHeaderLength + 3)] // partway through segment header + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength)] // start of segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123)] // partway through segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512)] // start of segment footer + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515)] // partway through segment footer + [TestCase(V1_0.StreamHeaderLength + 2 * V1_0.SegmentHeaderLength + Crc64Length + 1500)] // partway through not first segment content + public async Task SupportsRewind(int targetRewindOffset) + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), 
segmentLength, Flags.StorageCrc64); + byte[] encodedData1; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest); + encodedData1 = dest.ToArray(); + } + encodingStream.Position = targetRewindOffset; + byte[] encodedData2; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest); + encodedData2 = dest.ToArray(); + } + + Assert.That(new Span(encodedData1).Slice(targetRewindOffset).SequenceEqual(encodedData2)); + } + + [Test] + public async Task SupportsFastForward() + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + // must have read stream to fastforward. so read whole stream upfront & save result to check later + Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), segmentLength, Flags.StorageCrc64); + byte[] encodedData; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest); + encodedData = dest.ToArray(); + } + + encodingStream.Position = 0; + + bool skip = false; + const int increment = 499; + while (encodingStream.Position < encodingStream.Length) + { + if (skip) + { + encodingStream.Position = Math.Min(dataLength, encodingStream.Position + increment); + skip = !skip; + continue; + } + ReadOnlyMemory expected = new(encodedData, (int)encodingStream.Position, + (int)Math.Min(increment, encodedData.Length - encodingStream.Position)); + ReadOnlyMemory actual; + using (MemoryStream dest = new(increment)) + { + await CopyStream(WindowStream.GetWindow(encodingStream, increment), dest); + actual = dest.ToArray(); + } + Assert.That(expected.Span.SequenceEqual(actual.Span)); + skip = !skip; + } + } + + [Test] + public void NotSupportsFastForwardBeyondLatestRead() + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + Stream encodingStream = new StructuredMessageEncodingStream(new 
MemoryStream(data), segmentLength, Flags.StorageCrc64); + + Assert.That(() => encodingStream.Position = 123, Throws.TypeOf()); + } + + [Test] + [Pairwise] + public async Task WrapperStreamCorrectData( + [Values(2048, 2005)] int dataLength, + [Values(8 * Constants.KB, 512, 530, 3)] int readLen) + { + int segmentContentLength = dataLength; + Flags flags = Flags.StorageCrc64; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + byte[] crc = CrcInline(originalData); + byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); + + Stream encodingStream = new StructuredMessagePrecalculatedCrcWrapperStream(new MemoryStream(originalData), crc); + byte[] encodedData; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest, readLen); + encodedData = dest.ToArray(); + } + + Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); + } + + private static void AssertExpectedStreamHeader(ReadOnlySpan actual, int originalDataLength, Flags flags, int expectedSegments) + { + int expectedFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; + + Assert.That(actual.Length, Is.EqualTo(V1_0.StreamHeaderLength)); + Assert.That(actual[0], Is.EqualTo(1)); + Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(1, 8)), + Is.EqualTo(V1_0.StreamHeaderLength + expectedSegments * (V1_0.SegmentHeaderLength + expectedFooterLen) + originalDataLength)); + Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(9, 2)), Is.EqualTo((short)flags)); + Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(11, 2)), Is.EqualTo((short)expectedSegments)); + } + + private static void AssertExpectedSegmentHeader(ReadOnlySpan actual, int segmentNum, long contentLength) + { + Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(0, 2)), Is.EqualTo((short) segmentNum)); + Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(2, 8)), Is.EqualTo(contentLength)); + } + + private static byte[] CrcInline(ReadOnlySpan data) + { + var crc = StorageCrc64HashAlgorithm.Create(); + crc.Append(data); + return crc.GetCurrentHash(); + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs new file mode 100644 index 0000000000000..59e80320d96a0 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Azure.Storage.Shared; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Blobs.Tests +{ + internal class StructuredMessageHelper + { + public static byte[] MakeEncodedData(ReadOnlySpan data, long segmentContentLength, Flags flags) + { + int segmentCount = (int) Math.Ceiling(data.Length / (double)segmentContentLength); + int segmentFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
8 : 0; + int streamFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 8 : 0; + + byte[] encodedData = new byte[ + V1_0.StreamHeaderLength + + segmentCount*(V1_0.SegmentHeaderLength + segmentFooterLen) + + streamFooterLen + + data.Length]; + V1_0.WriteStreamHeader( + new Span(encodedData, 0, V1_0.StreamHeaderLength), + encodedData.Length, + flags, + segmentCount); + + int i = V1_0.StreamHeaderLength; + int j = 0; + foreach (int seg in Enumerable.Range(1, segmentCount)) + { + int segContentLen = Math.Min((int)segmentContentLength, data.Length - j); + V1_0.WriteSegmentHeader( + new Span(encodedData, i, V1_0.SegmentHeaderLength), + seg, + segContentLen); + i += V1_0.SegmentHeaderLength; + + data.Slice(j, segContentLen) + .CopyTo(new Span(encodedData).Slice(i)); + i += segContentLen; + + if (flags.HasFlag(Flags.StorageCrc64)) + { + var crc = StorageCrc64HashAlgorithm.Create(); + crc.Append(data.Slice(j, segContentLen)); + crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); + i += Crc64Length; + } + j += segContentLen; + } + + if (flags.HasFlag(Flags.StorageCrc64)) + { + var crc = StorageCrc64HashAlgorithm.Create(); + crc.Append(data); + crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); + } + + return encodedData; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs new file mode 100644 index 0000000000000..61583aa1ebe4e --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs @@ -0,0 +1,127 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Azure.Storage.Shared; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + [TestFixture(ReadMethod.SyncArray)] + [TestFixture(ReadMethod.AsyncArray)] +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + [TestFixture(ReadMethod.SyncSpan)] + [TestFixture(ReadMethod.AsyncMemory)] +#endif + public class StructuredMessageStreamRoundtripTests + { + // Cannot just implement as passthru in the stream + // Must test each one + public enum ReadMethod + { + SyncArray, + AsyncArray, +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + SyncSpan, + AsyncMemory +#endif + } + + public ReadMethod Method { get; } + + public StructuredMessageStreamRoundtripTests(ReadMethod method) + { + Method = method; + } + + private class CopyStreamException : Exception + { + public long TotalCopied { get; } + + public CopyStreamException(Exception inner, long totalCopied) + : base($"Failed read after {totalCopied}-many bytes.", inner) + { + TotalCopied = totalCopied; + } + } + private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl + { + byte[] buf = new byte[bufferSize]; + int read; + long totalRead = 0; + try + { + switch (Method) + { + case ReadMethod.SyncArray: + while ((read = source.Read(buf, 0, bufferSize)) > 0) + { + totalRead += read; + destination.Write(buf, 0, read); + } + break; + case ReadMethod.AsyncArray: + while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) + { + totalRead += read; + await destination.WriteAsync(buf, 0, read); + } + break; +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + case ReadMethod.SyncSpan: + while ((read = source.Read(new Span(buf))) > 0) + { + totalRead += read; + destination.Write(new Span(buf, 0, read)); + } + break; + case ReadMethod.AsyncMemory: + while ((read = await source.ReadAsync(new 
Memory(buf))) > 0) + { + totalRead += read; + await destination.WriteAsync(new Memory(buf, 0, read)); + } + break; +#endif + } + destination.Flush(); + } + catch (Exception ex) + { + throw new CopyStreamException(ex, totalRead); + } + return totalRead; + } + + [Test] + [Pairwise] + public async Task RoundTrip( + [Values(2048, 2005)] int dataLength, + [Values(default, 512)] int? seglen, + [Values(8 * Constants.KB, 512, 530, 3)] int readLen, + [Values(true, false)] bool useCrc) + { + int segmentLength = seglen ?? int.MaxValue; + Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + + byte[] roundtripData; + using (MemoryStream source = new(originalData)) + using (Stream encode = new StructuredMessageEncodingStream(source, segmentLength, flags)) + using (Stream decode = StructuredMessageDecodingStream.WrapStream(encode).DecodedStream) + using (MemoryStream dest = new()) + { + await CopyStream(source, dest, readLen); + roundtripData = dest.ToArray(); + } + + Assert.That(originalData.SequenceEqual(roundtripData)); + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs new file mode 100644 index 0000000000000..b4f1dfe178246 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers.Binary; +using System.Collections.Generic; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + public class StructuredMessageTests + { + [TestCase(1024, Flags.None, 2)] + [TestCase(2000, Flags.StorageCrc64, 4)] + public void EncodeStreamHeader(int messageLength, int flags, int numSegments) + { + Span encoding = new(new byte[V1_0.StreamHeaderLength]); + V1_0.WriteStreamHeader(encoding, messageLength, (Flags)flags, numSegments); + + Assert.That(encoding[0], Is.EqualTo((byte)1)); + Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(1, 8)), Is.EqualTo(messageLength)); + Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(9, 2)), Is.EqualTo(flags)); + Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(11, 2)), Is.EqualTo(numSegments)); + } + + [TestCase(V1_0.StreamHeaderLength)] + [TestCase(V1_0.StreamHeaderLength + 1)] + [TestCase(V1_0.StreamHeaderLength - 1)] + public void EncodeStreamHeaderRejectBadBufferSize(int bufferSize) + { + Random r = new(); + byte[] encoding = new byte[bufferSize]; + + void Action() => V1_0.WriteStreamHeader(encoding, r.Next(2, int.MaxValue), Flags.StorageCrc64, r.Next(2, int.MaxValue)); + if (bufferSize < V1_0.StreamHeaderLength) + { + Assert.That(Action, Throws.ArgumentException); + } + else + { + Assert.That(Action, Throws.Nothing); + } + } + + [TestCase(1, 1024)] + [TestCase(5, 39578)] + public void EncodeSegmentHeader(int segmentNum, int contentLength) + { + Span encoding = new(new byte[V1_0.SegmentHeaderLength]); + V1_0.WriteSegmentHeader(encoding, segmentNum, contentLength); + + Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(0, 2)), Is.EqualTo(segmentNum)); + Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(2, 8)), Is.EqualTo(contentLength)); + } + + [TestCase(V1_0.SegmentHeaderLength)] + [TestCase(V1_0.SegmentHeaderLength + 1)] + 
[TestCase(V1_0.SegmentHeaderLength - 1)] + public void EncodeSegmentHeaderRejectBadBufferSize(int bufferSize) + { + Random r = new(); + byte[] encoding = new byte[bufferSize]; + + void Action() => V1_0.WriteSegmentHeader(encoding, r.Next(1, int.MaxValue), r.Next(2, int.MaxValue)); + if (bufferSize < V1_0.SegmentHeaderLength) + { + Assert.That(Action, Throws.ArgumentException); + } + else + { + Assert.That(Action, Throws.Nothing); + } + } + + [TestCase(true)] + [TestCase(false)] + public void EncodeSegmentFooter(bool useCrc) + { + Span encoding = new(new byte[Crc64Length]); + Span crc = useCrc ? new Random().NextBytesInline(Crc64Length) : default; + V1_0.WriteSegmentFooter(encoding, crc); + + if (useCrc) + { + Assert.That(encoding.SequenceEqual(crc), Is.True); + } + else + { + Assert.That(encoding.SequenceEqual(new Span(new byte[Crc64Length])), Is.True); + } + } + + [TestCase(Crc64Length)] + [TestCase(Crc64Length + 1)] + [TestCase(Crc64Length - 1)] + public void EncodeSegmentFooterRejectBadBufferSize(int bufferSize) + { + byte[] encoding = new byte[bufferSize]; + byte[] crc = new byte[Crc64Length]; + new Random().NextBytes(crc); + + void Action() => V1_0.WriteSegmentFooter(encoding, crc); + if (bufferSize < Crc64Length) + { + Assert.That(Action, Throws.ArgumentException); + } + else + { + Assert.That(Action, Throws.Nothing); + } + } + } +} diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj index 6098dcd8ba33d..93e7432f186e3 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj @@ -37,6 +37,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj 
b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj index f8b62d0b947e2..214903eb5f9c4 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj @@ -22,11 +22,15 @@ + + + + @@ -40,6 +44,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj index a6abde432473f..66a9fea0861a2 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj @@ -35,6 +35,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj index 8e574bca36a48..d75775beceafd 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj @@ -27,6 +27,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj index 5aaf548493b15..dd30659cf0a5d 100644 --- a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 diff --git 
a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj index 8afd7735a0168..21a1ea45f92a0 100644 --- a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj @@ -34,6 +34,7 @@ + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/assets.json b/sdk/storage/Azure.Storage.Files.DataLake/assets.json index 442889d04be63..8949234de7a1a 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/assets.json +++ b/sdk/storage/Azure.Storage.Files.DataLake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.DataLake", - "Tag": "net/storage/Azure.Storage.Files.DataLake_186c14971d" + "Tag": "net/storage/Azure.Storage.Files.DataLake_4b543941a8" } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj index 7adb79645b0a9..ccd45baaff251 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj @@ -42,6 +42,7 @@ + @@ -81,6 +82,10 @@ + + + + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs index 3d2bd710e25aa..93ca4c3f9a1fd 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs @@ -16,6 +16,7 @@ using Azure.Storage.Common; using Azure.Storage.Files.DataLake.Models; using Azure.Storage.Sas; +using Azure.Storage.Shared; using Metadata = System.Collections.Generic.IDictionary; namespace Azure.Storage.Files.DataLake @@ -2332,13 
+2333,39 @@ internal virtual async Task AppendInternal( using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(DataLakeFileClient))) { // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (content != null && + validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? 
new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = content.Length - content.Position; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content?.WithNoDispose().WithProgress(progressHandler); + } - content = content?.WithNoDispose().WithProgress(progressHandler); ClientConfiguration.Pipeline.LogMethodEnter( nameof(DataLakeFileClient), message: @@ -2373,6 +2400,8 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, @@ -2392,6 +2421,8 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs index 719932d5cd500..4144d908b7549 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs @@ -33,7 +33,7 @@ internal partial class FileSystemRestClient /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. /// The value must be "filesystem" for all filesystem operations. The default value is "filesystem". - /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , , or is null. public FileSystemRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string resource, string version) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs index 6ec456a438564..502dd557f4822 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs @@ -29,5 +29,7 @@ public PathAppendDataHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// If the lease was auto-renewed with this request. public bool? 
LeaseRenewed => _response.Headers.TryGetValue("x-ms-lease-renewed", out bool? value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs index 6b1e970bd2fc8..d328c3079de6b 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs @@ -30,7 +30,7 @@ internal partial class PathRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or -1 for infinite lease. /// , , or is null. public PathRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, int? xMsLeaseDuration = null) @@ -293,7 +293,7 @@ public ResponseWithHeaders Create(int? timeout = null, PathRe } } - internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout, int? maxRecords, string continuation, bool? forceFlag, long? position, bool? retainUncommittedData, bool? close, long? 
contentLength, byte[] contentMD5, string leaseId, string cacheControl, string contentType, string contentDisposition, string contentEncoding, string contentLanguage, string properties, string owner, string group, string permissions, string acl, string ifMatch, string ifNoneMatch, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince) + internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout, int? maxRecords, string continuation, bool? forceFlag, long? position, bool? retainUncommittedData, bool? close, long? contentLength, byte[] contentMD5, string leaseId, string cacheControl, string contentType, string contentDisposition, string contentEncoding, string contentLanguage, string properties, string owner, string group, string permissions, string acl, string ifMatch, string ifNoneMatch, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -396,6 +396,14 @@ internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessC { request.Headers.Add("If-Unmodified-Since", ifUnmodifiedSince.Value, "R"); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/json"); if (contentLength != null) { @@ -434,17 +442,19 @@ internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessC /// Specify an ETag value to operate only on blobs without a matching value. /// Specify this header value to operate only on a blob if it has been modified since the specified date/time. /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
+ /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. Concurrent writes to the same file using multiple clients are not supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - public async Task> UpdateAsync(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, CancellationToken cancellationToken = default) + public async Task> UpdateAsync(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? 
contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince); + using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PathUpdateHeaders(message.Response); switch (message.Response.Status) @@ -491,17 +501,19 @@ public async Task Specify an ETag value to operate only on blobs without a matching value. /// Specify this header value to operate only on a blob if it has been modified since the specified date/time. /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
+ /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. Concurrent writes to the same file using multiple clients are not supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - public ResponseWithHeaders Update(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Update(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? 
contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince); + using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new PathUpdateHeaders(message.Response); switch (message.Response.Status) @@ -1315,7 +1327,7 @@ public ResponseWithHeaders FlushData(int? timeout = null, } } - internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? timeout, long? contentLength, byte[] transactionalContentHash, byte[] transactionalContentCrc64, string leaseId, DataLakeLeaseAction? leaseAction, long? 
leaseDuration, string proposedLeaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, bool? flush) + internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? timeout, long? contentLength, byte[] transactionalContentHash, byte[] transactionalContentCrc64, string leaseId, DataLakeLeaseAction? leaseAction, long? leaseDuration, string proposedLeaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, bool? flush, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -1369,6 +1381,14 @@ internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? t { request.Headers.Add("x-ms-encryption-algorithm", encryptionAlgorithm.Value.ToSerialString()); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/json"); if (contentLength != null) { @@ -1398,16 +1418,18 @@ internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? t /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// If file should be flushed after the append. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. 
- public async Task> AppendDataAsync(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, CancellationToken cancellationToken = default) + public async Task> AppendDataAsync(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush); + using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PathAppendDataHeaders(message.Response); switch (message.Response.Status) @@ -1434,16 +1456,18 @@ public async Task> AppendDataAsync(St /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// If file should be flushed after the append. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders AppendData(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? 
leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders AppendData(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush); + using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new PathAppendDataHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs index 35668cb1c3a1d..026c78e72481a 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs @@ -43,5 +43,7 @@ 
public PathUpdateHeaders(Response response) public string XMsContinuation => _response.Headers.TryGetValue("x-ms-continuation", out string value) ? value : null; /// The version of the REST protocol used to process the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs index 118595b4d87d1..b00fa12238f4e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs @@ -28,7 +28,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md index 4121ebab9932e..58f5c3d055d3b 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/5da3c08b92d05858b728b013b69502dc93485373/specification/storage/data-plane/Azure.Storage.Files.DataLake/stable/2023-05-03/DataLakeStorage.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/794c6178bc06c6c9dceb139e9f9d1b35b1a99701/specification/storage/data-plane/Azure.Storage.Files.DataLake/preview/2025-01-05/DataLakeStorage.json generation1-convenience-client: true modelerfour: seal-single-value-enum-by-default: true @@ -23,7 +23,7 @@ directive: if (property.includes('/{filesystem}/{path}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem") && false == param['$ref'].endsWith("#/parameters/Path"))}); - } + } else if (property.includes('/{filesystem}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem"))}); @@ -127,7 +127,7 @@ directive: } $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{filesystem}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj index bef13bb21a1c6..1fa78690077be 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj @@ -6,6 +6,9 @@ Microsoft Azure.Storage.Files.DataLake client library tests false + + DataLakeSDK + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs index 
4bdefdbf756cd..5067f98517bd2 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs @@ -34,7 +34,10 @@ protected override async Task> Get StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingFileSystem = await ClientBuilder.GetNewFileSystem(service: service, fileSystemName: containerName); + var disposingFileSystem = await ClientBuilder.GetNewFileSystem( + service: service, + fileSystemName: containerName, + publicAccessType: PublicAccessType.None); disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index 88fbd1326e018..0cd25700dd1d7 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -796,6 +796,7 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index 88fbd1326e018..0cd25700dd1d7 100644 --- 
a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -796,6 +796,7 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index 9ca749681b79e..184d64e873031 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": "net/storage/Azure.Storage.Files.Shares_14e0fa0c22" + "Tag": "net/storage/Azure.Storage.Files.Shares_b3158cd2dd" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj index 60f6f200fd402..547cccbd0a5c3 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 @@ -42,6 +42,7 @@ + @@ -85,6 +86,11 @@ + + + + + diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs index 961c6ff47ce59..8a2edb8b99134 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs @@ -33,7 +33,7 @@ internal partial class 
DirectoryRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// If true, the trailing dot will not be trimmed from the target URI. /// Valid value is backup. /// If true, the trailing dot will not be trimmed from the source URI. diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs index 61384dee810d4..c4d7056a5cfa3 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs @@ -79,5 +79,9 @@ public FileDownloadHeaders(Response response) public ShareLeaseState? LeaseState => _response.Headers.TryGetValue("x-ms-lease-state", out string value) ? value.ToShareLeaseState() : null; /// The current lease status of the file. public ShareLeaseStatus? LeaseStatus => _response.Headers.TryGetValue("x-ms-lease-status", out string value) ? value.ToShareLeaseStatus() : null; + /// Indicates the response body contains a structured message and specifies the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; + /// The length of the blob/file content inside the message body when the response body is returned as a structured message. Will always be smaller than Content-Length. + public long? StructuredContentLength => _response.Headers.TryGetValue("x-ms-structured-content-length", out long? value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs index d4b584e6660ee..093de99705c4d 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs @@ -34,7 +34,7 @@ internal partial class FileRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// Only update is supported: - Update: Writes the bytes downloaded from the source url into the specified range. The default value is "update". /// If true, the trailing dot will not be trimmed from the target URI. /// Valid value is backup. @@ -204,7 +204,7 @@ public ResponseWithHeaders Create(long fileContentLength, str } } - internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? rangeGetContentMD5, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? rangeGetContentMD5, string structuredBodyType, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -230,6 +230,10 @@ internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? 
ran { request.Headers.Add("x-ms-range-get-content-md5", rangeGetContentMD5.Value); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } if (shareFileRequestConditions?.LeaseId != null) { request.Headers.Add("x-ms-lease-id", shareFileRequestConditions.LeaseId); @@ -246,11 +250,12 @@ internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? ran /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// Return file data only from the specified byte range. /// When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Parameter group. /// The cancellation token to use. - public async Task> DownloadAsync(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> DownloadAsync(int? timeout = null, string range = null, bool? 
rangeGetContentMD5 = null, string structuredBodyType = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, shareFileRequestConditions); + using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, structuredBodyType, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileDownloadHeaders(message.Response); switch (message.Response.Status) @@ -270,11 +275,12 @@ public async Task> DownloadAsyn /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// Return file data only from the specified byte range. /// When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Parameter group. /// The cancellation token to use. - public ResponseWithHeaders Download(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Download(int? timeout = null, string range = null, bool? 
rangeGetContentMD5 = null, string structuredBodyType = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, shareFileRequestConditions); + using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, structuredBodyType, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileDownloadHeaders(message.Response); switch (message.Response.Status) @@ -945,7 +951,7 @@ public ResponseWithHeaders BreakLease(int? timeout = null } } - internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout, byte[] contentMD5, FileLastWrittenMode? fileLastWrittenMode, Stream optionalbody, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout, byte[] contentMD5, FileLastWrittenMode? fileLastWrittenMode, string structuredBodyType, long? structuredContentLength, Stream optionalbody, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -977,6 +983,14 @@ internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteT { request.Headers.Add("x-ms-file-request-intent", _fileRequestIntent.Value.ToString()); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); if (optionalbody != null) { @@ -998,18 +1012,20 @@ internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteT /// The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is specified, the File service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). /// If the file last write time should be preserved or overwritten. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// Initial data. /// Parameter group. /// The cancellation token to use. /// is null. - public async Task> UploadRangeAsync(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> UploadRangeAsync(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, string structuredBodyType = null, long? 
structuredContentLength = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (range == null) { throw new ArgumentNullException(nameof(range)); } - using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, optionalbody, shareFileRequestConditions); + using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, structuredBodyType, structuredContentLength, optionalbody, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileUploadRangeHeaders(message.Response); switch (message.Response.Status) @@ -1028,18 +1044,20 @@ public async Task> UploadRangeAsync( /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is specified, the File service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). /// If the file last write time should be preserved or overwritten. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// Initial data. /// Parameter group. /// The cancellation token to use. /// is null. 
- public ResponseWithHeaders UploadRange(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders UploadRange(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, string structuredBodyType = null, long? structuredContentLength = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (range == null) { throw new ArgumentNullException(nameof(range)); } - using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, optionalbody, shareFileRequestConditions); + using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, structuredBodyType, structuredContentLength, optionalbody, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileUploadRangeHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs index db079c2692663..322bfcd1b6d83 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs @@ -27,5 +27,7 @@ public FileUploadRangeHeaders(Response response) public bool? IsServerEncrypted => _response.Headers.TryGetValue("x-ms-request-server-encrypted", out bool? value) ? value : null; /// Last write time for the file. 
public DateTimeOffset? FileLastWriteTime => _response.Headers.TryGetValue("x-ms-file-last-write-time", out DateTimeOffset? value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs index ef4c21b9a33c7..fe5ea495a7a15 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs @@ -31,7 +31,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// Valid value is backup. /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, ShareTokenIntent? fileRequestIntent = null) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs index 599aacf2c6287..3012d3d8735b1 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs @@ -32,7 +32,7 @@ internal partial class ShareRestClient /// The handler for diagnostic messaging in the client. 
/// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// Valid value is backup. /// , , or is null. public ShareRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, ShareTokenIntent? fileRequestIntent = null) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs index 0165af94435a0..4037cbdfd875e 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs @@ -38,6 +38,12 @@ public partial class ShareFileDownloadInfo : IDisposable, IDownloadedContent public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays + /// + /// When requested using , this value contains the CRC for the download blob range. + /// This value may only become populated once the network stream is fully consumed. 
+ /// + public byte[] ContentCrc { get; internal set; } + /// /// Details returned when downloading a file /// diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs index f776384d06add..0b27510aaa6c4 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs @@ -17,20 +17,5 @@ public static InvalidOperationException FileOrShareMissing( string fileClient, string shareClient) => new InvalidOperationException($"{leaseClient} requires either a {fileClient} or {shareClient}"); - - public static void AssertAlgorithmSupport(StorageChecksumAlgorithm? algorithm) - { - StorageChecksumAlgorithm resolved = (algorithm ?? StorageChecksumAlgorithm.None).ResolveAuto(); - switch (resolved) - { - case StorageChecksumAlgorithm.None: - case StorageChecksumAlgorithm.MD5: - return; - case StorageChecksumAlgorithm.StorageCrc64: - throw new ArgumentException("Azure File Shares do not support CRC-64."); - default: - throw new ArgumentException($"{nameof(StorageChecksumAlgorithm)} does not support value {Enum.GetName(typeof(StorageChecksumAlgorithm), resolved) ?? ((int)resolved).ToString(CultureInfo.InvariantCulture)}."); - } - } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs index 2d58482950b9a..23c5fd40d2db1 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs @@ -2385,51 +2385,70 @@ private async Task> DownloadInternal( // Wrap the response Content in a RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. 
- initialResponse.Value.Content = RetriableStream.Create( - stream, - startOffset => - { - (Response Response, Stream ContentStream) = StartDownloadAsync( - range, - validationOptions, - conditions, - startOffset, - async, - cancellationToken) - .EnsureCompleted(); - if (etag != Response.GetRawResponse().Headers.ETag) - { - throw new ShareFileModifiedException( - "File has been modified concurrently", - Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); - } - return ContentStream; - }, - async startOffset => + async ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) + { + (Response response, Stream contentStream) = await StartDownloadAsync( + range, + validationOptions, + conditions, + offset, + async, + cancellationToken).ConfigureAwait(false); + if (etag != response.GetRawResponse().Headers.ETag) { - (Response Response, Stream ContentStream) = await StartDownloadAsync( - range, - validationOptions, - conditions, - startOffset, - async, - cancellationToken) - .ConfigureAwait(false); - if (etag != Response.GetRawResponse().Headers.ETag) + throw new ShareFileModifiedException( + "File has been modified concurrently", + Uri, etag, response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); + } + return response; + } + async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( + long offset, bool async, CancellationToken cancellationToken) + { + Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); + return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.ContentLength); + } + + if (initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( + initialResponse.Value.Content, 
initialResponse.Value.ContentLength); + initialResponse.Value.Content = new StructuredMessageDecodingRetriableStream( + decodingStream, + decodedData, + StructuredMessage.Flags.StorageCrc64, + startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) + .EnsureCompleted(), + async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false), + decodedData => { - throw new ShareFileModifiedException( - "File has been modified concurrently", - Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); - } - return ContentStream; - }, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); + initialResponse.Value.ContentCrc = new byte[StructuredMessage.Crc64Length]; + decodedData.Crc.WriteCrc64(initialResponse.Value.ContentCrc); + }, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } + else + { + initialResponse.Value.Content = RetriableStream.Create( + initialResponse.Value.Content, + startOffset => Factory(startOffset, async: false, cancellationToken) + .EnsureCompleted().Value.Content, + async startOffset => (await Factory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false)).Value.Content, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } // buffer response stream and ensure it matches the transactional hash if any // Storage will not return a hash for payload >4MB, so this buffer is capped similarly // hashing is opt-in, so this buffer is part of that opt-in - if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) + if (validationOptions != default && + validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && + validationOptions.AutoValidateChecksum && + // structured message decoding does the validation for us + 
!initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)initialResponse.Value.ContentLength); @@ -2512,8 +2531,6 @@ await ContentHasher.AssertResponseHashMatchInternal( bool async = true, CancellationToken cancellationToken = default) { - ShareErrors.AssertAlgorithmSupport(transferValidationOverride?.ChecksumAlgorithm); - // calculation gets illegible with null coalesce; just pre-initialize var pageRange = range; pageRange = new HttpRange( @@ -2523,13 +2540,27 @@ await ContentHasher.AssertResponseHashMatchInternal( (long?)null); ClientConfiguration.Pipeline.LogTrace($"Download {Uri} with range: {pageRange}"); - ResponseWithHeaders response; + bool? rangeGetContentMD5 = null; + string structuredBodyType = null; + switch (transferValidationOverride?.ChecksumAlgorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + rangeGetContentMD5 = true; + break; + case StorageChecksumAlgorithm.StorageCrc64: + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + break; + default: + break; + } + ResponseWithHeaders response; if (async) { response = await FileRestClient.DownloadAsync( range: pageRange == default ? null : pageRange.ToString(), - rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentMD5: rangeGetContentMD5, + structuredBodyType: structuredBodyType, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -2538,7 +2569,8 @@ await ContentHasher.AssertResponseHashMatchInternal( { response = FileRestClient.Download( range: pageRange == default ? null : pageRange.ToString(), - rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? 
true : null, + rangeGetContentMD5: rangeGetContentMD5, + structuredBodyType: structuredBodyType, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } @@ -4612,7 +4644,6 @@ internal async Task> UploadRangeInternal( CancellationToken cancellationToken) { UploadTransferValidationOptions validationOptions = transferValidationOverride ?? ClientConfiguration.TransferValidation.Upload; - ShareErrors.AssertAlgorithmSupport(validationOptions?.ChecksumAlgorithm); using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareFileClient))) { @@ -4628,14 +4659,38 @@ internal async Task> UploadRangeInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? 
new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -4648,6 +4703,8 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -4661,6 +4718,8 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 43022bc56d1c1..d7ed8ae3216df 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/98b600498947073c18c2ac5eb7c3c658db5a1a59/specification/storage/data-plane/Microsoft.FileStorage/stable/2024-11-04/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/c8eee2dfa99d517e12e6ac8c96b14b707bb3c8eb/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true @@ -25,7 +25,7 @@ directive: if (property.includes('/{shareName}/{directory}/{fileName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath") && false == param['$ref'].endsWith("#/parameters/FilePath"))}); - } + } else if (property.includes('/{shareName}/{directory}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath"))}); @@ -46,7 +46,7 @@ directive: $.Metrics.type = "object"; ``` -### Times aren't required +### Times aren't required ``` yaml directive: - from: swagger-document diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj index 398a4b6367489..d09dd8fe8949f 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj @@ -17,6 +17,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs 
b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs index 3dcdb21f27b36..9fd8905e388b1 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs @@ -64,10 +64,6 @@ protected override async Task GetResourceClientAsync( private void AssertSupportsHashAlgorithm(StorageChecksumAlgorithm algorithm) { - if (algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) - { - TestHelper.AssertInconclusiveRecordingFriendly(Recording.Mode, "Azure File Share does not support CRC64."); - } } protected override async Task UploadPartitionAsync(ShareFileClient client, Stream source, UploadTransferValidationOptions transferValidation) @@ -147,8 +143,44 @@ protected override async Task SetupDataAsync(ShareFileClient client, Stream data public override void TestAutoResolve() { Assert.AreEqual( - StorageChecksumAlgorithm.MD5, + StorageChecksumAlgorithm.StorageCrc64, TransferValidationOptionsExtensions.ResolveAuto(StorageChecksumAlgorithm.Auto)); } + + [Test] + public async Task StructuredMessagePopulatesCrcDownloadStreaming() + { + await using DisposingShare disposingContainer = await ClientBuilder.GetTestShareAsync(); + + const int dataLength = Constants.KB; + byte[] data = GetRandomBuffer(dataLength); + byte[] dataCrc = new byte[8]; + StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); + + ShareFileClient file = disposingContainer.Container.GetRootDirectoryClient().GetFileClient(GetNewResourceName()); + await file.CreateAsync(data.Length); + await file.UploadAsync(new MemoryStream(data)); + + Response response = await file.DownloadAsync(new ShareFileDownloadOptions() + { + TransferValidation = new DownloadTransferValidationOptions + { + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + } + }); + + // crc is not present until response stream is consumed + 
Assert.That(response.Value.ContentCrc, Is.Null); + + byte[] downloadedData; + using (MemoryStream ms = new()) + { + await response.Value.Content.CopyToAsync(ms); + downloadedData = ms.ToArray(); + } + + Assert.That(response.Value.ContentCrc, Is.EqualTo(dataCrc)); + Assert.That(downloadedData, Is.EqualTo(data)); + } } } diff --git a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj index e0a6fab3c753b..4d0334255f041 100644 --- a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj +++ b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj @@ -21,6 +21,7 @@ + From 034b6451dcf406bf44f49d77e5747df7ad0c6dc4 Mon Sep 17 00:00:00 2001 From: Amanda Nguyen <48961492+amnguye@users.noreply.github.com> Date: Tue, 20 Aug 2024 14:49:27 -0700 Subject: [PATCH 03/25] Removed ObserveStructuredMessagePolicy from being compiled into the Storage Test Packages (#45608) --- .../samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj | 1 + .../Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj | 3 ++- .../samples/Azure.Storage.Blobs.Samples.Tests.csproj | 1 + .../samples/Azure.Storage.Common.Samples.Tests.csproj | 1 + .../Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj | 1 + ...zure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj | 3 ++- .../samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj | 1 + .../samples/Azure.Storage.Files.Shares.Samples.Tests.csproj | 1 + .../samples/Azure.Storage.Queues.Samples.Tests.csproj | 1 + 9 files changed, 11 insertions(+), 2 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj index 3dea34a02b7ea..6009a5336b8b9 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj +++ 
b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj @@ -17,6 +17,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj index 7711cae537db6..6f8fcaf6528b3 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.Blobs.ChangeFeed client library samples @@ -14,6 +14,7 @@ + diff --git a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj index 77fd767c3486c..568dd6cba9516 100644 --- a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj @@ -16,6 +16,7 @@ + diff --git a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj index 7d454aeaa0af2..aeca4497a8770 100644 --- a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj @@ -19,6 +19,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj index 7ab901e963e03..30d4b1f79daaf 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj +++ 
b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj @@ -11,6 +11,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj index 9cde066f64eb7..6a472b9f74158 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.DataMovement.Files.Shares client library samples @@ -11,6 +11,7 @@ + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj index c230f2ed8fa20..eecbe0543fe87 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj @@ -15,6 +15,7 @@ + diff --git a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj index 0bcec423c144d..d1efeca0c2da2 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj @@ -16,6 +16,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj index 
f9ed70da2e75d..12794e190f4e1 100644 --- a/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj @@ -16,6 +16,7 @@ + PreserveNewest From 3f9a40479fb1dd4bebe5662c7b5f81b7eea306a7 Mon Sep 17 00:00:00 2001 From: Sean McCullough Date: Tue, 20 Aug 2024 19:34:44 -0500 Subject: [PATCH 04/25] Fixed autorest.md after Content Validation merge --- sdk/storage/Azure.Storage.Blobs/src/autorest.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 34efb5857c4a4..a42f71dbc65d2 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. ``` yaml input-file: - - https://github.com/Azure/azure-rest-api-specs/blob/794c6178bc06c6c9dceb139e9f9d1b35b1a99701/specification/storage/data-plane/Microsoft.BlobStorage/preview/2025-01-05/blob.json + - https://raw.githubusercontent.com/jalauzon-msft/azure-rest-api-specs/c8eee2dfa99d517e12e6ac8c96b14b707bb3c8eb/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true From e40cbc3581e1c62d01cd4662ec5bbe5d4a088969 Mon Sep 17 00:00:00 2001 From: Sean McCullough Date: Tue, 20 Aug 2024 19:51:08 -0500 Subject: [PATCH 05/25] Export API --- .../Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs | 6 ++++-- .../api/Azure.Storage.Blobs.netstandard2.0.cs | 6 ++++-- .../api/Azure.Storage.Blobs.netstandard2.1.cs | 6 ++++-- .../api/Azure.Storage.Files.DataLake.net6.0.cs | 2 +- .../api/Azure.Storage.Files.DataLake.netstandard2.0.cs | 2 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 2 +- .../api/Azure.Storage.Files.Shares.netstandard2.0.cs | 2 +- 
.../Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs | 4 ++-- .../api/Azure.Storage.Queues.netstandard2.0.cs | 4 ++-- .../api/Azure.Storage.Queues.netstandard2.1.cs | 4 ++-- 10 files changed, 22 insertions(+), 16 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index fb52e93f85a56..f32f82092bee2 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -562,6 +562,7 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -583,6 +584,7 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1833,7 +1835,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index fb52e93f85a56..f32f82092bee2 100644 --- 
a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -562,6 +562,7 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -583,6 +584,7 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1833,7 +1835,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public 
SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index fb52e93f85a56..f32f82092bee2 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -562,6 +562,7 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -583,6 +584,7 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1833,7 +1835,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index d2ced44d996eb..8372ae8254fa2 
100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index d2ced44d996eb..8372ae8254fa2 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? 
Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index 0cd25700dd1d7..f5ebf4e14087c 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -114,7 +114,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index 0cd25700dd1d7..f5ebf4e14087c 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -114,7 +114,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs index 25839b91776ca..39fb18bae408b 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs @@ -74,7 +74,7 @@ public QueueClient(System.Uri queueUri, Azure.Storage.StorageSharedKeyCredential } public partial class QueueClientOptions : Azure.Core.ClientOptions { - public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2024_11_04) { } + public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Queues.Models.QueueAudience? Audience { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } public System.Uri GeoRedundantSecondaryUri { get { throw null; } set { } } @@ -433,7 +433,7 @@ public event System.EventHandler Date: Tue, 20 Aug 2024 20:50:18 -0500 Subject: [PATCH 06/25] Revert "Export API" This reverts commit e40cbc3581e1c62d01cd4662ec5bbe5d4a088969. 
--- .../Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs | 6 ++---- .../api/Azure.Storage.Blobs.netstandard2.0.cs | 6 ++---- .../api/Azure.Storage.Blobs.netstandard2.1.cs | 6 ++---- .../api/Azure.Storage.Files.DataLake.net6.0.cs | 2 +- .../api/Azure.Storage.Files.DataLake.netstandard2.0.cs | 2 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 2 +- .../api/Azure.Storage.Files.Shares.netstandard2.0.cs | 2 +- .../Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs | 4 ++-- .../api/Azure.Storage.Queues.netstandard2.0.cs | 4 ++-- .../api/Azure.Storage.Queues.netstandard2.1.cs | 4 ++-- 10 files changed, 16 insertions(+), 22 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index f32f82092bee2..fb52e93f85a56 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -562,7 +562,6 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -584,7 +583,6 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1835,7 +1833,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index f32f82092bee2..fb52e93f85a56 100644 --- 
a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -562,7 +562,6 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -584,7 +583,6 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1835,7 +1833,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public 
SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index f32f82092bee2..fb52e93f85a56 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -562,7 +562,6 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -584,7 +583,6 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1835,7 +1833,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index 8372ae8254fa2..d2ced44d996eb 
100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index 8372ae8254fa2..d2ced44d996eb 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? 
Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index f5ebf4e14087c..0cd25700dd1d7 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -114,7 +114,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index f5ebf4e14087c..0cd25700dd1d7 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -114,7 +114,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs index 39fb18bae408b..25839b91776ca 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs @@ -74,7 +74,7 @@ public QueueClient(System.Uri queueUri, Azure.Storage.StorageSharedKeyCredential } public partial class QueueClientOptions : Azure.Core.ClientOptions { - public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2025_01_05) { } + public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Queues.Models.QueueAudience? Audience { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } public System.Uri GeoRedundantSecondaryUri { get { throw null; } set { } } @@ -433,7 +433,7 @@ public event System.EventHandler Date: Tue, 20 Aug 2024 20:50:32 -0500 Subject: [PATCH 07/25] Revert "Fixed autorest.md after Content Validation merge" This reverts commit 3f9a40479fb1dd4bebe5662c7b5f81b7eea306a7. --- sdk/storage/Azure.Storage.Blobs/src/autorest.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index a42f71dbc65d2..34efb5857c4a4 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/jalauzon-msft/azure-rest-api-specs/c8eee2dfa99d517e12e6ac8c96b14b707bb3c8eb/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json + - https://github.com/Azure/azure-rest-api-specs/blob/794c6178bc06c6c9dceb139e9f9d1b35b1a99701/specification/storage/data-plane/Microsoft.BlobStorage/preview/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true From c30ccb9194d28d8b6749ffb4e56428d1921300f7 Mon Sep 17 00:00:00 2001 From: Sean McCullough Date: Tue, 20 Aug 2024 20:50:44 -0500 Subject: [PATCH 08/25] Revert "Removed ObserveStructuredMessagePolicy from being compiled into the Storage Test Packages (#45608)" This reverts commit 034b6451dcf406bf44f49d77e5747df7ad0c6dc4. --- .../samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj | 1 - .../Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj | 3 +-- .../samples/Azure.Storage.Blobs.Samples.Tests.csproj | 1 - .../samples/Azure.Storage.Common.Samples.Tests.csproj | 1 - .../Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj | 1 - ...zure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj | 3 +-- .../samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj | 1 - .../samples/Azure.Storage.Files.Shares.Samples.Tests.csproj | 1 - .../samples/Azure.Storage.Queues.Samples.Tests.csproj | 1 - 9 files changed, 2 insertions(+), 11 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj index 6009a5336b8b9..3dea34a02b7ea 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj @@ -17,7 +17,6 @@ - PreserveNewest diff --git 
a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj index 6f8fcaf6528b3..7711cae537db6 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.Blobs.ChangeFeed client library samples @@ -14,7 +14,6 @@ - diff --git a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj index 568dd6cba9516..77fd767c3486c 100644 --- a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj @@ -16,7 +16,6 @@ - diff --git a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj index aeca4497a8770..7d454aeaa0af2 100644 --- a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj @@ -19,7 +19,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj index 30d4b1f79daaf..7ab901e963e03 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj @@ -11,7 +11,6 @@ - diff --git 
a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj index 6a472b9f74158..9cde066f64eb7 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.DataMovement.Files.Shares client library samples @@ -11,7 +11,6 @@ - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj index eecbe0543fe87..c230f2ed8fa20 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj @@ -15,7 +15,6 @@ - diff --git a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj index d1efeca0c2da2..0bcec423c144d 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj @@ -16,7 +16,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj index 12794e190f4e1..f9ed70da2e75d 100644 --- a/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj +++ 
b/sdk/storage/Azure.Storage.Queues/samples/Azure.Storage.Queues.Samples.Tests.csproj @@ -16,7 +16,6 @@ - PreserveNewest From 770b9e37f4ed06387dd3657a23d7806d4da1374d Mon Sep 17 00:00:00 2001 From: Sean McCullough Date: Tue, 20 Aug 2024 21:03:34 -0500 Subject: [PATCH 09/25] Revert "Structured message cherrypick stg96 (#45496)" This reverts commit 8e9715845d0453e9eb092de45becc1d4856a7224. --- .../Azure.Storage.Blobs.Batch.Tests.csproj | 3 +- ...zure.Storage.Blobs.ChangeFeed.Tests.csproj | 3 +- .../api/Azure.Storage.Blobs.net6.0.cs | 1 - .../api/Azure.Storage.Blobs.netstandard2.0.cs | 1 - .../api/Azure.Storage.Blobs.netstandard2.1.cs | 1 - sdk/storage/Azure.Storage.Blobs/assets.json | 2 +- .../src/AppendBlobClient.cs | 45 +- .../src/Azure.Storage.Blobs.csproj | 6 - .../Azure.Storage.Blobs/src/BlobBaseClient.cs | 110 +--- .../src/BlobClientOptions.cs | 2 - .../src/BlockBlobClient.cs | 92 +-- .../Generated/AppendBlobAppendBlockHeaders.cs | 2 - .../src/Generated/AppendBlobRestClient.cs | 24 +- .../src/Generated/BlobDownloadHeaders.cs | 4 - .../src/Generated/BlobRestClient.cs | 18 +- .../src/Generated/BlockBlobRestClient.cs | 46 +- .../Generated/BlockBlobStageBlockHeaders.cs | 2 - .../src/Generated/BlockBlobUploadHeaders.cs | 2 - .../src/Generated/ContainerRestClient.cs | 2 +- .../src/Generated/PageBlobRestClient.cs | 24 +- .../Generated/PageBlobUploadPagesHeaders.cs | 2 - .../src/Generated/ServiceRestClient.cs | 2 +- .../src/Models/BlobDownloadDetails.cs | 8 - .../src/Models/BlobDownloadInfo.cs | 10 - .../src/Models/BlobDownloadStreamingResult.cs | 8 - .../Azure.Storage.Blobs/src/PageBlobClient.cs | 49 +- .../src/PartitionedDownloader.cs | 50 +- .../Azure.Storage.Blobs/src/autorest.md | 6 +- .../tests/Azure.Storage.Blobs.Tests.csproj | 3 - .../BlobBaseClientTransferValidationTests.cs | 113 ++-- .../tests/PartitionedDownloaderTests.cs | 2 +- .../src/Shared/ChecksumExtensions.cs | 22 - .../src/Shared/Constants.cs | 9 - .../src/Shared/Errors.Clients.cs | 10 - 
.../Azure.Storage.Common/src/Shared/Errors.cs | 19 - .../src/Shared/LazyLoadingReadOnlyStream.cs | 40 +- .../src/Shared/PooledMemoryStream.cs | 2 +- .../src/Shared/StorageCrc64Composer.cs | 48 +- .../StorageRequestValidationPipelinePolicy.cs | 29 - .../src/Shared/StorageVersionExtensions.cs | 2 +- .../src/Shared/StreamExtensions.cs | 22 +- .../src/Shared/StructuredMessage.cs | 244 -------- ...tructuredMessageDecodingRetriableStream.cs | 264 --------- .../Shared/StructuredMessageDecodingStream.cs | 542 ----------------- .../Shared/StructuredMessageEncodingStream.cs | 545 ------------------ ...redMessagePrecalculatedCrcWrapperStream.cs | 451 --------------- .../TransferValidationOptionsExtensions.cs | 7 + .../tests/Azure.Storage.Common.Tests.csproj | 9 - .../tests/Shared/FaultyStream.cs | 13 +- .../Shared/ObserveStructuredMessagePolicy.cs | 85 --- .../tests/Shared/RequestExtensions.cs | 27 - .../Shared/TamperStreamContentsPolicy.cs | 11 +- .../Shared/TransferValidationTestBase.cs | 325 +++-------- ...uredMessageDecodingRetriableStreamTests.cs | 246 -------- .../StructuredMessageDecodingStreamTests.cs | 323 ----------- .../StructuredMessageEncodingStreamTests.cs | 271 --------- .../tests/StructuredMessageHelper.cs | 68 --- .../StructuredMessageStreamRoundtripTests.cs | 127 ---- .../tests/StructuredMessageTests.cs | 114 ---- .../Azure.Storage.DataMovement.Blobs.csproj | 1 - ...re.Storage.DataMovement.Blobs.Tests.csproj | 5 - ...taMovement.Blobs.Files.Shares.Tests.csproj | 1 - ...age.DataMovement.Files.Shares.Tests.csproj | 1 - .../src/Azure.Storage.DataMovement.csproj | 2 +- .../Azure.Storage.DataMovement.Tests.csproj | 1 - .../Azure.Storage.Files.DataLake/assets.json | 2 +- .../src/Azure.Storage.Files.DataLake.csproj | 5 - .../src/DataLakeFileClient.cs | 43 +- .../src/Generated/FileSystemRestClient.cs | 2 +- .../src/Generated/PathAppendDataHeaders.cs | 2 - .../src/Generated/PathRestClient.cs | 46 +- .../src/Generated/PathUpdateHeaders.cs | 2 - 
.../src/Generated/ServiceRestClient.cs | 2 +- .../src/autorest.md | 6 +- .../Azure.Storage.Files.DataLake.Tests.csproj | 3 - ...taLakeFileClientTransferValidationTests.cs | 5 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 1 - ...ure.Storage.Files.Shares.netstandard2.0.cs | 1 - .../Azure.Storage.Files.Shares/assets.json | 2 +- .../src/Azure.Storage.Files.Shares.csproj | 8 +- .../src/Generated/DirectoryRestClient.cs | 2 +- .../src/Generated/FileDownloadHeaders.cs | 4 - .../src/Generated/FileRestClient.cs | 40 +- .../src/Generated/FileUploadRangeHeaders.cs | 2 - .../src/Generated/ServiceRestClient.cs | 2 +- .../src/Generated/ShareRestClient.cs | 2 +- .../src/Models/ShareFileDownloadInfo.cs | 6 - .../src/ShareErrors.cs | 15 + .../src/ShareFileClient.cs | 165 ++---- .../src/autorest.md | 6 +- .../Azure.Storage.Files.Shares.Tests.csproj | 1 - .../ShareFileClientTransferValidationTests.cs | 42 +- .../tests/Azure.Storage.Queues.Tests.csproj | 1 - 93 files changed, 414 insertions(+), 4533 deletions(-) delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs delete mode 
100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj index 286ab317256bf..2b77907e9aaac 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj @@ -23,7 +23,6 @@ - PreserveNewest @@ -43,4 +42,4 @@ - + \ No newline at end of file diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj index 8cf13cd60744f..9682ab15ecd60 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj @@ -17,7 +17,6 @@ - @@ -29,4 +28,4 @@ PreserveNewest - + \ No newline at end of file diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index fb52e93f85a56..05cdde6988050 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -516,7 +516,6 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public string 
ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index fb52e93f85a56..05cdde6988050 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -516,7 +516,6 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index fb52e93f85a56..05cdde6988050 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -516,7 +516,6 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/assets.json b/sdk/storage/Azure.Storage.Blobs/assets.json index 8315a7c09ec68..328a7707c7101 100644 --- a/sdk/storage/Azure.Storage.Blobs/assets.json +++ 
b/sdk/storage/Azure.Storage.Blobs/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Blobs", - "Tag": "net/storage/Azure.Storage.Blobs_a770006ccd" + "Tag": "net/storage/Azure.Storage.Blobs_f805dd22f1" } diff --git a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs index 9a110cf8eb13a..e70d5e02c82d7 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs @@ -1242,39 +1242,14 @@ internal async Task> AppendBlockInternal( BlobErrors.VerifyHttpsCustomerProvidedKey(Uri, ClientConfiguration.CustomerProvidedKey); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 
0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -1292,8 +1267,6 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), @@ -1316,8 +1289,6 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), diff --git a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj index 3b8c704b5e71a..ab681870be036 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj +++ b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj @@ -52,7 +52,6 @@ - @@ -92,11 +91,6 @@ - - - - - diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs index b48da27583a98..c1416524f0221 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs @@ -1031,7 +1031,6 @@ private async Task> DownloadInternal( ContentHash = blobDownloadDetails.ContentHash, ContentLength = blobDownloadDetails.ContentLength, ContentType = blobDownloadDetails.ContentType, - ExpectTrailingDetails = blobDownloadStreamingResult.ExpectTrailingDetails, }, response.GetRawResponse()); } #endregion @@ -1548,52 +1547,30 @@ internal virtual async ValueTask> Download // Wrap the response Content in a RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. 
- ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) - => StartDownloadAsync( - range, - conditionsWithEtag, - validationOptions, - offset, - async, - cancellationToken); - async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( - long offset, bool async, CancellationToken cancellationToken) - { - Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); - return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.Details.ContentLength); - } - Stream stream; - if (response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( - response.Value.Content, response.Value.Details.ContentLength); - stream = new StructuredMessageDecodingRetriableStream( - decodingStream, - decodedData, - StructuredMessage.Flags.StorageCrc64, - startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) - .EnsureCompleted(), - async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false), - decodedData => - { - response.Value.Details.ContentCrc = new byte[StructuredMessage.Crc64Length]; - decodedData.Crc.WriteCrc64(response.Value.Details.ContentCrc); - }, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } - else - { - stream = RetriableStream.Create( - response.Value.Content, - startOffset => Factory(startOffset, async: false, cancellationToken) - .EnsureCompleted().Value.Content, - async startOffset => (await Factory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false)).Value.Content, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } + Stream stream = RetriableStream.Create( + 
response.Value.Content, + startOffset => + StartDownloadAsync( + range, + conditionsWithEtag, + validationOptions, + startOffset, + async, + cancellationToken) + .EnsureCompleted() + .Value.Content, + async startOffset => + (await StartDownloadAsync( + range, + conditionsWithEtag, + validationOptions, + startOffset, + async, + cancellationToken) + .ConfigureAwait(false)) + .Value.Content, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); stream = stream.WithNoDispose().WithProgress(progressHandler); @@ -1601,11 +1578,7 @@ ValueTask> Factory(long offset, bool async * Buffer response stream and ensure it matches the transactional checksum if any. * Storage will not return a checksum for payload >4MB, so this buffer is capped similarly. * Checksum validation is opt-in, so this buffer is part of that opt-in. */ - if (validationOptions != default && - validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && - validationOptions.AutoValidateChecksum && - // structured message decoding does the validation for us - !response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)response.Value.Details.ContentLength); @@ -1676,8 +1649,8 @@ await ContentHasher.AssertResponseHashMatchInternal( /// notifications that the operation should be cancelled. /// /// - /// A describing the - /// downloaded blob. contains + /// A describing the + /// downloaded blob. contains /// the blob's data. /// /// @@ -1716,29 +1689,13 @@ private async ValueTask> StartDownloadAsyn operationName: nameof(BlobBaseClient.Download), parameterName: nameof(conditions)); - bool? rangeGetContentMD5 = null; - bool? 
rangeGetContentCRC64 = null; - string structuredBodyType = null; - switch (validationOptions?.ChecksumAlgorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - rangeGetContentMD5 = true; - break; - case StorageChecksumAlgorithm.StorageCrc64: - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - break; - default: - break; - } - if (async) { response = await BlobRestClient.DownloadAsync( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: rangeGetContentMD5, - rangeGetContentCRC64: rangeGetContentCRC64, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? true : null, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, @@ -1755,9 +1712,8 @@ private async ValueTask> StartDownloadAsyn response = BlobRestClient.Download( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: rangeGetContentMD5, - rangeGetContentCRC64: rangeGetContentCRC64, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? true : null, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, @@ -1773,11 +1729,9 @@ private async ValueTask> StartDownloadAsyn long length = response.IsUnavailable() ? 0 : response.Headers.ContentLength ?? 0; ClientConfiguration.Pipeline.LogTrace($"Response: {response.GetRawResponse().Status}, ContentLength: {length}"); - Response result = Response.FromValue( + return Response.FromValue( response.ToBlobDownloadStreamingResult(), response.GetRawResponse()); - result.Value.ExpectTrailingDetails = structuredBodyType != null; - return result; } #endregion diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs index f312e621bffc4..b16cefc83a535 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs @@ -318,8 +318,6 @@ private void AddHeadersAndQueryParameters() Diagnostics.LoggedHeaderNames.Add("x-ms-encryption-key-sha256"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-error-code"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-status-code"); - Diagnostics.LoggedHeaderNames.Add("x-ms-structured-body"); - Diagnostics.LoggedHeaderNames.Add("x-ms-structured-content-length"); Diagnostics.LoggedQueryParameters.Add("comp"); Diagnostics.LoggedQueryParameters.Add("maxresults"); diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs index 5e5ec82e96dca..cd6bc3788fc26 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs @@ -875,35 +875,14 @@ internal virtual async Task> UploadInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? 
structuredContentLength = default; - string structuredBodyType = null; - if (content != null && - validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64); - contentLength = content.Length - content.Position; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content?.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -942,8 +921,6 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -976,8 +953,6 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: 
structuredContentLength, cancellationToken: cancellationToken); } @@ -1330,39 +1305,14 @@ internal virtual async Task> StageBlockInternal( Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 
0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -1370,7 +1320,7 @@ internal virtual async Task> StageBlockInternal( { response = await BlockBlobRestClient.StageBlockAsync( blockId: base64BlockId, - contentLength: contentLength, + contentLength: (content?.Length - content?.Position) ?? 0, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1379,8 +1329,6 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -1388,7 +1336,7 @@ internal virtual async Task> StageBlockInternal( { response = BlockBlobRestClient.StageBlock( blockId: base64BlockId, - contentLength: contentLength, + contentLength: (content?.Length - content?.Position) ?? 
0, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1397,8 +1345,6 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -2845,7 +2791,7 @@ internal async Task OpenWriteInternal( immutabilityPolicy: default, legalHold: default, progressHandler: default, - transferValidationOverride: new() { ChecksumAlgorithm = StorageChecksumAlgorithm.None }, + transferValidationOverride: default, operationName: default, async: async, cancellationToken: cancellationToken) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs index 48139cc16a682..9303ec3a3d653 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs @@ -35,7 +35,5 @@ public AppendBlobAppendBlockHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. 
- public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs index a3d0eca1ec405..88104aa95bb00 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs @@ -29,7 +29,7 @@ internal partial class AppendBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". /// , , or is null. public AppendBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -219,7 +219,7 @@ public ResponseWithHeaders Create(long contentLength, i } } - internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, string leaseId, long? maxSize, long? appendPosition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string structuredBodyType, long? structuredContentLength) + internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, string leaseId, long? maxSize, long? 
appendPosition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -285,14 +285,6 @@ internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, i request.Headers.Add("x-ms-if-tags", ifTags); } request.Headers.Add("x-ms-version", _version); - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -322,18 +314,16 @@ internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, i /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> AppendBlockAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? 
encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public async Task> AppendBlockAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); + using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new AppendBlobAppendBlockHeaders(message.Response); switch (message.Response.Status) @@ -363,18 +353,16 @@ public async Task> AppendBlock /// Specify an ETag value to operate only on blobs with a matching value. 
/// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders AppendBlock(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders AppendBlock(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); + using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); _pipeline.Send(message, cancellationToken); var headers = new AppendBlobAppendBlockHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs index 1897117cb01d8..ad17079901a72 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs @@ -96,10 +96,6 @@ public BlobDownloadHeaders(Response response) public BlobImmutabilityPolicyMode? ImmutabilityPolicyMode => _response.Headers.TryGetValue("x-ms-immutability-policy-mode", out string value) ? value.ToBlobImmutabilityPolicyMode() : null; /// Indicates if a legal hold is present on the blob. public bool? LegalHold => _response.Headers.TryGetValue("x-ms-legal-hold", out bool? value) ? value : null; - /// Indicates the response body contains a structured message and specifies the message schema version and properties. 
- public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; - /// The length of the blob/file content inside the message body when the response body is returned as a structured message. Will always be smaller than Content-Length. - public long? StructuredContentLength => _response.Headers.TryGetValue("x-ms-structured-content-length", out long? value) ? value : null; /// If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to true, then the request returns a crc64 for the range, as long as the range size is less than or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is specified in the same request, it will fail with 400(Bad Request). public byte[] ContentCrc64 => _response.Headers.TryGetValue("x-ms-content-crc64", out byte[] value) ? value : null; public string ErrorCode => _response.Headers.TryGetValue("x-ms-error-code", out string value) ? value : null; diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs index 4f891a0a14684..615257741b781 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class BlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". /// , , or is null. 
public BlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -40,7 +40,7 @@ public BlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline _version = version ?? throw new ArgumentNullException(nameof(version)); } - internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, int? timeout, string range, string leaseId, bool? rangeGetContentMD5, bool? rangeGetContentCRC64, string structuredBodyType, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, int? timeout, string range, string leaseId, bool? rangeGetContentMD5, bool? rangeGetContentCRC64, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -77,10 +77,6 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in { request.Headers.Add("x-ms-range-get-content-crc64", rangeGetContentCRC64.Value); } - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } if (encryptionKey != null) { request.Headers.Add("x-ms-encryption-key", encryptionKey); @@ -126,7 +122,6 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in /// If specified, the operation only succeeds if the resource's lease is active and matches this ID. /// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. 
/// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size. - /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. @@ -136,9 +131,9 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. /// The cancellation token to use. - public async Task> DownloadAsync(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string structuredBodyType = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> DownloadAsync(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? 
rangeGetContentCRC64 = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, structuredBodyType, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlobDownloadHeaders(message.Response); switch (message.Response.Status) @@ -164,7 +159,6 @@ public async Task> DownloadAsyn /// If specified, the operation only succeeds if the resource's lease is active and matches this ID. /// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. /// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size. - /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. /// The SHA-256 hash of the provided encryption key. 
Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. @@ -174,9 +168,9 @@ public async Task> DownloadAsyn /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. /// The cancellation token to use. - public ResponseWithHeaders Download(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string structuredBodyType = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Download(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, structuredBodyType, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); _pipeline.Send(message, cancellationToken); var headers = new BlobDownloadHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs index 78ef424f66b13..0723c07204ac2 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class BlockBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". /// , , or is null. public BlockBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -40,7 +40,7 @@ public BlockBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pip _version = version ?? 
throw new ArgumentNullException(nameof(version)); } - internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, string blobContentType, string blobContentEncoding, string blobContentLanguage, byte[] blobContentMD5, string blobCacheControl, IDictionary metadata, string leaseId, string blobContentDisposition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, AccessTier? tier, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string blobTagsString, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, bool? legalHold, byte[] transactionalContentCrc64, string structuredBodyType, long? structuredContentLength) + internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, string blobContentType, string blobContentEncoding, string blobContentLanguage, byte[] blobContentMD5, string blobCacheControl, IDictionary metadata, string leaseId, string blobContentDisposition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, AccessTier? tier, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string blobTagsString, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, bool? legalHold, byte[] transactionalContentCrc64) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -146,14 +146,6 @@ internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? 
t { request.Headers.Add("x-ms-content-crc64", transactionalContentCrc64, "D"); } - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/xml"); if (transactionalContentMD5 != null) { @@ -193,18 +185,16 @@ internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? t /// Specifies the immutability policy mode to set on the blob. /// Specified if a legal hold should be set on the blob. /// Specify the transactional crc64 for the body, to be validated by the service. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> UploadAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? 
legalHold = null, byte[] transactionalContentCrc64 = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public async Task> UploadAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? 
legalHold = null, byte[] transactionalContentCrc64 = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64, structuredBodyType, structuredContentLength); + using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlockBlobUploadHeaders(message.Response); switch (message.Response.Status) @@ -244,18 +234,16 @@ public async Task> UploadAsync(long /// Specifies the immutability policy mode to set on the blob. /// Specified if a legal hold should be set on the blob. /// Specify the transactional crc64 for the body, to be validated by the service. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. 
- public ResponseWithHeaders Upload(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Upload(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? 
legalHold = null, byte[] transactionalContentCrc64 = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64, structuredBodyType, structuredContentLength); + using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64); _pipeline.Send(message, cancellationToken); var headers = new BlockBlobUploadHeaders(message.Response); switch (message.Response.Status) @@ -506,7 +494,7 @@ public ResponseWithHeaders PutBlobFromUrl(long c } } - internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, string structuredBodyType, long? structuredContentLength) + internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? 
timeout, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -545,14 +533,6 @@ internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, request.Headers.Add("x-ms-encryption-scope", encryptionScope); } request.Headers.Add("x-ms-version", _version); - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -576,11 +556,9 @@ internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// or is null. 
- public async Task> StageBlockAsync(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public async Task> StageBlockAsync(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, CancellationToken cancellationToken = default) { if (blockId == null) { @@ -591,7 +569,7 @@ public async Task> StageBlockAsy throw new ArgumentNullException(nameof(body)); } - using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, structuredBodyType, structuredContentLength); + using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlockBlobStageBlockHeaders(message.Response); switch (message.Response.Status) @@ -615,11 +593,9 @@ public async Task> StageBlockAsy /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". 
Must be provided if the x-ms-encryption-key header is provided. /// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// or is null. - public ResponseWithHeaders StageBlock(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders StageBlock(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? 
encryptionAlgorithm = null, string encryptionScope = null, CancellationToken cancellationToken = default) { if (blockId == null) { @@ -630,7 +606,7 @@ public ResponseWithHeaders StageBlock(string blockId throw new ArgumentNullException(nameof(body)); } - using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, structuredBodyType, structuredContentLength); + using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope); _pipeline.Send(message, cancellationToken); var headers = new BlockBlobStageBlockHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs index b13a3b7d1609a..7888b27dd7383 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs @@ -29,7 +29,5 @@ public BlockBlobStageBlockHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. 
- public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs index ca024b1fb5d84..1cfbd3924fa55 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs @@ -31,7 +31,5 @@ public BlockBlobUploadHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. - public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs index 9dd20ee7e1811..024bfecd4e90b 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs @@ -31,7 +31,7 @@ internal partial class ContainerRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". 
+ /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". /// , , or is null. public ContainerRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs index 68a9e85b00d1b..260d8021543e2 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class PageBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". /// , , or is null. public PageBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { @@ -235,7 +235,7 @@ public ResponseWithHeaders Create(long contentLength, lon } } - internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string range, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, long? ifSequenceNumberLessThanOrEqualTo, long? ifSequenceNumberLessThan, long? ifSequenceNumberEqualTo, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string structuredBodyType, long? 
structuredContentLength) + internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string range, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, long? ifSequenceNumberLessThanOrEqualTo, long? ifSequenceNumberLessThan, long? ifSequenceNumberEqualTo, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -310,14 +310,6 @@ internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, b request.Headers.Add("x-ms-if-tags", ifTags); } request.Headers.Add("x-ms-version", _version); - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -349,18 +341,16 @@ internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, b /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. 
- public async Task> UploadPagesAsync(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public async Task> UploadPagesAsync(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); + using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PageBlobUploadPagesHeaders(message.Response); switch (message.Response.Status) @@ -392,18 +382,16 @@ public async Task> UploadPagesAs /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders UploadPages(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? 
timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders UploadPages(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); + using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); _pipeline.Send(message, cancellationToken); var headers = new PageBlobUploadPagesHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs index c04659bc43322..77d37d90027aa 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs @@ -33,7 +33,5 @@ public PageBlobUploadPagesHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. 
public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. - public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs index 2abac369c0cae..e274940f81e8d 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs @@ -31,7 +31,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs index 0490ec239798e..bc119822cdc12 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs @@ -34,14 +34,6 @@ public class BlobDownloadDetails public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays - /// - /// When requested using , this value contains the CRC for the download blob range. 
- /// This value may only become populated once the network stream is fully consumed. If this instance is accessed through - /// , the network stream has already been consumed. Otherwise, consume the content stream before - /// checking this value. - /// - public byte[] ContentCrc { get; internal set; } - /// /// Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs index b42801e36ab55..e034573b54b3a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs @@ -4,8 +4,6 @@ using System; using System.ComponentModel; using System.IO; -using System.Threading.Tasks; -using Azure.Core; using Azure.Storage.Shared; namespace Azure.Storage.Blobs.Models @@ -51,14 +49,6 @@ public class BlobDownloadInfo : IDisposable, IDownloadedContent /// public BlobDownloadDetails Details { get; internal set; } - /// - /// Indicates some contents of are mixed into the response stream. - /// They will not be set until has been fully consumed. These details - /// will be extracted from the content stream by the library before the calling code can - /// encounter them. - /// - public bool ExpectTrailingDetails { get; internal set; } - /// /// Constructor. 
/// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs index 9b7d4d4e00dad..4fbada6e67aad 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs @@ -24,14 +24,6 @@ internal BlobDownloadStreamingResult() { } /// public Stream Content { get; internal set; } - /// - /// Indicates some contents of are mixed into the response stream. - /// They will not be set until has been fully consumed. These details - /// will be extracted from the content stream by the library before the calling code can - /// encounter them. - /// - public bool ExpectTrailingDetails { get; internal set; } - /// /// Disposes the by calling Dispose on the underlying stream. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs index 7038897531fbb..fa575e41b8ebe 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs @@ -1363,42 +1363,15 @@ internal async Task> UploadPagesInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - HttpRange range; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - range = new HttpRange(offset, (content?.Length - content?.Position) ?? 
null); - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content?.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content?.WithNoDispose().WithProgress(progressHandler); - range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content?.WithNoDispose().WithProgress(progressHandler); + HttpRange range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); ResponseWithHeaders response; @@ -1415,8 +1388,6 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, @@ -1441,8 +1412,6 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, diff --git a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs index 08a1090716f2b..2c52d0c256e34 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs @@ -48,8 +48,7 @@ internal class PartitionedDownloader /// private readonly StorageChecksumAlgorithm _validationAlgorithm; private readonly int _checksumSize; - // TODO disabling master crc temporarily. segment CRCs still handled. 
- private bool UseMasterCrc => false; // _validationAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; + private bool UseMasterCrc => _validationAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; private StorageCrc64HashAlgorithm _masterCrcCalculator = null; /// @@ -213,20 +212,8 @@ public async Task DownloadToInternal( // If the first segment was the entire blob, we'll copy that to // the output stream and finish now - long initialLength; - long totalLength; - // Get blob content length downloaded from content range when available to handle transit encoding - if (string.IsNullOrWhiteSpace(initialResponse.Value.Details.ContentRange)) - { - initialLength = initialResponse.Value.Details.ContentLength; - totalLength = 0; - } - else - { - ContentRange recievedRange = ContentRange.Parse(initialResponse.Value.Details.ContentRange); - initialLength = recievedRange.End.Value - recievedRange.Start.Value + 1; - totalLength = recievedRange.Size.Value; - } + long initialLength = initialResponse.Value.Details.ContentLength; + long totalLength = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange); if (initialLength == totalLength) { await HandleOneShotDownload(initialResponse, destination, async, cancellationToken) @@ -408,6 +395,20 @@ private async Task FinalizeDownloadInternal( } } + private static long ParseRangeTotalLength(string range) + { + if (range == null) + { + return 0; + } + int lengthSeparator = range.IndexOf("/", StringComparison.InvariantCultureIgnoreCase); + if (lengthSeparator == -1) + { + throw BlobErrors.ParsingFullHttpRangeFailed(range); + } + return long.Parse(range.Substring(lengthSeparator + 1), CultureInfo.InvariantCulture); + } + private async Task CopyToInternal( Response response, Stream destination, @@ -416,10 +417,7 @@ private async Task CopyToInternal( CancellationToken cancellationToken) { CancellationHelper.ThrowIfCancellationRequested(cancellationToken); - // if structured message, this crc is validated 
in the decoding process. don't decode it here. - using IHasher hasher = response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) - ? null - : ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); + using IHasher hasher = ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); using Stream rawSource = response.Value.Content; using Stream source = hasher != null ? ChecksumCalculatingStream.GetReadStream(rawSource, hasher.AppendHash) @@ -434,13 +432,13 @@ await source.CopyToInternal( if (hasher != null) { hasher.GetFinalHash(checksumBuffer.Span); - (ReadOnlyMemory checksum, StorageChecksumAlgorithm _) - = ContentHasher.GetResponseChecksumOrDefault(response.GetRawResponse()); - if (!checksumBuffer.Span.SequenceEqual(checksum.Span)) - { - throw Errors.HashMismatchOnStreamedDownload(response.Value.Details.ContentRange); - } + (ReadOnlyMemory checksum, StorageChecksumAlgorithm _) + = ContentHasher.GetResponseChecksumOrDefault(response.GetRawResponse()); + if (!checksumBuffer.Span.SequenceEqual(checksum.Span)) + { + throw Errors.HashMismatchOnStreamedDownload(response.Value.Details.ContentRange); } + } } private IEnumerable GetRanges(long initialLength, long totalLength) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 34efb5857c4a4..85fb92c2349cd 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://github.com/Azure/azure-rest-api-specs/blob/794c6178bc06c6c9dceb139e9f9d1b35b1a99701/specification/storage/data-plane/Microsoft.BlobStorage/preview/2025-01-05/blob.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/f6f50c6388fd5836fa142384641b8353a99874ef/specification/storage/data-plane/Microsoft.BlobStorage/stable/2024-08-04/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true @@ -34,7 +34,7 @@ directive: if (property.includes('/{containerName}/{blob}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } + } else if (property.includes('/{containerName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); @@ -158,7 +158,7 @@ directive: var newName = property.replace('/{containerName}/{blob}', ''); $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{containerName}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj index 1c3856c83b64e..62c7b6d17e63e 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj @@ -6,9 +6,6 @@ Microsoft Azure.Storage.Blobs client library tests false - - BlobSDK - diff --git a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs index c502231087ed6..73d11612f1d8c 100644 --- 
a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using System.IO; using System.Threading.Tasks; using Azure.Core.TestFramework; @@ -38,10 +37,7 @@ protected override async Task> GetDispo StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingContainer = await ClientBuilder.GetTestContainerAsync( - service: service, - containerName: containerName, - publicAccessType: PublicAccessType.None); + var disposingContainer = await ClientBuilder.GetTestContainerAsync(service: service, containerName: containerName); disposingContainer.Container.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingContainer.Container.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; @@ -95,96 +91,57 @@ public override void TestAutoResolve() } #region Added Tests - [Test] - public virtual async Task OlderServiceVersionThrowsOnStructuredMessage() + [TestCaseSource("GetValidationAlgorithms")] + public async Task ExpectedDownloadStreamingStreamTypeReturned(StorageChecksumAlgorithm algorithm) { - // use service version before structured message was introduced - await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( - service: ClientBuilder.GetServiceClient_SharedKey( - InstrumentClientOptions(new BlobClientOptions(BlobClientOptions.ServiceVersion.V2024_11_04))), - publicAccessType: PublicAccessType.None); + await using var test = await GetDisposingContainerAsync(); // Arrange - const int dataLength = Constants.KB; - var data = GetRandomBuffer(dataLength); - - var resourceName = GetNewResourceName(); - var blob = 
InstrumentClient(disposingContainer.Container.GetBlobClient(GetNewResourceName())); - await blob.UploadAsync(BinaryData.FromBytes(data)); - - var validationOptions = new DownloadTransferValidationOptions + var data = GetRandomBuffer(Constants.KB); + BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); + using (var stream = new MemoryStream(data)) { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - }; - AsyncTestDelegate operation = async () => await (await blob.DownloadStreamingAsync( - new BlobDownloadOptions - { - Range = new HttpRange(length: Constants.StructuredMessage.MaxDownloadCrcWithHeader + 1), - TransferValidation = validationOptions, - })).Value.Content.CopyToAsync(Stream.Null); - Assert.That(operation, Throws.TypeOf()); - } - - [Test] - public async Task StructuredMessagePopulatesCrcDownloadStreaming() - { - await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( - publicAccessType: PublicAccessType.None); - - const int dataLength = Constants.KB; - byte[] data = GetRandomBuffer(dataLength); - byte[] dataCrc = new byte[8]; - StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); - - var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); - await blob.UploadAsync(BinaryData.FromBytes(data)); + await blob.UploadAsync(stream); + } + // don't make options instance at all for no hash request + DownloadTransferValidationOptions transferValidation = algorithm == StorageChecksumAlgorithm.None + ? 
default + : new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; - Response response = await blob.DownloadStreamingAsync(new() + // Act + Response response = await blob.DownloadStreamingAsync(new BlobDownloadOptions { - TransferValidation = new DownloadTransferValidationOptions - { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - } + TransferValidation = transferValidation, + Range = new HttpRange(length: data.Length) }); - // crc is not present until response stream is consumed - Assert.That(response.Value.Details.ContentCrc, Is.Null); - - byte[] downloadedData; - using (MemoryStream ms = new()) - { - await response.Value.Content.CopyToAsync(ms); - downloadedData = ms.ToArray(); - } - - Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); - Assert.That(downloadedData, Is.EqualTo(data)); + // Assert + // validated stream is buffered + Assert.AreEqual(typeof(MemoryStream), response.Value.Content.GetType()); } [Test] - public async Task StructuredMessagePopulatesCrcDownloadContent() + public async Task ExpectedDownloadStreamingStreamTypeReturned_None() { - await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( - publicAccessType: PublicAccessType.None); + await using var test = await GetDisposingContainerAsync(); - const int dataLength = Constants.KB; - byte[] data = GetRandomBuffer(dataLength); - byte[] dataCrc = new byte[8]; - StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); - - var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); - await blob.UploadAsync(BinaryData.FromBytes(data)); + // Arrange + var data = GetRandomBuffer(Constants.KB); + BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); + using (var stream = new MemoryStream(data)) + { + await blob.UploadAsync(stream); + } - Response response = await blob.DownloadContentAsync(new BlobDownloadOptions() + // Act + Response response = await 
blob.DownloadStreamingAsync(new BlobDownloadOptions { - TransferValidation = new DownloadTransferValidationOptions - { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - } + Range = new HttpRange(length: data.Length) }); - Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); - Assert.That(response.Value.Content.ToArray(), Is.EqualTo(data)); + // Assert + // unvalidated stream type is private; just check we didn't get back a buffered stream + Assert.AreNotEqual(typeof(MemoryStream), response.Value.Content.GetType()); } #endregion } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs index af408264c5bfa..d8d4756a510c1 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs @@ -305,7 +305,7 @@ public Response GetStream(HttpRange range, BlobRequ ContentHash = new byte[] { 1, 2, 3 }, LastModified = DateTimeOffset.Now, Metadata = new Dictionary() { { "meta", "data" } }, - ContentRange = $"bytes {range.Offset}-{Math.Max(1, range.Offset + contentLength - 1)}/{_length}", + ContentRange = $"bytes {range.Offset}-{range.Offset + contentLength}/{_length}", ETag = s_etag, ContentEncoding = "test", CacheControl = "test", diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs deleted file mode 100644 index 48304640eee43..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers.Binary; - -namespace Azure.Storage; - -internal static class ChecksumExtensions -{ - public static void WriteCrc64(this ulong crc, Span dest) - => BinaryPrimitives.WriteUInt64LittleEndian(dest, crc); - - public static bool TryWriteCrc64(this ulong crc, Span dest) - => BinaryPrimitives.TryWriteUInt64LittleEndian(dest, crc); - - public static ulong ReadCrc64(this ReadOnlySpan crc) - => BinaryPrimitives.ReadUInt64LittleEndian(crc); - - public static bool TryReadCrc64(this ReadOnlySpan crc, out ulong value) - => BinaryPrimitives.TryReadUInt64LittleEndian(crc, out value); -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs index 4893b971d6529..17a32b2d46d41 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs @@ -665,15 +665,6 @@ internal static class AccountResources internal static readonly int[] PathStylePorts = { 10000, 10001, 10002, 10003, 10004, 10100, 10101, 10102, 10103, 10104, 11000, 11001, 11002, 11003, 11004, 11100, 11101, 11102, 11103, 11104 }; } - internal static class StructuredMessage - { - public const string StructuredMessageHeader = "x-ms-structured-body"; - public const string StructuredContentLength = "x-ms-structured-content-length"; - public const string CrcStructuredMessage = "XSM/1.0; properties=crc64"; - public const int DefaultSegmentContentLength = 4 * MB; - public const int MaxDownloadCrcWithHeader = 4 * MB; - } - internal static class ClientSideEncryption { public const string HttpMessagePropertyKeyV1 = "Azure.Storage.StorageTelemetryPolicy.ClientSideEncryption.V1"; diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs index 4d49edeb72ecf..4e5464fa17e6e 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs +++ 
b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs @@ -3,7 +3,6 @@ using System; using System.Globalization; -using System.IO; using System.Linq; using System.Security.Authentication; using System.Xml.Serialization; @@ -106,18 +105,9 @@ public static ArgumentException VersionNotSupported(string paramName) public static RequestFailedException ClientRequestIdMismatch(Response response, string echo, string original) => new RequestFailedException(response.Status, $"Response x-ms-client-request-id '{echo}' does not match the original expected request id, '{original}'.", null); - public static InvalidDataException StructuredMessageNotAcknowledgedGET(Response response) - => new InvalidDataException($"Response does not acknowledge structured message was requested. Unknown data structure in response body."); - - public static InvalidDataException StructuredMessageNotAcknowledgedPUT(Response response) - => new InvalidDataException($"Response does not acknowledge structured message was sent. 
Unexpected data may have been persisted to storage."); - public static ArgumentException TransactionalHashingNotSupportedWithClientSideEncryption() => new ArgumentException("Client-side encryption and transactional hashing are not supported at the same time."); - public static InvalidDataException ExpectedStructuredMessage() - => new InvalidDataException($"Expected {Constants.StructuredMessage.StructuredMessageHeader} in response, but found none."); - public static void VerifyHttpsTokenAuth(Uri uri) { if (uri.Scheme != Constants.Https) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs index e3372665928c1..6b89a59011d51 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs @@ -72,9 +72,6 @@ public static ArgumentException CannotDeferTransactionalHashVerification() public static ArgumentException CannotInitializeWriteStreamWithData() => new ArgumentException("Initialized buffer for StorageWriteStream must be empty."); - public static InvalidDataException InvalidStructuredMessage(string optionalMessage = default) - => new InvalidDataException(("Invalid structured message data. " + optionalMessage ?? "").Trim()); - internal static void VerifyStreamPosition(Stream stream, string streamName) { if (stream != null && stream.CanSeek && stream.Length > 0 && stream.Position >= stream.Length) @@ -83,22 +80,6 @@ internal static void VerifyStreamPosition(Stream stream, string streamName) } } - internal static void AssertBufferMinimumSize(ReadOnlySpan buffer, int minSize, string paramName) - { - if (buffer.Length < minSize) - { - throw new ArgumentException($"Expected buffer Length of at least {minSize} bytes. 
Got {buffer.Length}.", paramName); - } - } - - internal static void AssertBufferExactSize(ReadOnlySpan buffer, int size, string paramName) - { - if (buffer.Length != size) - { - throw new ArgumentException($"Expected buffer Length of exactly {size} bytes. Got {buffer.Length}.", paramName); - } - } - public static void ThrowIfParamNull(object obj, string paramName) { if (obj == null) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs index fe2db427bef02..c3e9c641c3fea 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs @@ -249,9 +249,41 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat response = await _downloadInternalFunc(range, _validationOptions, async, cancellationToken).ConfigureAwait(false); using Stream networkStream = response.Value.Content; - // use stream copy to ensure consumption of any trailing metadata (e.g. structured message) - // allow buffer limits to catch the error of data size mismatch - int totalCopiedBytes = (int) await networkStream.CopyToInternal(new MemoryStream(_buffer), async, cancellationToken).ConfigureAwait((false)); + + // The number of bytes we just downloaded. + long downloadSize = GetResponseRange(response.GetRawResponse()).Length.Value; + + // The number of bytes we copied in the last loop. + int copiedBytes; + + // Bytes we have copied so far. + int totalCopiedBytes = 0; + + // Bytes remaining to copy. It is save to truncate the long because we asked for a max of int _buffer size bytes. 
+ int remainingBytes = (int)downloadSize; + + do + { + if (async) + { + copiedBytes = await networkStream.ReadAsync( + buffer: _buffer, + offset: totalCopiedBytes, + count: remainingBytes, + cancellationToken: cancellationToken).ConfigureAwait(false); + } + else + { + copiedBytes = networkStream.Read( + buffer: _buffer, + offset: totalCopiedBytes, + count: remainingBytes); + } + + totalCopiedBytes += copiedBytes; + remainingBytes -= copiedBytes; + } + while (copiedBytes != 0); _bufferPosition = 0; _bufferLength = totalCopiedBytes; @@ -259,7 +291,7 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat // if we deferred transactional hash validation on download, validate now // currently we always defer but that may change - if (_validationOptions != default && _validationOptions.ChecksumAlgorithm == StorageChecksumAlgorithm.MD5 && !_validationOptions.AutoValidateChecksum) // TODO better condition + if (_validationOptions != default && _validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && !_validationOptions.AutoValidateChecksum) { ContentHasher.AssertResponseHashMatch(_buffer, _bufferPosition, _bufferLength, _validationOptions.ChecksumAlgorithm, response.GetRawResponse()); } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs index 6070329d10d3d..3e218d18a90af 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs @@ -251,7 +251,7 @@ public override int Read(byte[] buffer, int offset, int count) Length - Position, bufferCount - (Position - offsetOfBuffer), count - read); - Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, offset + read, toCopy); + Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, read, toCopy); read += toCopy; Position += toCopy; } diff --git 
a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs index 307ff23b21144..ab6b76d78a87e 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs @@ -12,52 +12,22 @@ namespace Azure.Storage /// internal static class StorageCrc64Composer { - public static byte[] Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) - => Compose(partitions.AsEnumerable()); - - public static byte[] Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) - { - ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); - return BitConverter.GetBytes(result); - } - - public static byte[] Compose(params (ReadOnlyMemory Crc64, long OriginalDataLength)[] partitions) - => Compose(partitions.AsEnumerable()); - - public static byte[] Compose(IEnumerable<(ReadOnlyMemory Crc64, long OriginalDataLength)> partitions) + public static Memory Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) { -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64.Span), tup.OriginalDataLength))); -#else - ulong result = Compose(partitions.Select(tup => (System.BitConverter.ToUInt64(tup.Crc64.ToArray(), 0), tup.OriginalDataLength))); -#endif - return BitConverter.GetBytes(result); + return Compose(partitions.AsEnumerable()); } - public static byte[] Compose( - ReadOnlySpan leftCrc64, long leftOriginalDataLength, - ReadOnlySpan rightCrc64, long rightOriginalDataLength) + public static Memory Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) { -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - ulong result = Compose( - (BitConverter.ToUInt64(leftCrc64), leftOriginalDataLength), - (BitConverter.ToUInt64(rightCrc64), 
rightOriginalDataLength)); -#else - ulong result = Compose( - (BitConverter.ToUInt64(leftCrc64.ToArray(), 0), leftOriginalDataLength), - (BitConverter.ToUInt64(rightCrc64.ToArray(), 0), rightOriginalDataLength)); -#endif - return BitConverter.GetBytes(result); + ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); + return new Memory(BitConverter.GetBytes(result)); } - public static ulong Compose(params (ulong Crc64, long OriginalDataLength)[] partitions) - => Compose(partitions.AsEnumerable()); - public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> partitions) { ulong composedCrc = 0; long composedDataLength = 0; - foreach ((ulong crc64, long originalDataLength) in partitions) + foreach (var tup in partitions) { composedCrc = StorageCrc64Calculator.Concatenate( uInitialCrcAB: 0, @@ -65,9 +35,9 @@ public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> uFinalCrcA: composedCrc, uSizeA: (ulong) composedDataLength, uInitialCrcB: 0, - uFinalCrcB: crc64, - uSizeB: (ulong)originalDataLength); - composedDataLength += originalDataLength; + uFinalCrcB: tup.Crc64, + uSizeB: (ulong)tup.OriginalDataLength); + composedDataLength += tup.OriginalDataLength; } return composedCrc; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs index 9f4ddb5249e82..0cef4f4d8d4ed 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs @@ -33,35 +33,6 @@ public override void OnReceivedResponse(HttpMessage message) { throw Errors.ClientRequestIdMismatch(message.Response, echo.First(), original); } - - if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && - 
message.Request.Headers.Contains(Constants.StructuredMessage.StructuredContentLength)) - { - AssertStructuredMessageAcknowledgedPUT(message); - } - else if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - AssertStructuredMessageAcknowledgedGET(message); - } - } - - private static void AssertStructuredMessageAcknowledgedPUT(HttpMessage message) - { - if (!message.Response.IsError && - !message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - throw Errors.StructuredMessageNotAcknowledgedPUT(message.Response); - } - } - - private static void AssertStructuredMessageAcknowledgedGET(HttpMessage message) - { - if (!message.Response.IsError && - !(message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && - message.Response.Headers.Contains(Constants.StructuredMessage.StructuredContentLength))) - { - throw Errors.StructuredMessageNotAcknowledgedGET(message.Response); - } } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs index 44c0973ea9be1..2a7bd90fb82a1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs @@ -46,7 +46,7 @@ internal static class StorageVersionExtensions /// public const ServiceVersion LatestVersion = #if BlobSDK || QueueSDK || FileSDK || DataLakeSDK || ChangeFeedSDK || DataMovementSDK || BlobDataMovementSDK || ShareDataMovementSDK - ServiceVersion.V2025_01_05; + ServiceVersion.V2024_11_04; #else ERROR_STORAGE_SERVICE_NOT_DEFINED; #endif diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs index c8803ecf421e7..31f121d414ea4 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs +++ 
b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Buffers; using System.IO; using System.Threading; using System.Threading.Tasks; @@ -50,7 +48,7 @@ public static async Task WriteInternal( } } - public static Task CopyToInternal( + public static Task CopyToInternal( this Stream src, Stream dest, bool async, @@ -81,33 +79,21 @@ public static Task CopyToInternal( /// Cancellation token for the operation. /// /// - public static async Task CopyToInternal( + public static async Task CopyToInternal( this Stream src, Stream dest, int bufferSize, bool async, CancellationToken cancellationToken) { - using IDisposable _ = ArrayPool.Shared.RentDisposable(bufferSize, out byte[] buffer); - long totalRead = 0; - int read; if (async) { - while (0 < (read = await src.ReadAsync(buffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false))) - { - totalRead += read; - await dest.WriteAsync(buffer, 0, read, cancellationToken).ConfigureAwait(false); - } + await src.CopyToAsync(dest, bufferSize, cancellationToken).ConfigureAwait(false); } else { - while (0 < (read = src.Read(buffer, 0, buffer.Length))) - { - totalRead += read; - dest.Write(buffer, 0, read); - } + src.CopyTo(dest, bufferSize); } - return totalRead; } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs deleted file mode 100644 index a0a46837797b9..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers; -using System.Buffers.Binary; -using System.IO; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -internal static class StructuredMessage -{ - public const int Crc64Length = 8; - - [Flags] - public enum Flags - { - None = 0, - StorageCrc64 = 1, - } - - public static class V1_0 - { - public const byte MessageVersionByte = 1; - - public const int StreamHeaderLength = 13; - public const int StreamHeaderVersionOffset = 0; - public const int StreamHeaderMessageLengthOffset = 1; - public const int StreamHeaderFlagsOffset = 9; - public const int StreamHeaderSegmentCountOffset = 11; - - public const int SegmentHeaderLength = 10; - public const int SegmentHeaderNumOffset = 0; - public const int SegmentHeaderContentLengthOffset = 2; - - #region Stream Header - public static void ReadStreamHeader( - ReadOnlySpan buffer, - out long messageLength, - out Flags flags, - out int totalSegments) - { - Errors.AssertBufferExactSize(buffer, 13, nameof(buffer)); - if (buffer[StreamHeaderVersionOffset] != 1) - { - throw new InvalidDataException("Unrecognized version of structured message."); - } - messageLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(StreamHeaderMessageLengthOffset, 8)); - flags = (Flags)BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderFlagsOffset, 2)); - totalSegments = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderSegmentCountOffset, 2)); - } - - public static int WriteStreamHeader( - Span buffer, - long messageLength, - Flags flags, - int totalSegments) - { - const int versionOffset = 0; - const int messageLengthOffset = 1; - const int flagsOffset = 9; - const int numSegmentsOffset = 11; - - Errors.AssertBufferMinimumSize(buffer, StreamHeaderLength, nameof(buffer)); - - buffer[versionOffset] = MessageVersionByte; - BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(messageLengthOffset, 8), (ulong)messageLength); - 
BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(flagsOffset, 2), (ushort)flags); - BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(numSegmentsOffset, 2), (ushort)totalSegments); - - return StreamHeaderLength; - } - - /// - /// Gets stream header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. - /// - public static IDisposable GetStreamHeaderBytes( - ArrayPool pool, - out Memory bytes, - long messageLength, - Flags flags, - int totalSegments) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); - WriteStreamHeader(bytes.Span, messageLength, flags, totalSegments); - return disposable; - } - #endregion - - #region StreamFooter - public static int GetStreamFooterSize(Flags flags) - => flags.HasFlag(Flags.StorageCrc64) ? Crc64Length : 0; - - public static void ReadStreamFooter( - ReadOnlySpan buffer, - Flags flags, - out ulong crc64) - { - int expectedBufferSize = GetSegmentFooterSize(flags); - Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer)); - - crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default; - } - - public static int WriteStreamFooter(Span buffer, ReadOnlySpan crc64 = default) - { - int requiredSpace = 0; - if (!crc64.IsEmpty) - { - Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64)); - requiredSpace += Crc64Length; - } - - Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer)); - int offset = 0; - if (!crc64.IsEmpty) - { - crc64.CopyTo(buffer.Slice(offset, Crc64Length)); - offset += Crc64Length; - } - - return offset; - } - - /// - /// Gets stream header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. 
- /// - public static IDisposable GetStreamFooterBytes( - ArrayPool pool, - out Memory bytes, - ReadOnlySpan crc64 = default) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); - WriteStreamFooter(bytes.Span, crc64); - return disposable; - } - #endregion - - #region SegmentHeader - public static void ReadSegmentHeader( - ReadOnlySpan buffer, - out int segmentNum, - out long contentLength) - { - Errors.AssertBufferExactSize(buffer, 10, nameof(buffer)); - segmentNum = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(0, 2)); - contentLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(2, 8)); - } - - public static int WriteSegmentHeader(Span buffer, int segmentNum, long segmentLength) - { - const int segmentNumOffset = 0; - const int segmentLengthOffset = 2; - - Errors.AssertBufferMinimumSize(buffer, SegmentHeaderLength, nameof(buffer)); - - BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(segmentNumOffset, 2), (ushort)segmentNum); - BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(segmentLengthOffset, 8), (ulong)segmentLength); - - return SegmentHeaderLength; - } - - /// - /// Gets segment header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. - /// - public static IDisposable GetSegmentHeaderBytes( - ArrayPool pool, - out Memory bytes, - int segmentNum, - long segmentLength) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(SegmentHeaderLength, out bytes); - WriteSegmentHeader(bytes.Span, segmentNum, segmentLength); - return disposable; - } - #endregion - - #region SegmentFooter - public static int GetSegmentFooterSize(Flags flags) - => flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; - - public static void ReadSegmentFooter( - ReadOnlySpan buffer, - Flags flags, - out ulong crc64) - { - int expectedBufferSize = GetSegmentFooterSize(flags); - Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer)); - - crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default; - } - - public static int WriteSegmentFooter(Span buffer, ReadOnlySpan crc64 = default) - { - int requiredSpace = 0; - if (!crc64.IsEmpty) - { - Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64)); - requiredSpace += Crc64Length; - } - - Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer)); - int offset = 0; - if (!crc64.IsEmpty) - { - crc64.CopyTo(buffer.Slice(offset, Crc64Length)); - offset += Crc64Length; - } - - return offset; - } - - /// - /// Gets stream header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. - /// - public static IDisposable GetSegmentFooterBytes( - ArrayPool pool, - out Memory bytes, - ReadOnlySpan crc64 = default) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); - WriteSegmentFooter(bytes.Span, crc64); - return disposable; - } - #endregion - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs deleted file mode 100644 index 22dfaef259972..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers; -using System.Buffers.Binary; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core; -using Azure.Core.Pipeline; - -namespace Azure.Storage.Shared; - -internal class StructuredMessageDecodingRetriableStream : Stream -{ - public class DecodedData - { - public ulong Crc { get; set; } - } - - private readonly Stream _innerRetriable; - private long _decodedBytesRead; - - private readonly StructuredMessage.Flags _expectedFlags; - private readonly List _decodedDatas; - private readonly Action _onComplete; - - private StorageCrc64HashAlgorithm _totalContentCrc; - - private readonly Func _decodingStreamFactory; - private readonly Func> _decodingAsyncStreamFactory; - - public StructuredMessageDecodingRetriableStream( - Stream initialDecodingStream, - StructuredMessageDecodingStream.RawDecodedData initialDecodedData, - StructuredMessage.Flags expectedFlags, - Func decodingStreamFactory, - Func> decodingAsyncStreamFactory, - Action onComplete, - ResponseClassifier responseClassifier, - int maxRetries) - { - _decodingStreamFactory = decodingStreamFactory; - _decodingAsyncStreamFactory = decodingAsyncStreamFactory; - _innerRetriable = RetriableStream.Create(initialDecodingStream, StreamFactory, StreamFactoryAsync, responseClassifier, maxRetries); - _decodedDatas = new() { initialDecodedData }; - _expectedFlags = expectedFlags; - _onComplete = onComplete; - - if (expectedFlags.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - _totalContentCrc = StorageCrc64HashAlgorithm.Create(); - } - } - - private Stream StreamFactory(long _) - { - long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum(); - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = _decodingStreamFactory(offset); - _decodedDatas.Add(decodedData); - FastForwardInternal(decodingStream, _decodedBytesRead - offset, 
false).EnsureCompleted(); - return decodingStream; - } - - private async ValueTask StreamFactoryAsync(long _) - { - long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum(); - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = await _decodingAsyncStreamFactory(offset).ConfigureAwait(false); - _decodedDatas.Add(decodedData); - await FastForwardInternal(decodingStream, _decodedBytesRead - offset, true).ConfigureAwait(false); - return decodingStream; - } - - private static async ValueTask FastForwardInternal(Stream stream, long bytes, bool async) - { - using (ArrayPool.Shared.RentDisposable(4 * Constants.KB, out byte[] buffer)) - { - if (async) - { - while (bytes > 0) - { - bytes -= await stream.ReadAsync(buffer, 0, (int)Math.Min(bytes, buffer.Length)).ConfigureAwait(false); - } - } - else - { - while (bytes > 0) - { - bytes -= stream.Read(buffer, 0, (int)Math.Min(bytes, buffer.Length)); - } - } - } - } - - protected override void Dispose(bool disposing) - { - _decodedDatas.Clear(); - _innerRetriable.Dispose(); - } - - private void OnCompleted() - { - DecodedData final = new(); - if (_totalContentCrc != null) - { - final.Crc = ValidateCrc(); - } - _onComplete?.Invoke(final); - } - - private ulong ValidateCrc() - { - using IDisposable _ = ArrayPool.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf); - Span calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length); - _totalContentCrc.GetCurrentHash(calculatedBytes); - ulong calculated = BinaryPrimitives.ReadUInt64LittleEndian(calculatedBytes); - - ulong reported = _decodedDatas.Count == 1 - ? 
_decodedDatas.First().TotalCrc.Value - : StorageCrc64Composer.Compose(_decodedDatas.SelectMany(d => d.SegmentCrcs)); - - if (calculated != reported) - { - Span reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length); - BinaryPrimitives.WriteUInt64LittleEndian(reportedBytes, reported); - throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes); - } - - return calculated; - } - - #region Read - public override int Read(byte[] buffer, int offset, int count) - { - int read = _innerRetriable.Read(buffer, offset, count); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(new ReadOnlySpan(buffer, offset, read)); - } - return read; - } - - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - { - int read = await _innerRetriable.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(new ReadOnlySpan(buffer, offset, read)); - } - return read; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buffer) - { - int read = _innerRetriable.Read(buffer); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(buffer.Slice(0, read)); - } - return read; - } - - public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) - { - int read = await _innerRetriable.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(buffer.Span.Slice(0, read)); - } - return read; - } -#endif - - public override int ReadByte() - { - int val = _innerRetriable.ReadByte(); - _decodedBytesRead += 1; - if (val == -1) - { - OnCompleted(); - } - return val; - } - - public override int 
EndRead(IAsyncResult asyncResult) - { - int read = _innerRetriable.EndRead(asyncResult); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - return read; - } - #endregion - - #region Passthru - public override bool CanRead => _innerRetriable.CanRead; - - public override bool CanSeek => _innerRetriable.CanSeek; - - public override bool CanWrite => _innerRetriable.CanWrite; - - public override bool CanTimeout => _innerRetriable.CanTimeout; - - public override long Length => _innerRetriable.Length; - - public override long Position { get => _innerRetriable.Position; set => _innerRetriable.Position = value; } - - public override void Flush() => _innerRetriable.Flush(); - - public override Task FlushAsync(CancellationToken cancellationToken) => _innerRetriable.FlushAsync(cancellationToken); - - public override long Seek(long offset, SeekOrigin origin) => _innerRetriable.Seek(offset, origin); - - public override void SetLength(long value) => _innerRetriable.SetLength(value); - - public override void Write(byte[] buffer, int offset, int count) => _innerRetriable.Write(buffer, offset, count); - - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _innerRetriable.WriteAsync(buffer, offset, count, cancellationToken); - - public override void WriteByte(byte value) => _innerRetriable.WriteByte(value); - - public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginWrite(buffer, offset, count, callback, state); - - public override void EndWrite(IAsyncResult asyncResult) => _innerRetriable.EndWrite(asyncResult); - - public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginRead(buffer, offset, count, callback, state); - - public override int ReadTimeout { get => _innerRetriable.ReadTimeout; set => _innerRetriable.ReadTimeout = value; } - - 
public override int WriteTimeout { get => _innerRetriable.WriteTimeout; set => _innerRetriable.WriteTimeout = value; } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override void Write(ReadOnlySpan buffer) => _innerRetriable.Write(buffer); - - public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) => _innerRetriable.WriteAsync(buffer, cancellationToken); -#endif - #endregion -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs deleted file mode 100644 index e6b193ae18260..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers; -using System.Buffers.Binary; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -/// -/// Decodes a structured message stream as the data is read. -/// -/// -/// Wraps the inner stream in a , which avoids using its internal -/// buffer if individual Read() calls are larger than it. This ensures one of the three scenarios -/// -/// -/// Read buffer >= stream buffer: -/// There is enough space in the read buffer for inline metadata to be safely -/// extracted in only one read to the true inner stream. -/// -/// -/// Read buffer < next inline metadata: -/// The stream buffer has been activated, and we can read multiple small times from the inner stream -/// without multi-reading the real stream, even when partway through an existing stream buffer. 
-/// -/// -/// Else: -/// Same as #1, but also the already-allocated stream buffer has been used to slightly improve -/// resource churn when reading inner stream. -/// -/// -/// -internal class StructuredMessageDecodingStream : Stream -{ - internal class RawDecodedData - { - public long? InnerStreamLength { get; set; } - public int? TotalSegments { get; set; } - public StructuredMessage.Flags? Flags { get; set; } - public List<(ulong SegmentCrc, long SegmentLen)> SegmentCrcs { get; } = new(); - public ulong? TotalCrc { get; set; } - public bool DecodeCompleted { get; set; } - } - - private enum SMRegion - { - StreamHeader, - StreamFooter, - SegmentHeader, - SegmentFooter, - SegmentContent, - } - - private readonly Stream _innerBufferedStream; - - private byte[] _metadataBuffer = ArrayPool.Shared.Rent(Constants.KB); - private int _metadataBufferOffset = 0; - private int _metadataBufferLength = 0; - - private int _streamHeaderLength; - private int _streamFooterLength; - private int _segmentHeaderLength; - private int _segmentFooterLength; - - private long? _expectedInnerStreamLength; - - private bool _disposed; - - private readonly RawDecodedData _decodedData; - private StorageCrc64HashAlgorithm _totalContentCrc; - private StorageCrc64HashAlgorithm _segmentCrc; - - private readonly bool _validateChecksums; - - public override bool CanRead => true; - - public override bool CanWrite => false; - - public override bool CanSeek => false; - - public override bool CanTimeout => _innerBufferedStream.CanTimeout; - - public override int ReadTimeout => _innerBufferedStream.ReadTimeout; - - public override int WriteTimeout => _innerBufferedStream.WriteTimeout; - - public override long Length => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - - public static (Stream DecodedStream, RawDecodedData DecodedData) WrapStream( - Stream innerStream, - long? 
expextedStreamLength = default) - { - RawDecodedData data = new(); - return (new StructuredMessageDecodingStream(innerStream, data, expextedStreamLength), data); - } - - private StructuredMessageDecodingStream( - Stream innerStream, - RawDecodedData decodedData, - long? expectedStreamLength) - { - Argument.AssertNotNull(innerStream, nameof(innerStream)); - Argument.AssertNotNull(decodedData, nameof(decodedData)); - - _expectedInnerStreamLength = expectedStreamLength; - _innerBufferedStream = new BufferedStream(innerStream); - _decodedData = decodedData; - - // Assumes stream will be structured message 1.0. Will validate this when consuming stream. - _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; - _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; - - _validateChecksums = true; - } - - #region Write - public override void Flush() => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); - - public override void SetLength(long value) => throw new NotSupportedException(); - #endregion - - #region Read - public override int Read(byte[] buf, int offset, int count) - { - int decodedRead; - int read; - do - { - read = _innerBufferedStream.Read(buf, offset, count); - _innerStreamConsumed += read; - decodedRead = Decode(new Span(buf, offset, read)); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } - - public override async Task ReadAsync(byte[] buf, int offset, int count, CancellationToken cancellationToken) - { - int decodedRead; - int read; - do - { - read = await _innerBufferedStream.ReadAsync(buf, offset, count, cancellationToken).ConfigureAwait(false); - _innerStreamConsumed += read; - decodedRead = Decode(new Span(buf, offset, read)); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } - -#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buf) - { - int decodedRead; - int read; - do - { - read = _innerBufferedStream.Read(buf); - _innerStreamConsumed += read; - decodedRead = Decode(buf.Slice(0, read)); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } - - public override async ValueTask ReadAsync(Memory buf, CancellationToken cancellationToken = default) - { - int decodedRead; - int read; - do - { - read = await _innerBufferedStream.ReadAsync(buf).ConfigureAwait(false); - _innerStreamConsumed += read; - decodedRead = Decode(buf.Slice(0, read).Span); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } -#endif - - private void AssertDecodeFinished() - { - if (_streamFooterLength > 0 && !_decodedData.DecodeCompleted) - { - throw Errors.InvalidStructuredMessage("Premature end of stream."); - } - _decodedData.DecodeCompleted = true; - } - - private long _innerStreamConsumed = 0; - private long _decodedContentConsumed = 0; - private SMRegion _currentRegion = SMRegion.StreamHeader; - private int _currentSegmentNum = 0; - private long _currentSegmentContentLength; - private long _currentSegmentContentRemaining; - private long CurrentRegionLength => _currentRegion switch - { - SMRegion.StreamHeader => _streamHeaderLength, - SMRegion.StreamFooter => _streamFooterLength, - SMRegion.SegmentHeader => _segmentHeaderLength, - SMRegion.SegmentFooter => _segmentFooterLength, - SMRegion.SegmentContent => _currentSegmentContentLength, - _ => 0, - }; - - /// - /// Decodes given bytes in place. Decoding based on internal stream position info. - /// Decoded data size will be less than or equal to encoded data length. - /// - /// - /// Length of the decoded data in . 
- /// - private int Decode(Span buffer) - { - if (buffer.IsEmpty) - { - return 0; - } - List<(int Offset, int Count)> gaps = new(); - - int bufferConsumed = ProcessMetadataBuffer(buffer); - - if (bufferConsumed > 0) - { - gaps.Add((0, bufferConsumed)); - } - - while (bufferConsumed < buffer.Length) - { - if (_currentRegion == SMRegion.SegmentContent) - { - int read = (int)Math.Min(buffer.Length - bufferConsumed, _currentSegmentContentRemaining); - _totalContentCrc?.Append(buffer.Slice(bufferConsumed, read)); - _segmentCrc?.Append(buffer.Slice(bufferConsumed, read)); - bufferConsumed += read; - _decodedContentConsumed += read; - _currentSegmentContentRemaining -= read; - if (_currentSegmentContentRemaining == 0) - { - _currentRegion = SMRegion.SegmentFooter; - } - } - else if (buffer.Length - bufferConsumed < CurrentRegionLength) - { - SavePartialMetadata(buffer.Slice(bufferConsumed)); - gaps.Add((bufferConsumed, buffer.Length - bufferConsumed)); - bufferConsumed = buffer.Length; - } - else - { - int processed = _currentRegion switch - { - SMRegion.StreamHeader => ProcessStreamHeader(buffer.Slice(bufferConsumed)), - SMRegion.StreamFooter => ProcessStreamFooter(buffer.Slice(bufferConsumed)), - SMRegion.SegmentHeader => ProcessSegmentHeader(buffer.Slice(bufferConsumed)), - SMRegion.SegmentFooter => ProcessSegmentFooter(buffer.Slice(bufferConsumed)), - _ => 0, - }; - // TODO surface error if processed is 0 - gaps.Add((bufferConsumed, processed)); - bufferConsumed += processed; - } - } - - if (gaps.Count == 0) - { - return buffer.Length; - } - - // gaps is already sorted by offset due to how it was assembled - int gap = 0; - for (int i = gaps.First().Offset; i < buffer.Length; i++) - { - if (gaps.Count > 0 && gaps.First().Offset == i) - { - int count = gaps.First().Count; - gap += count; - i += count - 1; - gaps.RemoveAt(0); - } - else - { - buffer[i - gap] = buffer[i]; - } - } - return buffer.Length - gap; - } - - /// - /// Processes metadata in the internal buffer, if 
any. Appends any necessary data - /// from the append buffer to complete metadata. - /// - /// - /// Bytes consumed from . - /// - private int ProcessMetadataBuffer(ReadOnlySpan append) - { - if (_metadataBufferLength == 0) - { - return 0; - } - if (_currentRegion == SMRegion.SegmentContent) - { - return 0; - } - int appended = 0; - if (_metadataBufferLength < CurrentRegionLength && append.Length > 0) - { - appended = Math.Min((int)CurrentRegionLength - _metadataBufferLength, append.Length); - SavePartialMetadata(append.Slice(0, appended)); - } - if (_metadataBufferLength == CurrentRegionLength) - { - Span metadata = new(_metadataBuffer, _metadataBufferOffset, (int)CurrentRegionLength); - switch (_currentRegion) - { - case SMRegion.StreamHeader: - ProcessStreamHeader(metadata); - break; - case SMRegion.StreamFooter: - ProcessStreamFooter(metadata); - break; - case SMRegion.SegmentHeader: - ProcessSegmentHeader(metadata); - break; - case SMRegion.SegmentFooter: - ProcessSegmentFooter(metadata); - break; - } - _metadataBufferOffset = 0; - _metadataBufferLength = 0; - } - return appended; - } - - private void SavePartialMetadata(ReadOnlySpan span) - { - // safety array resize w/ArrayPool - if (_metadataBufferLength + span.Length > _metadataBuffer.Length) - { - ResizeMetadataBuffer(2 * (_metadataBufferLength + span.Length)); - } - - // realign any existing content if necessary - if (_metadataBufferLength != 0 && _metadataBufferOffset != 0) - { - // don't use Array.Copy() to move elements in the same array - for (int i = 0; i < _metadataBufferLength; i++) - { - _metadataBuffer[i] = _metadataBuffer[i + _metadataBufferOffset]; - } - _metadataBufferOffset = 0; - } - - span.CopyTo(new Span(_metadataBuffer, _metadataBufferOffset + _metadataBufferLength, span.Length)); - _metadataBufferLength += span.Length; - } - - private int ProcessStreamHeader(ReadOnlySpan span) - { - StructuredMessage.V1_0.ReadStreamHeader( - span.Slice(0, _streamHeaderLength), - out long streamLength, - 
out StructuredMessage.Flags flags, - out int totalSegments); - - _decodedData.InnerStreamLength = streamLength; - _decodedData.Flags = flags; - _decodedData.TotalSegments = totalSegments; - - if (_expectedInnerStreamLength.HasValue && _expectedInnerStreamLength.Value != streamLength) - { - throw Errors.InvalidStructuredMessage("Unexpected message size."); - } - - if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - _segmentFooterLength = StructuredMessage.Crc64Length; - _streamFooterLength = StructuredMessage.Crc64Length; - if (_validateChecksums) - { - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - _totalContentCrc = StorageCrc64HashAlgorithm.Create(); - } - } - _currentRegion = SMRegion.SegmentHeader; - return _streamHeaderLength; - } - - private int ProcessStreamFooter(ReadOnlySpan span) - { - int footerLen = StructuredMessage.V1_0.GetStreamFooterSize(_decodedData.Flags.Value); - StructuredMessage.V1_0.ReadStreamFooter( - span.Slice(0, footerLen), - _decodedData.Flags.Value, - out ulong reportedCrc); - if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - if (_validateChecksums) - { - ValidateCrc64(_totalContentCrc, reportedCrc); - } - _decodedData.TotalCrc = reportedCrc; - } - - if (_innerStreamConsumed != _decodedData.InnerStreamLength) - { - throw Errors.InvalidStructuredMessage("Unexpected message size."); - } - if (_currentSegmentNum != _decodedData.TotalSegments) - { - throw Errors.InvalidStructuredMessage("Missing expected message segments."); - } - - _decodedData.DecodeCompleted = true; - return footerLen; - } - - private int ProcessSegmentHeader(ReadOnlySpan span) - { - StructuredMessage.V1_0.ReadSegmentHeader( - span.Slice(0, _segmentHeaderLength), - out int newSegNum, - out _currentSegmentContentLength); - _currentSegmentContentRemaining = _currentSegmentContentLength; - if (newSegNum != _currentSegmentNum + 1) - { - throw Errors.InvalidStructuredMessage("Unexpected segment number in 
structured message."); - } - _currentSegmentNum = newSegNum; - _currentRegion = SMRegion.SegmentContent; - return _segmentHeaderLength; - } - - private int ProcessSegmentFooter(ReadOnlySpan span) - { - int footerLen = StructuredMessage.V1_0.GetSegmentFooterSize(_decodedData.Flags.Value); - StructuredMessage.V1_0.ReadSegmentFooter( - span.Slice(0, footerLen), - _decodedData.Flags.Value, - out ulong reportedCrc); - if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - if (_validateChecksums) - { - ValidateCrc64(_segmentCrc, reportedCrc); - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - } - _decodedData.SegmentCrcs.Add((reportedCrc, _currentSegmentContentLength)); - } - _currentRegion = _currentSegmentNum == _decodedData.TotalSegments ? SMRegion.StreamFooter : SMRegion.SegmentHeader; - return footerLen; - } - - private static void ValidateCrc64(StorageCrc64HashAlgorithm calculation, ulong reported) - { - using IDisposable _ = ArrayPool.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf); - Span calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length); - Span reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length); - calculation.GetCurrentHash(calculatedBytes); - reported.WriteCrc64(reportedBytes); - if (!calculatedBytes.SequenceEqual(reportedBytes)) - { - throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes); - } - } - #endregion - - public override long Seek(long offset, SeekOrigin origin) - => throw new NotSupportedException(); - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - if (_disposed) - { - return; - } - - if (disposing) - { - _innerBufferedStream.Dispose(); - _disposed = true; - } - } - - private void ResizeMetadataBuffer(int newSize) - { - byte[] newBuf = ArrayPool.Shared.Rent(newSize); - Array.Copy(_metadataBuffer, _metadataBufferOffset, newBuf, 0, _metadataBufferLength); - ArrayPool.Shared.Return(_metadataBuffer); - 
_metadataBuffer = newBuf; - } - - private void AlignMetadataBuffer() - { - if (_metadataBufferOffset != 0 && _metadataBufferLength != 0) - { - for (int i = 0; i < _metadataBufferLength; i++) - { - _metadataBuffer[i] = _metadataBuffer[_metadataBufferOffset + i]; - } - _metadataBufferOffset = 0; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs deleted file mode 100644 index cb0ef340155ec..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core.Pipeline; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -internal class StructuredMessageEncodingStream : Stream -{ - private readonly Stream _innerStream; - - private readonly int _streamHeaderLength; - private readonly int _streamFooterLength; - private readonly int _segmentHeaderLength; - private readonly int _segmentFooterLength; - private readonly int _segmentContentLength; - - private readonly StructuredMessage.Flags _flags; - private bool _disposed; - - private bool UseCrcSegment => _flags.HasFlag(StructuredMessage.Flags.StorageCrc64); - private readonly StorageCrc64HashAlgorithm _totalCrc; - private StorageCrc64HashAlgorithm _segmentCrc; - private readonly byte[] _segmentCrcs; - private int _latestSegmentCrcd = 0; - - #region Segments - /// - /// Gets the 1-indexed segment number the underlying stream is currently positioned in. - /// 1-indexed to match segment labelling as specified by SM spec. 
- /// - private int CurrentInnerSegment => (int)Math.Floor(_innerStream.Position / (float)_segmentContentLength) + 1; - - /// - /// Gets the 1-indexed segment number the encoded data stream is currently positioned in. - /// 1-indexed to match segment labelling as specified by SM spec. - /// - private int CurrentEncodingSegment - { - get - { - // edge case: always on final segment when at end of inner stream - if (_innerStream.Position == _innerStream.Length) - { - return TotalSegments; - } - // when writing footer, inner stream is positioned at next segment, - // but this stream is still writing the previous one - if (_currentRegion == SMRegion.SegmentFooter) - { - return CurrentInnerSegment - 1; - } - return CurrentInnerSegment; - } - } - - /// - /// Segment length including header and footer. - /// - private int SegmentTotalLength => _segmentHeaderLength + _segmentContentLength + _segmentFooterLength; - - private int TotalSegments => GetTotalSegments(_innerStream, _segmentContentLength); - private static int GetTotalSegments(Stream innerStream, long segmentContentLength) - { - return (int)Math.Ceiling(innerStream.Length / (float)segmentContentLength); - } - #endregion - - public override bool CanRead => true; - - public override bool CanWrite => false; - - public override bool CanSeek => _innerStream.CanSeek; - - public override bool CanTimeout => _innerStream.CanTimeout; - - public override int ReadTimeout => _innerStream.ReadTimeout; - - public override int WriteTimeout => _innerStream.WriteTimeout; - - public override long Length => - _streamHeaderLength + _streamFooterLength + - (_segmentHeaderLength + _segmentFooterLength) * TotalSegments + - _innerStream.Length; - - #region Position - private enum SMRegion - { - StreamHeader, - StreamFooter, - SegmentHeader, - SegmentFooter, - SegmentContent, - } - - private SMRegion _currentRegion = SMRegion.StreamHeader; - private int _currentRegionPosition = 0; - - private long _maxSeekPosition = 0; - - public override 
long Position - { - get - { - return _currentRegion switch - { - SMRegion.StreamHeader => _currentRegionPosition, - SMRegion.StreamFooter => _streamHeaderLength + - TotalSegments * (_segmentHeaderLength + _segmentFooterLength) + - _innerStream.Length + - _currentRegionPosition, - SMRegion.SegmentHeader => _innerStream.Position + - _streamHeaderLength + - (CurrentEncodingSegment - 1) * (_segmentHeaderLength + _segmentFooterLength) + - _currentRegionPosition, - SMRegion.SegmentFooter => _innerStream.Position + - _streamHeaderLength + - // Inner stream has moved to next segment but we're still writing the previous segment footer - CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - - _segmentFooterLength + _currentRegionPosition, - SMRegion.SegmentContent => _innerStream.Position + - _streamHeaderLength + - CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - - _segmentFooterLength, - _ => throw new InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), - }; - } - set - { - Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); - if (value < _streamHeaderLength) - { - _currentRegion = SMRegion.StreamHeader; - _currentRegionPosition = (int)value; - _innerStream.Position = 0; - return; - } - if (value >= Length - _streamFooterLength) - { - _currentRegion = SMRegion.StreamFooter; - _currentRegionPosition = (int)(value - (Length - _streamFooterLength)); - _innerStream.Position = _innerStream.Length; - return; - } - int newSegmentNum = 1 + (int)Math.Floor((value - _streamHeaderLength) / (double)(_segmentHeaderLength + _segmentFooterLength + _segmentContentLength)); - int segmentPosition = (int)(value - _streamHeaderLength - - ((newSegmentNum - 1) * (_segmentHeaderLength + _segmentFooterLength + _segmentContentLength))); - - if (segmentPosition < _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = (int)((value - _streamHeaderLength) % 
SegmentTotalLength); - _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength; - return; - } - if (segmentPosition < _segmentHeaderLength + _segmentContentLength) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - - _segmentHeaderLength; - _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength + _currentRegionPosition; - return; - } - - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - - _segmentHeaderLength - _segmentContentLength; - _innerStream.Position = newSegmentNum * _segmentContentLength; - } - } - #endregion - - public StructuredMessageEncodingStream( - Stream innerStream, - int segmentContentLength, - StructuredMessage.Flags flags) - { - Argument.AssertNotNull(innerStream, nameof(innerStream)); - if (innerStream.GetLengthOrDefault() == default) - { - throw new ArgumentException("Stream must have known length.", nameof(innerStream)); - } - if (innerStream.Position != 0) - { - throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); - } - // stream logic likely breaks down with segment length of 1; enforce >=2 rather than just positive number - // real world scenarios will probably use a minimum of tens of KB - Argument.AssertInRange(segmentContentLength, 2, int.MaxValue, nameof(segmentContentLength)); - - _flags = flags; - _segmentContentLength = segmentContentLength; - - _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; - _streamFooterLength = UseCrcSegment ? StructuredMessage.Crc64Length : 0; - _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; - _segmentFooterLength = UseCrcSegment ? 
StructuredMessage.Crc64Length : 0; - - if (UseCrcSegment) - { - _totalCrc = StorageCrc64HashAlgorithm.Create(); - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - _segmentCrcs = ArrayPool.Shared.Rent( - GetTotalSegments(innerStream, segmentContentLength) * StructuredMessage.Crc64Length); - innerStream = ChecksumCalculatingStream.GetReadStream(innerStream, span => - { - _totalCrc.Append(span); - _segmentCrc.Append(span); - }); - } - - _innerStream = innerStream; - } - - #region Write - public override void Flush() => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); - - public override void SetLength(long value) => throw new NotSupportedException(); - #endregion - - #region Read - public override int Read(byte[] buffer, int offset, int count) - => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); - - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); - - private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < count && Position < Length) - { - int subreadOffset = offset + totalRead; - int subreadCount = count - totalRead; - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); - 
break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. - if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamInternal( - buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buffer) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - - public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } -#endif - - #region Read Headers/Footers - private int ReadFromStreamHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( - ArrayPool.Shared, out Memory headerBytes, Length, _flags, TotalSegments); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _streamHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromStreamFooter(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read <= 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( - ArrayPool.Shared, - out Memory footerBytes, - crc64: UseCrcSegment - ? 
_totalCrc.GetCurrentHash() // TODO array pooling - : default); - footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - return read; - } - - private int ReadFromSegmentHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( - ArrayPool.Shared, - out Memory headerBytes, - CurrentInnerSegment, - Math.Min(_segmentContentLength, _innerStream.Length - _innerStream.Position)); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromSegmentFooter(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read < 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( - ArrayPool.Shared, - out Memory headerBytes, - crc64: UseCrcSegment - ? new Span( - _segmentCrcs, - (CurrentEncodingSegment-1) * _totalCrc.HashLengthInBytes, - _totalCrc.HashLengthInBytes) - : default); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentFooterLength) - { - _currentRegion = _innerStream.Position == _innerStream.Length - ? 
SMRegion.StreamFooter : SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - #endregion - - #region ReadUnderlyingStream - private int MaxInnerStreamRead => _segmentContentLength - _currentRegionPosition; - - private void CleanupContentSegment() - { - if (_currentRegionPosition == _segmentContentLength || _innerStream.Position >= _innerStream.Length) - { - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = 0; - if (UseCrcSegment && CurrentEncodingSegment - 1 == _latestSegmentCrcd) - { - _segmentCrc.GetCurrentHash(new Span( - _segmentCrcs, - _latestSegmentCrcd * _segmentCrc.HashLengthInBytes, - _segmentCrc.HashLengthInBytes)); - _latestSegmentCrcd++; - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - } - } - } - - private async ValueTask ReadFromInnerStreamInternal( - byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int read = async - ? await _innerStream.ReadAsync(buffer, offset, Math.Min(count, MaxInnerStreamRead)).ConfigureAwait(false) - : _innerStream.Read(buffer, offset, Math.Min(count, MaxInnerStreamRead)); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - private int ReadFromInnerStream(Span buffer) - { - if (MaxInnerStreamRead < buffer.Length) - { - buffer = buffer.Slice(0, MaxInnerStreamRead); - } - int read = _innerStream.Read(buffer); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - - private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) - { - if (MaxInnerStreamRead < buffer.Length) - { - buffer = buffer.Slice(0, MaxInnerStreamRead); - } - int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } -#endif - #endregion - - // don't allow stream to seek too far forward. 
track how far the stream has been naturally read. - private void UpdateLatestPosition() - { - if (_maxSeekPosition < Position) - { - _maxSeekPosition = Position; - } - } - #endregion - - public override long Seek(long offset, SeekOrigin origin) - { - switch (origin) - { - case SeekOrigin.Begin: - Position = offset; - break; - case SeekOrigin.Current: - Position += offset; - break; - case SeekOrigin.End: - Position = Length + offset; - break; - } - return Position; - } - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - if (_disposed) - { - return; - } - - if (disposing) - { - _innerStream.Dispose(); - _disposed = true; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs deleted file mode 100644 index 3569ef4339735..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core.Pipeline; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -internal class StructuredMessagePrecalculatedCrcWrapperStream : Stream -{ - private readonly Stream _innerStream; - - private readonly int _streamHeaderLength; - private readonly int _streamFooterLength; - private readonly int _segmentHeaderLength; - private readonly int _segmentFooterLength; - - private bool _disposed; - - private readonly byte[] _crc; - - public override bool CanRead => true; - - public override bool CanWrite => false; - - public override bool CanSeek => _innerStream.CanSeek; - - public override bool CanTimeout => _innerStream.CanTimeout; - - public override int ReadTimeout => _innerStream.ReadTimeout; - - public override int WriteTimeout => _innerStream.WriteTimeout; - - public override long Length => - _streamHeaderLength + _streamFooterLength + - _segmentHeaderLength + _segmentFooterLength + - _innerStream.Length; - - #region Position - private enum SMRegion - { - StreamHeader, - StreamFooter, - SegmentHeader, - SegmentFooter, - SegmentContent, - } - - private SMRegion _currentRegion = SMRegion.StreamHeader; - private int _currentRegionPosition = 0; - - private long _maxSeekPosition = 0; - - public override long Position - { - get - { - return _currentRegion switch - { - SMRegion.StreamHeader => _currentRegionPosition, - SMRegion.SegmentHeader => _innerStream.Position + - _streamHeaderLength + - _currentRegionPosition, - SMRegion.SegmentContent => _streamHeaderLength + - _segmentHeaderLength + - _innerStream.Position, - SMRegion.SegmentFooter => _streamHeaderLength + - _segmentHeaderLength + - _innerStream.Length + - _currentRegionPosition, - SMRegion.StreamFooter => _streamHeaderLength + - _segmentHeaderLength + - _innerStream.Length + - _segmentFooterLength + - _currentRegionPosition, - _ => throw new 
InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), - }; - } - set - { - Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); - if (value < _streamHeaderLength) - { - _currentRegion = SMRegion.StreamHeader; - _currentRegionPosition = (int)value; - _innerStream.Position = 0; - return; - } - if (value < _streamHeaderLength + _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = (int)(value - _streamHeaderLength); - _innerStream.Position = 0; - return; - } - if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength); - _innerStream.Position = value - _streamHeaderLength - _segmentHeaderLength; - return; - } - if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length + _segmentFooterLength) - { - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length); - _innerStream.Position = _innerStream.Length; - return; - } - - _currentRegion = SMRegion.StreamFooter; - _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length - _segmentFooterLength); - _innerStream.Position = _innerStream.Length; - } - } - #endregion - - public StructuredMessagePrecalculatedCrcWrapperStream( - Stream innerStream, - ReadOnlySpan precalculatedCrc) - { - Argument.AssertNotNull(innerStream, nameof(innerStream)); - if (innerStream.GetLengthOrDefault() == default) - { - throw new ArgumentException("Stream must have known length.", nameof(innerStream)); - } - if (innerStream.Position != 0) - { - throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); - } - - _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; - _streamFooterLength = StructuredMessage.Crc64Length; - 
_segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; - _segmentFooterLength = StructuredMessage.Crc64Length; - - _crc = ArrayPool.Shared.Rent(StructuredMessage.Crc64Length); - precalculatedCrc.CopyTo(_crc); - - _innerStream = innerStream; - } - - #region Write - public override void Flush() => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); - - public override void SetLength(long value) => throw new NotSupportedException(); - #endregion - - #region Read - public override int Read(byte[] buffer, int offset, int count) - => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); - - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); - - private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < count && Position < Length) - { - int subreadOffset = offset + totalRead; - int subreadCount = count - totalRead; - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamInternal( - buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buffer) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - - public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } -#endif - - #region Read Headers/Footers - private int ReadFromStreamHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( - ArrayPool.Shared, - out Memory headerBytes, - Length, - StructuredMessage.Flags.StorageCrc64, - totalSegments: 1); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _streamHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromStreamFooter(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read <= 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( - ArrayPool.Shared, - out Memory footerBytes, - new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); - footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - return read; - } - - private int ReadFromSegmentHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( - ArrayPool.Shared, - out Memory headerBytes, - segmentNum: 1, - _innerStream.Length); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromSegmentFooter(Span 
buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read < 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( - ArrayPool.Shared, - out Memory headerBytes, - new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentFooterLength) - { - _currentRegion = _innerStream.Position == _innerStream.Length - ? SMRegion.StreamFooter : SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - #endregion - - #region ReadUnderlyingStream - private void CleanupContentSegment() - { - if (_innerStream.Position >= _innerStream.Length) - { - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = 0; - } - } - - private async ValueTask ReadFromInnerStreamInternal( - byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int read = async - ? await _innerStream.ReadAsync(buffer, offset, count).ConfigureAwait(false) - : _innerStream.Read(buffer, offset, count); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - private int ReadFromInnerStream(Span buffer) - { - int read = _innerStream.Read(buffer); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - - private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) - { - int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } -#endif - #endregion - - // don't allow stream to seek too far forward. track how far the stream has been naturally read. 
- private void UpdateLatestPosition() - { - if (_maxSeekPosition < Position) - { - _maxSeekPosition = Position; - } - } - #endregion - - public override long Seek(long offset, SeekOrigin origin) - { - switch (origin) - { - case SeekOrigin.Begin: - Position = offset; - break; - case SeekOrigin.Current: - Position += offset; - break; - case SeekOrigin.End: - Position = Length + offset; - break; - } - return Position; - } - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - if (_disposed) - { - return; - } - - if (disposing) - { - ArrayPool.Shared.Return(_crc); - _innerStream.Dispose(); - _disposed = true; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs index 763d385240383..af21588b4ae09 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs @@ -9,7 +9,14 @@ public static StorageChecksumAlgorithm ResolveAuto(this StorageChecksumAlgorithm { if (checksumAlgorithm == StorageChecksumAlgorithm.Auto) { +#if BlobSDK || DataLakeSDK || CommonSDK return StorageChecksumAlgorithm.StorageCrc64; +#elif FileSDK // file shares don't support crc64 + return StorageChecksumAlgorithm.MD5; +#else + throw new System.NotSupportedException( + $"{typeof(TransferValidationOptionsExtensions).FullName}.{nameof(ResolveAuto)} is not supported."); +#endif } return checksumAlgorithm; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj index 2863b85f6feb2..5db86ebee984b 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj @@ -13,12 +13,9 @@ - - - @@ -31,7 +28,6 @@ - @@ -50,11 
+46,6 @@ - - - - - diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs index f4e4b92ed73c4..7411eb1499312 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs @@ -15,7 +15,6 @@ internal class FaultyStream : Stream private readonly Exception _exceptionToRaise; private int _remainingExceptions; private Action _onFault; - private long _position = 0; public FaultyStream( Stream innerStream, @@ -41,7 +40,7 @@ public FaultyStream( public override long Position { - get => CanSeek ? _innerStream.Position : _position; + get => _innerStream.Position; set => _innerStream.Position = value; } @@ -54,9 +53,7 @@ public override int Read(byte[] buffer, int offset, int count) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - int read = _innerStream.Read(buffer, offset, count); - _position += read; - return read; + return _innerStream.Read(buffer, offset, count); } else { @@ -64,13 +61,11 @@ public override int Read(byte[] buffer, int offset, int count) } } - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - int read = await _innerStream.ReadAsync(buffer, offset, count, cancellationToken); - _position += read; - return read; + return _innerStream.ReadAsync(buffer, offset, count, cancellationToken); } else { diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs deleted file mode 100644 index 828c41179bba3..0000000000000 --- 
a/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Collections.Generic; -using System.IO; -using Azure.Core; -using Azure.Core.Pipeline; -using Azure.Storage.Shared; - -namespace Azure.Storage.Test.Shared -{ - internal class ObserveStructuredMessagePolicy : HttpPipelineSynchronousPolicy - { - private readonly HashSet _requestScopes = new(); - - private readonly HashSet _responseScopes = new(); - - public ObserveStructuredMessagePolicy() - { - } - - public override void OnSendingRequest(HttpMessage message) - { - if (_requestScopes.Count > 0) - { - byte[] encodedContent; - byte[] underlyingContent; - StructuredMessageDecodingStream.RawDecodedData decodedData; - using (MemoryStream ms = new()) - { - message.Request.Content.WriteTo(ms, default); - encodedContent = ms.ToArray(); - using (MemoryStream ms2 = new()) - { - (Stream s, decodedData) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedContent)); - s.CopyTo(ms2); - underlyingContent = ms2.ToArray(); - } - } - } - } - - public override void OnReceivedResponse(HttpMessage message) - { - } - - public IDisposable CheckRequestScope() => CheckMessageScope.CheckRequestScope(this); - - public IDisposable CheckResponseScope() => CheckMessageScope.CheckResponseScope(this); - - private class CheckMessageScope : IDisposable - { - private bool _isRequestScope; - private ObserveStructuredMessagePolicy _policy; - - public static CheckMessageScope CheckRequestScope(ObserveStructuredMessagePolicy policy) - { - CheckMessageScope result = new() - { - _isRequestScope = true, - _policy = policy - }; - result._policy._requestScopes.Add(result); - return result; - } - - public static CheckMessageScope CheckResponseScope(ObserveStructuredMessagePolicy policy) - { - CheckMessageScope result = new() - { - _isRequestScope = false, - 
_policy = policy - }; - result._policy._responseScopes.Add(result); - return result; - } - - public void Dispose() - { - (_isRequestScope ? _policy._requestScopes : _policy._responseScopes).Remove(this); - } - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs deleted file mode 100644 index ad395e862f827..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System.Linq; -using System.Text; -using Azure.Core; -using NUnit.Framework; - -namespace Azure.Storage; - -public static partial class RequestExtensions -{ - public static string AssertHeaderPresent(this Request request, string headerName) - { - if (request.Headers.TryGetValue(headerName, out string value)) - { - return headerName == Constants.StructuredMessage.StructuredMessageHeader ? null : value; - } - StringBuilder sb = new StringBuilder() - .AppendLine($"`{headerName}` expected on request but was not found.") - .AppendLine($"{request.Method} {request.Uri}") - .AppendLine(string.Join("\n", request.Headers.Select(h => $"{h.Name}: {h.Value}s"))) - ; - Assert.Fail(sb.ToString()); - return null; - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs index 7e6c78117f53b..f4198e9dfd532 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs @@ -14,7 +14,7 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy /// /// Default tampering that changes the first byte of the stream. 
/// - private static Func GetTamperByteStreamTransform(long position) => stream => + private static readonly Func _defaultStreamTransform = stream => { if (stream is not MemoryStream) { @@ -23,10 +23,10 @@ private static Func GetTamperByteStreamTransform(long position) stream = buffer; } - stream.Position = position; + stream.Position = 0; var firstByte = stream.ReadByte(); - stream.Position = position; + stream.Position = 0; stream.WriteByte((byte)((firstByte + 1) % byte.MaxValue)); stream.Position = 0; @@ -37,12 +37,9 @@ private static Func GetTamperByteStreamTransform(long position) public TamperStreamContentsPolicy(Func streamTransform = default) { - _streamTransform = streamTransform ?? GetTamperByteStreamTransform(0); + _streamTransform = streamTransform ?? _defaultStreamTransform; } - public static TamperStreamContentsPolicy TamperByteAt(long position) - => new(GetTamperByteStreamTransform(position)); - public bool TransformRequestBody { get; set; } public bool TransformResponseBody { get; set; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs index 248acf8811960..c18492d2fb4dd 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs @@ -5,13 +5,10 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Security.Cryptography; using System.Threading.Tasks; using Azure.Core; -using Azure.Core.Diagnostics; -using Azure.Core.Pipeline; using Azure.Core.TestFramework; -using Azure.Storage.Shared; +using FastSerialization; using NUnit.Framework; namespace Azure.Storage.Test.Shared @@ -193,15 +190,21 @@ protected string GetNewResourceName() /// The actual checksum value expected to be on the request, if known. Defaults to no specific value expected or checked. 
/// /// An assertion to put into a pipeline policy. - internal static Action GetRequestChecksumHeaderAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) + internal static Action GetRequestChecksumAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) { // action to assert a request header is as expected - void AssertChecksum(Request req, string headerName) + void AssertChecksum(RequestHeaders headers, string headerName) { - string checksum = req.AssertHeaderPresent(headerName); - if (expectedChecksum != default) + if (headers.TryGetValue(headerName, out string checksum)) { - Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); + if (expectedChecksum != default) + { + Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); + } + } + else + { + Assert.Fail($"{headerName} expected on request but was not found."); } }; @@ -216,39 +219,14 @@ void AssertChecksum(Request req, string headerName) switch (algorithm.ResolveAuto()) { case StorageChecksumAlgorithm.MD5: - AssertChecksum(request, "Content-MD5"); + AssertChecksum(request.Headers, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(request, Constants.StructuredMessage.StructuredMessageHeader); + AssertChecksum(request.Headers, "x-ms-content-crc64"); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); - } - }; - } - - internal static Action GetRequestStructuredMessageAssertion( - StructuredMessage.Flags flags, - Func isStructuredMessageExpected = default, - long? 
structuredContentSegmentLength = default) - { - return request => - { - // filter some requests out with predicate - if (isStructuredMessageExpected != default && !isStructuredMessageExpected(request)) - { - return; + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); } - - Assert.That(request.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); - Assert.That(structuredBody, Does.Contain("XSM/1.0")); - if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - Assert.That(structuredBody, Does.Contain("crc64")); - } - - Assert.That(request.Headers.TryGetValue("Content-Length", out string contentLength)); - Assert.That(request.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); }; } @@ -300,66 +278,32 @@ void AssertChecksum(ResponseHeaders headers, string headerName) AssertChecksum(response.Headers, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(response.Headers, Constants.StructuredMessage.StructuredMessageHeader); + AssertChecksum(response.Headers, "x-ms-content-crc64"); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); } }; } - internal static Action GetResponseStructuredMessageAssertion( - StructuredMessage.Flags flags, - Func isStructuredMessageExpected = default) - { - return response => - { - // filter some requests out with predicate - if (isStructuredMessageExpected != default && !isStructuredMessageExpected(response)) - { - return; - } - - Assert.That(response.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); - Assert.That(structuredBody, Does.Contain("XSM/1.0")); - if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - Assert.That(structuredBody, 
Does.Contain("crc64")); - } - - Assert.That(response.Headers.TryGetValue("Content-Length", out string contentLength)); - Assert.That(response.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); - }; - } - /// /// Asserts the service returned an error that expected checksum did not match checksum on upload. /// /// Async action to upload data to service. /// Checksum algorithm used. - internal static void AssertWriteChecksumMismatch( - AsyncTestDelegate writeAction, - StorageChecksumAlgorithm algorithm, - bool expectStructuredMessage = false) + internal static void AssertWriteChecksumMismatch(AsyncTestDelegate writeAction, StorageChecksumAlgorithm algorithm) { var exception = ThrowsOrInconclusiveAsync(writeAction); - if (expectStructuredMessage) - { - Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); - } - else + switch (algorithm.ResolveAuto()) { - switch (algorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - Assert.That(exception.ErrorCode, Is.EqualTo("Md5Mismatch")); - break; - case StorageChecksumAlgorithm.StorageCrc64: - Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); - break; - default: - throw new ArgumentException("Test arguments contain bad algorithm specifier."); - } + case StorageChecksumAlgorithm.MD5: + Assert.AreEqual("Md5Mismatch", exception.ErrorCode); + break; + case StorageChecksumAlgorithm.StorageCrc64: + Assert.AreEqual("Crc64Mismatch", exception.ErrorCode); + break; + default: + throw new ArgumentException("Test arguments contain bad algorithm specifier."); } } #endregion @@ -404,7 +348,6 @@ public virtual async Task UploadPartitionSuccessfulHashComputation(StorageChecks await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); // Arrange - bool expectStructuredMessage = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; const int dataLength = Constants.KB; var data = GetRandomBuffer(dataLength); var validationOptions = 
new UploadTransferValidationOptions @@ -413,10 +356,7 @@ public virtual async Task UploadPartitionSuccessfulHashComputation(StorageChecks }; // make pipeline assertion for checking checksum was present on upload - var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) - : GetRequestChecksumHeaderAssertion(algorithm); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -466,11 +406,7 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg }; // make pipeline assertion for checking precalculated checksum was present on upload - // precalculated partition upload will never use structured message. always check header - var assertion = GetRequestChecksumHeaderAssertion( - algorithm, - expectedChecksum: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? 
default : precalculatedChecksum); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm, expectedChecksum: precalculatedChecksum)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -487,12 +423,12 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); // Assert - AssertWriteChecksumMismatch(operation, algorithm, algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); + AssertWriteChecksumMismatch(operation, algorithm); } } [TestCaseSource(nameof(GetValidationAlgorithms))] - public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlgorithm algorithm) + public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlgorithm algorithm) { await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); @@ -505,7 +441,7 @@ public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlg }; // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer - var streamTamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); + var streamTamperPolicy = new TamperStreamContentsPolicy(); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(streamTamperPolicy, HttpPipelinePosition.PerCall); @@ -520,10 +456,9 @@ public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlg // Act streamTamperPolicy.TransformRequestBody = true; AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); - using var listener = AzureEventSourceListener.CreateConsoleLogger(); + // Assert - AssertWriteChecksumMismatch(operation, 
algorithm, - expectStructuredMessage: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); + AssertWriteChecksumMismatch(operation, algorithm); } } @@ -538,10 +473,7 @@ public virtual async Task UploadPartitionUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) - : GetRequestChecksumHeaderAssertion(clientAlgorithm); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -580,10 +512,7 @@ public virtual async Task UploadPartitionOverwritesDefaultClientValidationOption }; // make pipeline assertion for checking checksum was present on upload - var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) - : GetRequestChecksumHeaderAssertion(overrideAlgorithm); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -626,14 +555,10 @@ public virtual async Task UploadPartitionDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (request.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-structured-body")) - { - Assert.Fail($"Structured body used when none expected."); - } }); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -676,11 +601,9 @@ public virtual async Task OpenWriteSuccessfulHashComputation( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(algorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); var clientOptions = ClientBuilder.GetOptions(); - //ObserveStructuredMessagePolicy observe = new(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); - //clientOptions.AddPolicy(observe, HttpPipelinePosition.BeforeTransport); var client = await GetResourceClientAsync( disposingContainer.Container, @@ -693,7 +616,6 @@ public virtual async Task OpenWriteSuccessfulHashComputation( using var writeStream = await OpenWriteAsync(client, validationOptions, 
streamBufferSize); // Assert - //using var obsv = observe.CheckRequestScope(); using (checksumPipelineAssertion.CheckRequestScope()) { foreach (var _ in Enumerable.Range(0, streamWrites)) @@ -722,7 +644,7 @@ public virtual async Task OpenWriteMismatchedHashThrows(StorageChecksumAlgorithm // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer var clientOptions = ClientBuilder.GetOptions(); - var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); + var tamperPolicy = new TamperStreamContentsPolicy(); clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); var client = await GetResourceClientAsync( @@ -760,7 +682,7 @@ public virtual async Task OpenWriteUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(clientAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -804,7 +726,7 @@ public virtual async Task OpenWriteOverwritesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(overrideAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -852,7 +774,7 @@ public virtual async Task OpenWriteDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if 
(request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (request.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -964,7 +886,7 @@ public virtual async Task ParallelUploadSplitSuccessfulHashComputation(StorageCh // make pipeline assertion for checking checksum was present on upload var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1001,10 +923,8 @@ public virtual async Task ParallelUploadOneShotSuccessfulHashComputation(Storage }; // make pipeline assertion for checking checksum was present on upload - var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) - : GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy( + checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1061,7 +981,7 @@ public virtual async Task ParallelUploadPrecalculatedComposableHashAccepted(Stor PrecalculatedChecksum = hash }; - var client = await GetResourceClientAsync(disposingContainer.Container, dataLength, createResource: true); + var client = await GetResourceClientAsync(disposingContainer.Container, dataLength); // Act await DoesNotThrowOrInconclusiveAsync( @@ -1091,10 +1011,8 @@ public virtual async Task ParallelUploadUsesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split - ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) - : GetRequestChecksumHeaderAssertion(clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( + clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1145,10 +1063,8 @@ public virtual async Task ParallelUploadOverwritesDefaultClientValidationOptions }; // make pipeline assertion for checking checksum was present on upload - var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split - ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) - : GetRequestChecksumHeaderAssertion(overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( + overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1203,7 +1119,7 @@ public virtual async Task ParallelUploadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (request.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1268,17 +1184,15 @@ public virtual async Task ParallelDownloadSuccessfulHashVerification( }; 
// Act - byte[] dest; - using (MemoryStream ms = new()) + var dest = new MemoryStream(); using (checksumPipelineAssertion.CheckRequestScope()) { - await ParallelDownloadAsync(client, ms, validationOptions, transferOptions); - dest = ms.ToArray(); + await ParallelDownloadAsync(client, dest, validationOptions, transferOptions); } // Assert // Assertion was in the pipeline and the SDK not throwing means the checksum was validated - Assert.IsTrue(dest.SequenceEqual(data)); + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } [Test] @@ -1443,7 +1357,7 @@ public virtual async Task ParallelDownloadDisablesDefaultClientValidationOptions { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (response.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1651,7 +1565,7 @@ public virtual async Task OpenReadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (response.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1701,7 +1615,7 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; // Act - using var dest = new MemoryStream(); + var dest = new MemoryStream(); var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert @@ -1712,71 +1626,13 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + Assert.True(response.Headers.Contains("x-ms-content-crc64")); break; 
default: Assert.Fail("Test can't validate given algorithm type."); break; } - var result = dest.ToArray(); - Assert.IsTrue(result.SequenceEqual(data)); - } - - [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader, false, false)] - [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader-1, false, false)] - [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, true, false)] - [TestCase(StorageChecksumAlgorithm.MD5, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, false, true)] - public virtual async Task DownloadApporpriatelyUsesStructuredMessage( - StorageChecksumAlgorithm algorithm, - int? downloadLen, - bool expectStructuredMessage, - bool expectThrow) - { - await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); - - // Arrange - const int dataLength = Constants.KB; - var data = GetRandomBuffer(dataLength); - - var resourceName = GetNewResourceName(); - var client = await GetResourceClientAsync( - disposingContainer.Container, - resourceLength: dataLength, - createResource: true, - resourceName: resourceName); - await SetupDataAsync(client, new MemoryStream(data)); - - // make pipeline assertion for checking checksum was present on download - HttpPipelinePolicy checksumPipelineAssertion = new AssertMessageContentsPolicy(checkResponse: expectStructuredMessage - ? 
GetResponseStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64) - : GetResponseChecksumAssertion(algorithm)); - TClientOptions clientOptions = ClientBuilder.GetOptions(); - clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); - - client = await GetResourceClientAsync( - disposingContainer.Container, - resourceLength: dataLength, - resourceName: resourceName, - createResource: false, - downloadAlgorithm: algorithm, - options: clientOptions); - - var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; - - // Act - var dest = new MemoryStream(); - AsyncTestDelegate operation = async () => await DownloadPartitionAsync( - client, dest, validationOptions, downloadLen.HasValue ? new HttpRange(length: downloadLen.Value) : default); - // Assert (policies checked use of content validation) - if (expectThrow) - { - Assert.That(operation, Throws.TypeOf()); - } - else - { - Assert.That(operation, Throws.Nothing); - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); - } + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } [Test, Combinatorial] @@ -1802,9 +1658,7 @@ public virtual async Task DownloadHashMismatchThrows( // alter response contents in pipeline, forcing a checksum mismatch on verification step var clientOptions = ClientBuilder.GetOptions(); - var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(50); - tamperPolicy.TransformResponseBody = true; - clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); + clientOptions.AddPolicy(new TamperStreamContentsPolicy() { TransformResponseBody = true }, HttpPipelinePosition.PerCall); client = await GetResourceClientAsync( disposingContainer.Container, createResource: false, @@ -1816,7 +1670,7 @@ public virtual async Task DownloadHashMismatchThrows( AsyncTestDelegate operation = async () => await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert - if (validate || 
algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + if (validate) { // SDK responsible for finding bad checksum. Throw. ThrowsOrInconclusiveAsync(operation); @@ -1874,7 +1728,7 @@ public virtual async Task DownloadUsesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + Assert.True(response.Headers.Contains("x-ms-content-crc64")); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1934,7 +1788,7 @@ public virtual async Task DownloadOverwritesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + Assert.True(response.Headers.Contains("x-ms-content-crc64")); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1973,7 +1827,7 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (response.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1996,54 +1850,7 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( // Assert // no policies this time; just check response headers Assert.False(response.Headers.Contains("Content-MD5")); - Assert.False(response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)); - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); - } - - [Test] - public virtual async Task DownloadRecoversFromInterruptWithValidation( - [ValueSource(nameof(GetValidationAlgorithms))] StorageChecksumAlgorithm algorithm) - { - using var _ = AzureEventSourceListener.CreateConsoleLogger(); - int dataLen = 
algorithm.ResolveAuto() switch { - StorageChecksumAlgorithm.StorageCrc64 => 5 * Constants.MB, // >4MB for multisegment - _ => Constants.KB, - }; - - await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); - - // Arrange - var data = GetRandomBuffer(dataLen); - - TClientOptions options = ClientBuilder.GetOptions(); - options.AddPolicy(new FaultyDownloadPipelinePolicy(dataLen - 512, new IOException(), () => { }), HttpPipelinePosition.BeforeTransport); - var client = await GetResourceClientAsync( - disposingContainer.Container, - resourceLength: dataLen, - createResource: true, - options: options); - await SetupDataAsync(client, new MemoryStream(data)); - - var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; - - // Act - var dest = new MemoryStream(); - var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); - - // Assert - // no policies this time; just check response headers - switch (algorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - Assert.True(response.Headers.Contains("Content-MD5")); - break; - case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); - break; - default: - Assert.Fail("Test can't validate given algorithm type."); - break; - } + Assert.False(response.Headers.Contains("x-ms-content-crc64")); Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } #endregion @@ -2084,7 +1891,7 @@ public async Task RoundtripWIthDefaults() // make pipeline assertion for checking checksum was present on upload AND download var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumHeaderAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), + checkRequest: GetRequestChecksumAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), checkResponse: 
GetResponseChecksumAssertion(expectedAlgorithm)); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs deleted file mode 100644 index a0f9158040b11..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers.Binary; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core; -using Azure.Storage.Shared; -using Azure.Storage.Test.Shared; -using Microsoft.Diagnostics.Tracing.Parsers.AspNet; -using Moq; -using NUnit.Framework; - -namespace Azure.Storage.Tests; - -[TestFixture(true)] -[TestFixture(false)] -public class StructuredMessageDecodingRetriableStreamTests -{ - public bool Async { get; } - - public StructuredMessageDecodingRetriableStreamTests(bool async) - { - Async = async; - } - - private Mock AllExceptionsRetry() - { - Mock mock = new(MockBehavior.Strict); - mock.Setup(rc => rc.IsRetriableException(It.IsAny())).Returns(true); - return mock; - } - - [Test] - public async ValueTask UninterruptedStream() - { - byte[] data = new Random().NextBytesInline(4 * Constants.KB).ToArray(); - byte[] dest = new byte[data.Length]; - - // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream - using (Stream src = new MemoryStream(data)) - using (Stream retriableSrc = new StructuredMessageDecodingRetriableStream(src, new(), default, default, default, default, default, 1)) - using (Stream dst = new MemoryStream(dest)) - { - await retriableSrc.CopyToInternal(dst, Async, default); - } - - Assert.AreEqual(data, dest); - } - - [Test] - public async Task 
Interrupt_DataIntact([Values(true, false)] bool multipleInterrupts) - { - const int segments = 4; - const int segmentLen = Constants.KB; - const int readLen = 128; - const int interruptPos = segmentLen + (3 * readLen) + 10; - - Random r = new(); - byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); - byte[] dest = new byte[data.Length]; - - // Mock a decoded data for the mocked StructuredMessageDecodingStream - StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() - { - TotalSegments = segments, - InnerStreamLength = data.Length, - Flags = StructuredMessage.Flags.StorageCrc64 - }; - // for test purposes, initialize a DecodedData, since we are not actively decoding in this test - initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); - - (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) - { - Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); - if (faulty) - { - stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); - } - // Mock a decoded data for the mocked StructuredMessageDecodingStream - StructuredMessageDecodingStream.RawDecodedData decodedData = new() - { - TotalSegments = segments, - InnerStreamLength = data.Length, - Flags = StructuredMessage.Flags.StorageCrc64, - }; - // for test purposes, initialize a DecodedData, since we are not actively decoding in this test - initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); - return (stream, decodedData); - } - - // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream - using (Stream src = new MemoryStream(data)) - using (Stream faultySrc = new FaultyStream(src, interruptPos, 1, new Exception(), () => { })) - using (Stream retriableSrc = new 
StructuredMessageDecodingRetriableStream( - faultySrc, - initialDecodedData, - default, - offset => Factory(offset, multipleInterrupts), - offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), - null, - AllExceptionsRetry().Object, - int.MaxValue)) - using (Stream dst = new MemoryStream(dest)) - { - await retriableSrc.CopyToInternal(dst, readLen, Async, default); - } - - Assert.AreEqual(data, dest); - } - - [Test] - public async Task Interrupt_AppropriateRewind() - { - const int segments = 2; - const int segmentLen = Constants.KB; - const int dataLen = segments * segmentLen; - const int readLen = segmentLen / 4; - const int interruptOffset = 10; - const int interruptPos = segmentLen + (2 * readLen) + interruptOffset; - Random r = new(); - - // Mock a decoded data for the mocked StructuredMessageDecodingStream - StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() - { - TotalSegments = segments, - InnerStreamLength = segments * segmentLen, - Flags = StructuredMessage.Flags.StorageCrc64, - }; - // By the time of interrupt, there will be one segment reported - initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); - - Mock mock = new(MockBehavior.Strict); - mock.SetupGet(s => s.CanRead).Returns(true); - mock.SetupGet(s => s.CanSeek).Returns(false); - if (Async) - { - mock.SetupSequence(s => s.ReadAsync(It.IsAny(), It.IsAny(), It.IsAny(), default)) - .Returns(Task.FromResult(readLen)) // start first segment - .Returns(Task.FromResult(readLen)) - .Returns(Task.FromResult(readLen)) - .Returns(Task.FromResult(readLen)) // finish first segment - .Returns(Task.FromResult(readLen)) // start second segment - .Returns(Task.FromResult(readLen)) - // faulty stream interrupt - .Returns(Task.FromResult(readLen * 2)) // restart second segment. 
fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once - .Returns(Task.FromResult(readLen)) - .Returns(Task.FromResult(readLen)) // end second segment - .Returns(Task.FromResult(0)) // signal end of stream - .Returns(Task.FromResult(0)) // second signal needed for stream wrapping reasons - ; - } - else - { - mock.SetupSequence(s => s.Read(It.IsAny(), It.IsAny(), It.IsAny())) - .Returns(readLen) // start first segment - .Returns(readLen) - .Returns(readLen) - .Returns(readLen) // finish first segment - .Returns(readLen) // start second segment - .Returns(readLen) - // faulty stream interrupt - .Returns(readLen * 2) // restart second segment. fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once - .Returns(readLen) - .Returns(readLen) // end second segment - .Returns(0) // signal end of stream - .Returns(0) // second signal needed for stream wrapping reasons - ; - } - Stream faultySrc = new FaultyStream(mock.Object, interruptPos, 1, new Exception(), default); - Stream retriableSrc = new StructuredMessageDecodingRetriableStream( - faultySrc, - initialDecodedData, - default, - offset => (mock.Object, new()), - offset => new(Task.FromResult((mock.Object, new StructuredMessageDecodingStream.RawDecodedData()))), - null, - AllExceptionsRetry().Object, - 1); - - int totalRead = 0; - int read = 0; - byte[] buf = new byte[readLen]; - if (Async) - { - while ((read = await retriableSrc.ReadAsync(buf, 0, buf.Length)) > 0) - { - totalRead += read; - } - } - else - { - while ((read = retriableSrc.Read(buf, 0, buf.Length)) > 0) - { - totalRead += read; - } - } - await retriableSrc.CopyToInternal(Stream.Null, readLen, Async, default); - - // Asserts we read exactly the data length, excluding the fastforward of the inner stream - Assert.That(totalRead, Is.EqualTo(dataLen)); - } - - [Test] - public async Task Interrupt_ProperDecode([Values(true, false)] bool multipleInterrupts) - { - // decoding stream inserts a 
buffered layer of 4 KB. use larger sizes to avoid interference from it. - const int segments = 4; - const int segmentLen = 128 * Constants.KB; - const int readLen = 8 * Constants.KB; - const int interruptPos = segmentLen + (3 * readLen) + 10; - - Random r = new(); - byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); - byte[] dest = new byte[data.Length]; - - (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) - { - Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); - stream = new StructuredMessageEncodingStream(stream, segmentLen, StructuredMessage.Flags.StorageCrc64); - if (faulty) - { - stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); - } - return StructuredMessageDecodingStream.WrapStream(stream); - } - - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = Factory(0, true); - using Stream retriableSrc = new StructuredMessageDecodingRetriableStream( - decodingStream, - decodedData, - default, - offset => Factory(offset, multipleInterrupts), - offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), - null, - AllExceptionsRetry().Object, - int.MaxValue); - using Stream dst = new MemoryStream(dest); - - await retriableSrc.CopyToInternal(dst, readLen, Async, default); - - Assert.AreEqual(data, dest); - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs deleted file mode 100644 index 2789672df4976..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers.Binary; -using System.Dynamic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Azure.Storage.Blobs.Tests; -using Azure.Storage.Shared; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - [TestFixture(ReadMethod.SyncArray)] - [TestFixture(ReadMethod.AsyncArray)] -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - [TestFixture(ReadMethod.SyncSpan)] - [TestFixture(ReadMethod.AsyncMemory)] -#endif - public class StructuredMessageDecodingStreamTests - { - // Cannot just implement as passthru in the stream - // Must test each one - public enum ReadMethod - { - SyncArray, - AsyncArray, -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - SyncSpan, - AsyncMemory -#endif - } - - public ReadMethod Method { get; } - - public StructuredMessageDecodingStreamTests(ReadMethod method) - { - Method = method; - } - - private class CopyStreamException : Exception - { - public long TotalCopied { get; } - - public CopyStreamException(Exception inner, long totalCopied) - : base($"Failed read after {totalCopied}-many bytes.", inner) - { - TotalCopied = totalCopied; - } - } - private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl - { - byte[] buf = new byte[bufferSize]; - int read; - long totalRead = 0; - try - { - switch (Method) - { - case ReadMethod.SyncArray: - while ((read = source.Read(buf, 0, bufferSize)) > 0) - { - totalRead += read; - destination.Write(buf, 0, read); - } - break; - case ReadMethod.AsyncArray: - while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) - { - totalRead += read; - await destination.WriteAsync(buf, 0, read); - } - break; -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - case ReadMethod.SyncSpan: - while ((read = source.Read(new Span(buf))) > 0) - { - totalRead += read; - destination.Write(new 
Span(buf, 0, read)); - } - break; - case ReadMethod.AsyncMemory: - while ((read = await source.ReadAsync(new Memory(buf))) > 0) - { - totalRead += read; - await destination.WriteAsync(new Memory(buf, 0, read)); - } - break; -#endif - } - destination.Flush(); - } - catch (Exception ex) - { - throw new CopyStreamException(ex, totalRead); - } - return totalRead; - } - - [Test] - [Pairwise] - public async Task DecodesData( - [Values(2048, 2005)] int dataLength, - [Values(default, 512)] int? seglen, - [Values(8*Constants.KB, 512, 530, 3)] int readLen, - [Values(true, false)] bool useCrc) - { - int segmentContentLength = seglen ?? int.MaxValue; - Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - byte[] decodedData; - using (MemoryStream dest = new()) - { - await CopyStream(decodingStream, dest, readLen); - decodedData = dest.ToArray(); - } - - Assert.That(new Span(decodedData).SequenceEqual(originalData)); - } - - [Test] - public void BadStreamBadVersion() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - encodedData[0] = byte.MaxValue; - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public async Task BadSegmentCrcThrows() - { - const int segmentLength = 256; - Random r = new(); - - byte[] originalData = new byte[2048]; - r.NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, 
Flags.StorageCrc64); - - const int badBytePos = 1024; - encodedData[badBytePos] = (byte)~encodedData[badBytePos]; - - MemoryStream encodedDataStream = new(encodedData); - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(encodedDataStream); - - // manual try/catch to validate the proccess failed mid-stream rather than the end - const int copyBufferSize = 4; - bool caught = false; - try - { - await CopyStream(decodingStream, Stream.Null, copyBufferSize); - } - catch (CopyStreamException ex) - { - caught = true; - Assert.That(ex.TotalCopied, Is.LessThanOrEqualTo(badBytePos)); - } - Assert.That(caught); - } - - [Test] - public void BadStreamCrcThrows() - { - const int segmentLength = 256; - Random r = new(); - - byte[] originalData = new byte[2048]; - r.NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, Flags.StorageCrc64); - - encodedData[originalData.Length - 1] = (byte)~encodedData[originalData.Length - 1]; - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public void BadStreamWrongContentLength() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - BinaryPrimitives.WriteInt64LittleEndian(new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), 123456789L); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [TestCase(-1)] - [TestCase(1)] - public void BadStreamWrongSegmentCount(int difference) - { - const int dataSize = 1024; - const int segmentSize = 256; - const int numSegments = 4; - - 
byte[] originalData = new byte[dataSize]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentSize, Flags.StorageCrc64); - - // rewrite the segment count to be different than the actual number of segments - BinaryPrimitives.WriteInt16LittleEndian( - new Span(encodedData, V1_0.StreamHeaderSegmentCountOffset, 2), (short)(numSegments + difference)); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public void BadStreamWrongSegmentNum() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - BinaryPrimitives.WriteInt16LittleEndian( - new Span(encodedData, V1_0.StreamHeaderLength + V1_0.SegmentHeaderNumOffset, 2), 123); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - [Combinatorial] - public async Task BadStreamWrongContentLength( - [Values(-1, 1)] int difference, - [Values(true, false)] bool lengthProvided) - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - BinaryPrimitives.WriteInt64LittleEndian( - new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), - encodedData.Length + difference); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream( - new MemoryStream(encodedData), - lengthProvided ? 
(long?)encodedData.Length : default); - - // manual try/catch with tiny buffer to validate the proccess failed mid-stream rather than the end - const int copyBufferSize = 4; - bool caught = false; - try - { - await CopyStream(decodingStream, Stream.Null, copyBufferSize); - } - catch (CopyStreamException ex) - { - caught = true; - if (lengthProvided) - { - Assert.That(ex.TotalCopied, Is.EqualTo(0)); - } - else - { - Assert.That(ex.TotalCopied, Is.EqualTo(originalData.Length)); - } - } - Assert.That(caught); - } - - [Test] - public void BadStreamMissingExpectedStreamFooter() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - byte[] brokenData = new byte[encodedData.Length - Crc64Length]; - new Span(encodedData, 0, encodedData.Length - Crc64Length).CopyTo(brokenData); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(brokenData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public void NoSeek() - { - (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); - - Assert.That(stream.CanSeek, Is.False); - Assert.That(() => stream.Length, Throws.TypeOf()); - Assert.That(() => stream.Position, Throws.TypeOf()); - Assert.That(() => stream.Position = 0, Throws.TypeOf()); - Assert.That(() => stream.Seek(0, SeekOrigin.Begin), Throws.TypeOf()); - } - - [Test] - public void NoWrite() - { - (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); - byte[] data = new byte[1024]; - new Random().NextBytes(data); - - Assert.That(stream.CanWrite, Is.False); - Assert.That(() => stream.Write(data, 0, data.Length), - Throws.TypeOf()); - Assert.That(async () => await stream.WriteAsync(data, 0, data.Length, CancellationToken.None), - Throws.TypeOf()); -#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - Assert.That(() => stream.Write(new Span(data)), - Throws.TypeOf()); - Assert.That(async () => await stream.WriteAsync(new Memory(data), CancellationToken.None), - Throws.TypeOf()); -#endif - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs deleted file mode 100644 index e0f91dee7de3a..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers.Binary; -using System.IO; -using System.Linq; -using System.Threading.Tasks; -using Azure.Storage.Blobs.Tests; -using Azure.Storage.Shared; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - [TestFixture(ReadMethod.SyncArray)] - [TestFixture(ReadMethod.AsyncArray)] -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - [TestFixture(ReadMethod.SyncSpan)] - [TestFixture(ReadMethod.AsyncMemory)] -#endif - public class StructuredMessageEncodingStreamTests - { - // Cannot just implement as passthru in the stream - // Must test each one - public enum ReadMethod - { - SyncArray, - AsyncArray, -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - SyncSpan, - AsyncMemory -#endif - } - - public ReadMethod Method { get; } - - public StructuredMessageEncodingStreamTests(ReadMethod method) - { - Method = method; - } - - private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl - { - byte[] buf = new byte[bufferSize]; - int read; - switch (Method) - { - case ReadMethod.SyncArray: - while ((read = source.Read(buf, 0, bufferSize)) > 0) - { - destination.Write(buf, 0, read); - } - break; - case 
ReadMethod.AsyncArray: - while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) - { - await destination.WriteAsync(buf, 0, read); - } - break; -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - case ReadMethod.SyncSpan: - while ((read = source.Read(new Span(buf))) > 0) - { - destination.Write(new Span(buf, 0, read)); - } - break; - case ReadMethod.AsyncMemory: - while ((read = await source.ReadAsync(new Memory(buf))) > 0) - { - await destination.WriteAsync(new Memory(buf, 0, read)); - } - break; -#endif - } - destination.Flush(); - } - - [Test] - [Pairwise] - public async Task EncodesData( - [Values(2048, 2005)] int dataLength, - [Values(default, 512)] int? seglen, - [Values(8 * Constants.KB, 512, 530, 3)] int readLen, - [Values(true, false)] bool useCrc) - { - int segmentContentLength = seglen ?? int.MaxValue; - Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); - - Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(originalData), segmentContentLength, flags); - byte[] encodedData; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest, readLen); - encodedData = dest.ToArray(); - } - - Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); - } - - [TestCase(0, 0)] // start - [TestCase(5, 0)] // partway through stream header - [TestCase(V1_0.StreamHeaderLength, 0)] // start of segment - [TestCase(V1_0.StreamHeaderLength + 3, 0)] // partway through segment header - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength, 0)] // start of segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123, 123)] // partway through segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512, 512)] // start of segment footer - 
[TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515, 512)] // partway through segment footer - [TestCase(V1_0.StreamHeaderLength + 3*V1_0.SegmentHeaderLength + 2*Crc64Length + 1500, 1500)] // partway through not first segment content - public async Task Seek(int targetRewindOffset, int expectedInnerStreamPosition) - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - MemoryStream dataStream = new(data); - StructuredMessageEncodingStream encodingStream = new(dataStream, segmentLength, Flags.StorageCrc64); - - // no support for seeking past existing read, need to consume whole stream before seeking - await CopyStream(encodingStream, Stream.Null); - - encodingStream.Position = targetRewindOffset; - Assert.That(encodingStream.Position, Is.EqualTo(targetRewindOffset)); - Assert.That(dataStream.Position, Is.EqualTo(expectedInnerStreamPosition)); - } - - [TestCase(0)] // start - [TestCase(5)] // partway through stream header - [TestCase(V1_0.StreamHeaderLength)] // start of segment - [TestCase(V1_0.StreamHeaderLength + 3)] // partway through segment header - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength)] // start of segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123)] // partway through segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512)] // start of segment footer - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515)] // partway through segment footer - [TestCase(V1_0.StreamHeaderLength + 2 * V1_0.SegmentHeaderLength + Crc64Length + 1500)] // partway through not first segment content - public async Task SupportsRewind(int targetRewindOffset) - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), 
segmentLength, Flags.StorageCrc64); - byte[] encodedData1; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest); - encodedData1 = dest.ToArray(); - } - encodingStream.Position = targetRewindOffset; - byte[] encodedData2; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest); - encodedData2 = dest.ToArray(); - } - - Assert.That(new Span(encodedData1).Slice(targetRewindOffset).SequenceEqual(encodedData2)); - } - - [Test] - public async Task SupportsFastForward() - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - // must have read stream to fastforward. so read whole stream upfront & save result to check later - Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), segmentLength, Flags.StorageCrc64); - byte[] encodedData; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest); - encodedData = dest.ToArray(); - } - - encodingStream.Position = 0; - - bool skip = false; - const int increment = 499; - while (encodingStream.Position < encodingStream.Length) - { - if (skip) - { - encodingStream.Position = Math.Min(dataLength, encodingStream.Position + increment); - skip = !skip; - continue; - } - ReadOnlyMemory expected = new(encodedData, (int)encodingStream.Position, - (int)Math.Min(increment, encodedData.Length - encodingStream.Position)); - ReadOnlyMemory actual; - using (MemoryStream dest = new(increment)) - { - await CopyStream(WindowStream.GetWindow(encodingStream, increment), dest); - actual = dest.ToArray(); - } - Assert.That(expected.Span.SequenceEqual(actual.Span)); - skip = !skip; - } - } - - [Test] - public void NotSupportsFastForwardBeyondLatestRead() - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - Stream encodingStream = new StructuredMessageEncodingStream(new 
MemoryStream(data), segmentLength, Flags.StorageCrc64); - - Assert.That(() => encodingStream.Position = 123, Throws.TypeOf()); - } - - [Test] - [Pairwise] - public async Task WrapperStreamCorrectData( - [Values(2048, 2005)] int dataLength, - [Values(8 * Constants.KB, 512, 530, 3)] int readLen) - { - int segmentContentLength = dataLength; - Flags flags = Flags.StorageCrc64; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - byte[] crc = CrcInline(originalData); - byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); - - Stream encodingStream = new StructuredMessagePrecalculatedCrcWrapperStream(new MemoryStream(originalData), crc); - byte[] encodedData; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest, readLen); - encodedData = dest.ToArray(); - } - - Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); - } - - private static void AssertExpectedStreamHeader(ReadOnlySpan actual, int originalDataLength, Flags flags, int expectedSegments) - { - int expectedFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; - - Assert.That(actual.Length, Is.EqualTo(V1_0.StreamHeaderLength)); - Assert.That(actual[0], Is.EqualTo(1)); - Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(1, 8)), - Is.EqualTo(V1_0.StreamHeaderLength + expectedSegments * (V1_0.SegmentHeaderLength + expectedFooterLen) + originalDataLength)); - Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(9, 2)), Is.EqualTo((short)flags)); - Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(11, 2)), Is.EqualTo((short)expectedSegments)); - } - - private static void AssertExpectedSegmentHeader(ReadOnlySpan actual, int segmentNum, long contentLength) - { - Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(0, 2)), Is.EqualTo((short) segmentNum)); - Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(2, 8)), Is.EqualTo(contentLength)); - } - - private static byte[] CrcInline(ReadOnlySpan data) - { - var crc = StorageCrc64HashAlgorithm.Create(); - crc.Append(data); - return crc.GetCurrentHash(); - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs deleted file mode 100644 index 59e80320d96a0..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Azure.Storage.Shared; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Blobs.Tests -{ - internal class StructuredMessageHelper - { - public static byte[] MakeEncodedData(ReadOnlySpan data, long segmentContentLength, Flags flags) - { - int segmentCount = (int) Math.Ceiling(data.Length / (double)segmentContentLength); - int segmentFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
8 : 0; - int streamFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 8 : 0; - - byte[] encodedData = new byte[ - V1_0.StreamHeaderLength + - segmentCount*(V1_0.SegmentHeaderLength + segmentFooterLen) + - streamFooterLen + - data.Length]; - V1_0.WriteStreamHeader( - new Span(encodedData, 0, V1_0.StreamHeaderLength), - encodedData.Length, - flags, - segmentCount); - - int i = V1_0.StreamHeaderLength; - int j = 0; - foreach (int seg in Enumerable.Range(1, segmentCount)) - { - int segContentLen = Math.Min((int)segmentContentLength, data.Length - j); - V1_0.WriteSegmentHeader( - new Span(encodedData, i, V1_0.SegmentHeaderLength), - seg, - segContentLen); - i += V1_0.SegmentHeaderLength; - - data.Slice(j, segContentLen) - .CopyTo(new Span(encodedData).Slice(i)); - i += segContentLen; - - if (flags.HasFlag(Flags.StorageCrc64)) - { - var crc = StorageCrc64HashAlgorithm.Create(); - crc.Append(data.Slice(j, segContentLen)); - crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); - i += Crc64Length; - } - j += segContentLen; - } - - if (flags.HasFlag(Flags.StorageCrc64)) - { - var crc = StorageCrc64HashAlgorithm.Create(); - crc.Append(data); - crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); - } - - return encodedData; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs deleted file mode 100644 index 61583aa1ebe4e..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.IO; -using System.Linq; -using System.Threading.Tasks; -using Azure.Storage.Shared; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - [TestFixture(ReadMethod.SyncArray)] - [TestFixture(ReadMethod.AsyncArray)] -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - [TestFixture(ReadMethod.SyncSpan)] - [TestFixture(ReadMethod.AsyncMemory)] -#endif - public class StructuredMessageStreamRoundtripTests - { - // Cannot just implement as passthru in the stream - // Must test each one - public enum ReadMethod - { - SyncArray, - AsyncArray, -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - SyncSpan, - AsyncMemory -#endif - } - - public ReadMethod Method { get; } - - public StructuredMessageStreamRoundtripTests(ReadMethod method) - { - Method = method; - } - - private class CopyStreamException : Exception - { - public long TotalCopied { get; } - - public CopyStreamException(Exception inner, long totalCopied) - : base($"Failed read after {totalCopied}-many bytes.", inner) - { - TotalCopied = totalCopied; - } - } - private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl - { - byte[] buf = new byte[bufferSize]; - int read; - long totalRead = 0; - try - { - switch (Method) - { - case ReadMethod.SyncArray: - while ((read = source.Read(buf, 0, bufferSize)) > 0) - { - totalRead += read; - destination.Write(buf, 0, read); - } - break; - case ReadMethod.AsyncArray: - while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) - { - totalRead += read; - await destination.WriteAsync(buf, 0, read); - } - break; -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - case ReadMethod.SyncSpan: - while ((read = source.Read(new Span(buf))) > 0) - { - totalRead += read; - destination.Write(new Span(buf, 0, read)); - } - break; - case ReadMethod.AsyncMemory: - while ((read = await source.ReadAsync(new 
Memory(buf))) > 0) - { - totalRead += read; - await destination.WriteAsync(new Memory(buf, 0, read)); - } - break; -#endif - } - destination.Flush(); - } - catch (Exception ex) - { - throw new CopyStreamException(ex, totalRead); - } - return totalRead; - } - - [Test] - [Pairwise] - public async Task RoundTrip( - [Values(2048, 2005)] int dataLength, - [Values(default, 512)] int? seglen, - [Values(8 * Constants.KB, 512, 530, 3)] int readLen, - [Values(true, false)] bool useCrc) - { - int segmentLength = seglen ?? int.MaxValue; - Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - - byte[] roundtripData; - using (MemoryStream source = new(originalData)) - using (Stream encode = new StructuredMessageEncodingStream(source, segmentLength, flags)) - using (Stream decode = StructuredMessageDecodingStream.WrapStream(encode).DecodedStream) - using (MemoryStream dest = new()) - { - await CopyStream(source, dest, readLen); - roundtripData = dest.ToArray(); - } - - Assert.That(originalData.SequenceEqual(roundtripData)); - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs deleted file mode 100644 index b4f1dfe178246..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers.Binary; -using System.Collections.Generic; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - public class StructuredMessageTests - { - [TestCase(1024, Flags.None, 2)] - [TestCase(2000, Flags.StorageCrc64, 4)] - public void EncodeStreamHeader(int messageLength, int flags, int numSegments) - { - Span encoding = new(new byte[V1_0.StreamHeaderLength]); - V1_0.WriteStreamHeader(encoding, messageLength, (Flags)flags, numSegments); - - Assert.That(encoding[0], Is.EqualTo((byte)1)); - Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(1, 8)), Is.EqualTo(messageLength)); - Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(9, 2)), Is.EqualTo(flags)); - Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(11, 2)), Is.EqualTo(numSegments)); - } - - [TestCase(V1_0.StreamHeaderLength)] - [TestCase(V1_0.StreamHeaderLength + 1)] - [TestCase(V1_0.StreamHeaderLength - 1)] - public void EncodeStreamHeaderRejectBadBufferSize(int bufferSize) - { - Random r = new(); - byte[] encoding = new byte[bufferSize]; - - void Action() => V1_0.WriteStreamHeader(encoding, r.Next(2, int.MaxValue), Flags.StorageCrc64, r.Next(2, int.MaxValue)); - if (bufferSize < V1_0.StreamHeaderLength) - { - Assert.That(Action, Throws.ArgumentException); - } - else - { - Assert.That(Action, Throws.Nothing); - } - } - - [TestCase(1, 1024)] - [TestCase(5, 39578)] - public void EncodeSegmentHeader(int segmentNum, int contentLength) - { - Span encoding = new(new byte[V1_0.SegmentHeaderLength]); - V1_0.WriteSegmentHeader(encoding, segmentNum, contentLength); - - Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(0, 2)), Is.EqualTo(segmentNum)); - Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(2, 8)), Is.EqualTo(contentLength)); - } - - [TestCase(V1_0.SegmentHeaderLength)] - [TestCase(V1_0.SegmentHeaderLength + 1)] - 
[TestCase(V1_0.SegmentHeaderLength - 1)] - public void EncodeSegmentHeaderRejectBadBufferSize(int bufferSize) - { - Random r = new(); - byte[] encoding = new byte[bufferSize]; - - void Action() => V1_0.WriteSegmentHeader(encoding, r.Next(1, int.MaxValue), r.Next(2, int.MaxValue)); - if (bufferSize < V1_0.SegmentHeaderLength) - { - Assert.That(Action, Throws.ArgumentException); - } - else - { - Assert.That(Action, Throws.Nothing); - } - } - - [TestCase(true)] - [TestCase(false)] - public void EncodeSegmentFooter(bool useCrc) - { - Span encoding = new(new byte[Crc64Length]); - Span crc = useCrc ? new Random().NextBytesInline(Crc64Length) : default; - V1_0.WriteSegmentFooter(encoding, crc); - - if (useCrc) - { - Assert.That(encoding.SequenceEqual(crc), Is.True); - } - else - { - Assert.That(encoding.SequenceEqual(new Span(new byte[Crc64Length])), Is.True); - } - } - - [TestCase(Crc64Length)] - [TestCase(Crc64Length + 1)] - [TestCase(Crc64Length - 1)] - public void EncodeSegmentFooterRejectBadBufferSize(int bufferSize) - { - byte[] encoding = new byte[bufferSize]; - byte[] crc = new byte[Crc64Length]; - new Random().NextBytes(crc); - - void Action() => V1_0.WriteSegmentFooter(encoding, crc); - if (bufferSize < Crc64Length) - { - Assert.That(Action, Throws.ArgumentException); - } - else - { - Assert.That(Action, Throws.Nothing); - } - } - } -} diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj index 93e7432f186e3..6098dcd8ba33d 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj @@ -37,7 +37,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj 
b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj index 214903eb5f9c4..f8b62d0b947e2 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj @@ -22,15 +22,11 @@ - - - - @@ -44,7 +40,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj index 66a9fea0861a2..a6abde432473f 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj @@ -35,7 +35,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj index d75775beceafd..8e574bca36a48 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj @@ -27,7 +27,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj index dd30659cf0a5d..5aaf548493b15 100644 --- a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 diff --git 
a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj index 21a1ea45f92a0..8afd7735a0168 100644 --- a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj @@ -34,7 +34,6 @@ - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/assets.json b/sdk/storage/Azure.Storage.Files.DataLake/assets.json index 8949234de7a1a..442889d04be63 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/assets.json +++ b/sdk/storage/Azure.Storage.Files.DataLake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.DataLake", - "Tag": "net/storage/Azure.Storage.Files.DataLake_4b543941a8" + "Tag": "net/storage/Azure.Storage.Files.DataLake_186c14971d" } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj index ccd45baaff251..7adb79645b0a9 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj @@ -42,7 +42,6 @@ - @@ -82,10 +81,6 @@ - - - - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs index 93ca4c3f9a1fd..3d2bd710e25aa 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs @@ -16,7 +16,6 @@ using Azure.Storage.Common; using Azure.Storage.Files.DataLake.Models; using Azure.Storage.Sas; -using Azure.Storage.Shared; using Metadata = System.Collections.Generic.IDictionary; namespace Azure.Storage.Files.DataLake @@ -2333,39 
+2332,13 @@ internal virtual async Task AppendInternal( using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(DataLakeFileClient))) { // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (content != null && - validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = content.Length - content.Position; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content?.WithNoDispose().WithProgress(progressHandler); - } + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content?.WithNoDispose().WithProgress(progressHandler); ClientConfiguration.Pipeline.LogMethodEnter( nameof(DataLakeFileClient), message: @@ -2400,8 +2373,6 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, 
encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, @@ -2421,8 +2392,6 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs index 4144d908b7549..719932d5cd500 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs @@ -33,7 +33,7 @@ internal partial class FileSystemRestClient /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. /// The value must be "filesystem" for all filesystem operations. The default value is "filesystem". - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". /// , , , or is null. 
public FileSystemRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string resource, string version) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs index 502dd557f4822..6ec456a438564 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs @@ -29,7 +29,5 @@ public PathAppendDataHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// If the lease was auto-renewed with this request. public bool? LeaseRenewed => _response.Headers.TryGetValue("x-ms-lease-renewed", out bool? value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. - public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs index d328c3079de6b..6b1e970bd2fc8 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs @@ -30,7 +30,7 @@ internal partial class PathRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". 
/// The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or -1 for infinite lease. /// , , or is null. public PathRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, int? xMsLeaseDuration = null) @@ -293,7 +293,7 @@ public ResponseWithHeaders Create(int? timeout = null, PathRe } } - internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout, int? maxRecords, string continuation, bool? forceFlag, long? position, bool? retainUncommittedData, bool? close, long? contentLength, byte[] contentMD5, string leaseId, string cacheControl, string contentType, string contentDisposition, string contentEncoding, string contentLanguage, string properties, string owner, string group, string permissions, string acl, string ifMatch, string ifNoneMatch, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string structuredBodyType, long? structuredContentLength) + internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout, int? maxRecords, string continuation, bool? forceFlag, long? position, bool? retainUncommittedData, bool? close, long? contentLength, byte[] contentMD5, string leaseId, string cacheControl, string contentType, string contentDisposition, string contentEncoding, string contentLanguage, string properties, string owner, string group, string permissions, string acl, string ifMatch, string ifNoneMatch, DateTimeOffset? ifModifiedSince, DateTimeOffset? 
ifUnmodifiedSince) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -396,14 +396,6 @@ internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessC { request.Headers.Add("If-Unmodified-Since", ifUnmodifiedSince.Value, "R"); } - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/json"); if (contentLength != null) { @@ -442,19 +434,17 @@ internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessC /// Specify an ETag value to operate only on blobs without a matching value. /// Specify this header value to operate only on a blob if it has been modified since the specified date/time. /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. Concurrent writes to the same file using multiple clients are not supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). 
- public async Task> UpdateAsync(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public async Task> UpdateAsync(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, structuredBodyType, structuredContentLength); + using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PathUpdateHeaders(message.Response); switch (message.Response.Status) @@ -501,19 +491,17 @@ public async Task Specify an ETag value to operate only on blobs without a matching value. /// Specify this header value to operate only on a blob if it has been modified since the specified date/time. /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. 
Concurrent writes to the same file using multiple clients are not supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - public ResponseWithHeaders Update(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Update(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, structuredBodyType, structuredContentLength); + using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince); _pipeline.Send(message, cancellationToken); var headers = new PathUpdateHeaders(message.Response); switch (message.Response.Status) @@ -1327,7 +1315,7 @@ public ResponseWithHeaders FlushData(int? timeout = null, } } - internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? timeout, long? contentLength, byte[] transactionalContentHash, byte[] transactionalContentCrc64, string leaseId, DataLakeLeaseAction? leaseAction, long? leaseDuration, string proposedLeaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, bool? flush, string structuredBodyType, long? structuredContentLength) + internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? timeout, long? contentLength, byte[] transactionalContentHash, byte[] transactionalContentCrc64, string leaseId, DataLakeLeaseAction? leaseAction, long? leaseDuration, string proposedLeaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, bool? 
flush) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -1381,14 +1369,6 @@ internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? t { request.Headers.Add("x-ms-encryption-algorithm", encryptionAlgorithm.Value.ToSerialString()); } - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/json"); if (contentLength != null) { @@ -1418,18 +1398,16 @@ internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? t /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// If file should be flushed after the append. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> AppendDataAsync(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) + public async Task> AppendDataAsync(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush, structuredBodyType, structuredContentLength); + using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PathAppendDataHeaders(message.Response); switch (message.Response.Status) @@ -1456,18 +1434,16 @@ public async Task> AppendDataAsync(St /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// If file should be flushed after the append. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. 
Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders AppendData(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders AppendData(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? 
flush = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush, structuredBodyType, structuredContentLength); + using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush); _pipeline.Send(message, cancellationToken); var headers = new PathAppendDataHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs index 026c78e72481a..35668cb1c3a1d 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs @@ -43,7 +43,5 @@ public PathUpdateHeaders(Response response) public string XMsContinuation => _response.Headers.TryGetValue("x-ms-continuation", out string value) ? value : null; /// The version of the REST protocol used to process the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. - public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs index b00fa12238f4e..118595b4d87d1 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs @@ -28,7 +28,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md index 58f5c3d055d3b..4121ebab9932e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://github.com/Azure/azure-rest-api-specs/blob/794c6178bc06c6c9dceb139e9f9d1b35b1a99701/specification/storage/data-plane/Azure.Storage.Files.DataLake/preview/2025-01-05/DataLakeStorage.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/5da3c08b92d05858b728b013b69502dc93485373/specification/storage/data-plane/Azure.Storage.Files.DataLake/stable/2023-05-03/DataLakeStorage.json generation1-convenience-client: true modelerfour: seal-single-value-enum-by-default: true @@ -23,7 +23,7 @@ directive: if (property.includes('/{filesystem}/{path}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem") && false == param['$ref'].endsWith("#/parameters/Path"))}); - } + } else if (property.includes('/{filesystem}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem"))}); @@ -127,7 +127,7 @@ directive: } $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{filesystem}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj index 1fa78690077be..bef13bb21a1c6 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj @@ -6,9 +6,6 @@ Microsoft Azure.Storage.Files.DataLake client library tests false - - DataLakeSDK - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs index 
5067f98517bd2..4bdefdbf756cd 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs @@ -34,10 +34,7 @@ protected override async Task> Get StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingFileSystem = await ClientBuilder.GetNewFileSystem( - service: service, - fileSystemName: containerName, - publicAccessType: PublicAccessType.None); + var disposingFileSystem = await ClientBuilder.GetNewFileSystem(service: service, fileSystemName: containerName); disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index 0cd25700dd1d7..88fbd1326e018 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -796,7 +796,6 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index 0cd25700dd1d7..88fbd1326e018 100644 --- 
a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -796,7 +796,6 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index 184d64e873031..9ca749681b79e 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": "net/storage/Azure.Storage.Files.Shares_b3158cd2dd" + "Tag": "net/storage/Azure.Storage.Files.Shares_14e0fa0c22" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj index 547cccbd0a5c3..60f6f200fd402 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 @@ -42,7 +42,6 @@ - @@ -86,11 +85,6 @@ - - - - - diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs index 8a2edb8b99134..961c6ff47ce59 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs @@ -33,7 +33,7 @@ internal partial class 
DirectoryRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". /// If true, the trailing dot will not be trimmed from the target URI. /// Valid value is backup. /// If true, the trailing dot will not be trimmed from the source URI. diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs index c4d7056a5cfa3..61384dee810d4 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs @@ -79,9 +79,5 @@ public FileDownloadHeaders(Response response) public ShareLeaseState? LeaseState => _response.Headers.TryGetValue("x-ms-lease-state", out string value) ? value.ToShareLeaseState() : null; /// The current lease status of the file. public ShareLeaseStatus? LeaseStatus => _response.Headers.TryGetValue("x-ms-lease-status", out string value) ? value.ToShareLeaseStatus() : null; - /// Indicates the response body contains a structured message and specifies the message schema version and properties. - public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; - /// The length of the blob/file content inside the message body when the response body is returned as a structured message. Will always be smaller than Content-Length. - public long? StructuredContentLength => _response.Headers.TryGetValue("x-ms-structured-content-length", out long? value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs index 093de99705c4d..d4b584e6660ee 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs @@ -34,7 +34,7 @@ internal partial class FileRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". /// Only update is supported: - Update: Writes the bytes downloaded from the source url into the specified range. The default value is "update". /// If true, the trailing dot will not be trimmed from the target URI. /// Valid value is backup. @@ -204,7 +204,7 @@ public ResponseWithHeaders Create(long fileContentLength, str } } - internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? rangeGetContentMD5, string structuredBodyType, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? rangeGetContentMD5, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -230,10 +230,6 @@ internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? 
ran { request.Headers.Add("x-ms-range-get-content-md5", rangeGetContentMD5.Value); } - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } if (shareFileRequestConditions?.LeaseId != null) { request.Headers.Add("x-ms-lease-id", shareFileRequestConditions.LeaseId); @@ -250,12 +246,11 @@ internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? ran /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// Return file data only from the specified byte range. /// When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. - /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Parameter group. /// The cancellation token to use. - public async Task> DownloadAsync(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, string structuredBodyType = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> DownloadAsync(int? timeout = null, string range = null, bool? 
rangeGetContentMD5 = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, structuredBodyType, shareFileRequestConditions); + using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileDownloadHeaders(message.Response); switch (message.Response.Status) @@ -275,12 +270,11 @@ public async Task> DownloadAsyn /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// Return file data only from the specified byte range. /// When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. - /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Parameter group. /// The cancellation token to use. - public ResponseWithHeaders Download(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, string structuredBodyType = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Download(int? timeout = null, string range = null, bool? 
rangeGetContentMD5 = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, structuredBodyType, shareFileRequestConditions); + using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileDownloadHeaders(message.Response); switch (message.Response.Status) @@ -951,7 +945,7 @@ public ResponseWithHeaders BreakLease(int? timeout = null } } - internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout, byte[] contentMD5, FileLastWrittenMode? fileLastWrittenMode, string structuredBodyType, long? structuredContentLength, Stream optionalbody, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout, byte[] contentMD5, FileLastWrittenMode? fileLastWrittenMode, Stream optionalbody, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -983,14 +977,6 @@ internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteT { request.Headers.Add("x-ms-file-request-intent", _fileRequestIntent.Value.ToString()); } - if (structuredBodyType != null) - { - request.Headers.Add("x-ms-structured-body", structuredBodyType); - } - if (structuredContentLength != null) - { - request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); - } request.Headers.Add("Accept", "application/xml"); if (optionalbody != null) { @@ -1012,20 +998,18 @@ internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteT /// The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is specified, the File service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). /// If the file last write time should be preserved or overwritten. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// Initial data. /// Parameter group. /// The cancellation token to use. /// is null. - public async Task> UploadRangeAsync(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, string structuredBodyType = null, long? structuredContentLength = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> UploadRangeAsync(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? 
fileLastWrittenMode = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (range == null) { throw new ArgumentNullException(nameof(range)); } - using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, structuredBodyType, structuredContentLength, optionalbody, shareFileRequestConditions); + using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, optionalbody, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileUploadRangeHeaders(message.Response); switch (message.Response.Status) @@ -1044,20 +1028,18 @@ public async Task> UploadRangeAsync( /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is specified, the File service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). /// If the file last write time should be preserved or overwritten. - /// Required if the request body is a structured message. Specifies the message schema version and properties. - /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// Initial data. /// Parameter group. /// The cancellation token to use. /// is null. 
- public ResponseWithHeaders UploadRange(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, string structuredBodyType = null, long? structuredContentLength = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders UploadRange(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (range == null) { throw new ArgumentNullException(nameof(range)); } - using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, structuredBodyType, structuredContentLength, optionalbody, shareFileRequestConditions); + using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, optionalbody, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileUploadRangeHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs index 322bfcd1b6d83..db079c2692663 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs @@ -27,7 +27,5 @@ public FileUploadRangeHeaders(Response response) public bool? IsServerEncrypted => _response.Headers.TryGetValue("x-ms-request-server-encrypted", out bool? value) ? value : null; /// Last write time for the file. 
public DateTimeOffset? FileLastWriteTime => _response.Headers.TryGetValue("x-ms-file-last-write-time", out DateTimeOffset? value) ? value : null; - /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. - public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs index fe5ea495a7a15..ef4c21b9a33c7 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs @@ -31,7 +31,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". /// Valid value is backup. /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, ShareTokenIntent? fileRequestIntent = null) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs index 3012d3d8735b1..599aacf2c6287 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs @@ -32,7 +32,7 @@ internal partial class ShareRestClient /// The handler for diagnostic messaging in the client. 
/// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". + /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". /// Valid value is backup. /// , , or is null. public ShareRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, ShareTokenIntent? fileRequestIntent = null) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs index 4037cbdfd875e..0165af94435a0 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs @@ -38,12 +38,6 @@ public partial class ShareFileDownloadInfo : IDisposable, IDownloadedContent public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays - /// - /// When requested using , this value contains the CRC for the download blob range. - /// This value may only become populated once the network stream is fully consumed. 
- /// - public byte[] ContentCrc { get; internal set; } - /// /// Details returned when downloading a file /// diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs index 0b27510aaa6c4..f776384d06add 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs @@ -17,5 +17,20 @@ public static InvalidOperationException FileOrShareMissing( string fileClient, string shareClient) => new InvalidOperationException($"{leaseClient} requires either a {fileClient} or {shareClient}"); + + public static void AssertAlgorithmSupport(StorageChecksumAlgorithm? algorithm) + { + StorageChecksumAlgorithm resolved = (algorithm ?? StorageChecksumAlgorithm.None).ResolveAuto(); + switch (resolved) + { + case StorageChecksumAlgorithm.None: + case StorageChecksumAlgorithm.MD5: + return; + case StorageChecksumAlgorithm.StorageCrc64: + throw new ArgumentException("Azure File Shares do not support CRC-64."); + default: + throw new ArgumentException($"{nameof(StorageChecksumAlgorithm)} does not support value {Enum.GetName(typeof(StorageChecksumAlgorithm), resolved) ?? ((int)resolved).ToString(CultureInfo.InvariantCulture)}."); + } + } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs index 23c5fd40d2db1..2d58482950b9a 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs @@ -2385,70 +2385,51 @@ private async Task> DownloadInternal( // Wrap the response Content in a RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. 
- async ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) - { - (Response response, Stream contentStream) = await StartDownloadAsync( - range, - validationOptions, - conditions, - offset, - async, - cancellationToken).ConfigureAwait(false); - if (etag != response.GetRawResponse().Headers.ETag) + initialResponse.Value.Content = RetriableStream.Create( + stream, + startOffset => { - throw new ShareFileModifiedException( - "File has been modified concurrently", - Uri, etag, response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); - } - return response; - } - async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( - long offset, bool async, CancellationToken cancellationToken) - { - Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); - return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.ContentLength); - } - - if (initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( - initialResponse.Value.Content, initialResponse.Value.ContentLength); - initialResponse.Value.Content = new StructuredMessageDecodingRetriableStream( - decodingStream, - decodedData, - StructuredMessage.Flags.StorageCrc64, - startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) - .EnsureCompleted(), - async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false), - decodedData => + (Response Response, Stream ContentStream) = StartDownloadAsync( + range, + validationOptions, + conditions, + startOffset, + async, + cancellationToken) + .EnsureCompleted(); + if (etag != Response.GetRawResponse().Headers.ETag) { - initialResponse.Value.ContentCrc = new 
byte[StructuredMessage.Crc64Length]; - decodedData.Crc.WriteCrc64(initialResponse.Value.ContentCrc); - }, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } - else - { - initialResponse.Value.Content = RetriableStream.Create( - initialResponse.Value.Content, - startOffset => Factory(startOffset, async: false, cancellationToken) - .EnsureCompleted().Value.Content, - async startOffset => (await Factory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false)).Value.Content, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } + throw new ShareFileModifiedException( + "File has been modified concurrently", + Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); + } + return ContentStream; + }, + async startOffset => + { + (Response Response, Stream ContentStream) = await StartDownloadAsync( + range, + validationOptions, + conditions, + startOffset, + async, + cancellationToken) + .ConfigureAwait(false); + if (etag != Response.GetRawResponse().Headers.ETag) + { + throw new ShareFileModifiedException( + "File has been modified concurrently", + Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); + } + return ContentStream; + }, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); // buffer response stream and ensure it matches the transactional hash if any // Storage will not return a hash for payload >4MB, so this buffer is capped similarly // hashing is opt-in, so this buffer is part of that opt-in - if (validationOptions != default && - validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && - validationOptions.AutoValidateChecksum && - // structured message decoding does the validation for us - !initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + if (validationOptions != default && validationOptions.ChecksumAlgorithm != 
StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)initialResponse.Value.ContentLength); @@ -2531,6 +2512,8 @@ await ContentHasher.AssertResponseHashMatchInternal( bool async = true, CancellationToken cancellationToken = default) { + ShareErrors.AssertAlgorithmSupport(transferValidationOverride?.ChecksumAlgorithm); + // calculation gets illegible with null coalesce; just pre-initialize var pageRange = range; pageRange = new HttpRange( @@ -2540,27 +2523,13 @@ await ContentHasher.AssertResponseHashMatchInternal( (long?)null); ClientConfiguration.Pipeline.LogTrace($"Download {Uri} with range: {pageRange}"); - bool? rangeGetContentMD5 = null; - string structuredBodyType = null; - switch (transferValidationOverride?.ChecksumAlgorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - rangeGetContentMD5 = true; - break; - case StorageChecksumAlgorithm.StorageCrc64: - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - break; - default: - break; - } - ResponseWithHeaders response; + if (async) { response = await FileRestClient.DownloadAsync( range: pageRange == default ? null : pageRange.ToString(), - rangeGetContentMD5: rangeGetContentMD5, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -2569,8 +2538,7 @@ await ContentHasher.AssertResponseHashMatchInternal( { response = FileRestClient.Download( range: pageRange == default ? null : pageRange.ToString(), - rangeGetContentMD5: rangeGetContentMD5, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? 
true : null, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } @@ -4644,6 +4612,7 @@ internal async Task> UploadRangeInternal( CancellationToken cancellationToken) { UploadTransferValidationOptions validationOptions = transferValidationOverride ?? ClientConfiguration.TransferValidation.Upload; + ShareErrors.AssertAlgorithmSupport(validationOptions?.ChecksumAlgorithm); using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareFileClient))) { @@ -4659,38 +4628,14 @@ internal async Task> UploadRangeInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 
0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -4703,8 +4648,6 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -4718,8 +4661,6 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index d7ed8ae3216df..43022bc56d1c1 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/c8eee2dfa99d517e12e6ac8c96b14b707bb3c8eb/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/98b600498947073c18c2ac5eb7c3c658db5a1a59/specification/storage/data-plane/Microsoft.FileStorage/stable/2024-11-04/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true @@ -25,7 +25,7 @@ directive: if (property.includes('/{shareName}/{directory}/{fileName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath") && false == param['$ref'].endsWith("#/parameters/FilePath"))}); - } + } else if (property.includes('/{shareName}/{directory}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath"))}); @@ -46,7 +46,7 @@ directive: $.Metrics.type = "object"; ``` -### Times aren't required +### Times aren't required ``` yaml directive: - from: swagger-document diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj index d09dd8fe8949f..398a4b6367489 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj @@ -17,7 +17,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs 
b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs index 9fd8905e388b1..3dcdb21f27b36 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs @@ -64,6 +64,10 @@ protected override async Task GetResourceClientAsync( private void AssertSupportsHashAlgorithm(StorageChecksumAlgorithm algorithm) { + if (algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + { + TestHelper.AssertInconclusiveRecordingFriendly(Recording.Mode, "Azure File Share does not support CRC64."); + } } protected override async Task UploadPartitionAsync(ShareFileClient client, Stream source, UploadTransferValidationOptions transferValidation) @@ -143,44 +147,8 @@ protected override async Task SetupDataAsync(ShareFileClient client, Stream data public override void TestAutoResolve() { Assert.AreEqual( - StorageChecksumAlgorithm.StorageCrc64, + StorageChecksumAlgorithm.MD5, TransferValidationOptionsExtensions.ResolveAuto(StorageChecksumAlgorithm.Auto)); } - - [Test] - public async Task StructuredMessagePopulatesCrcDownloadStreaming() - { - await using DisposingShare disposingContainer = await ClientBuilder.GetTestShareAsync(); - - const int dataLength = Constants.KB; - byte[] data = GetRandomBuffer(dataLength); - byte[] dataCrc = new byte[8]; - StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); - - ShareFileClient file = disposingContainer.Container.GetRootDirectoryClient().GetFileClient(GetNewResourceName()); - await file.CreateAsync(data.Length); - await file.UploadAsync(new MemoryStream(data)); - - Response response = await file.DownloadAsync(new ShareFileDownloadOptions() - { - TransferValidation = new DownloadTransferValidationOptions - { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - } - }); - - // crc is not present until response stream is consumed - 
Assert.That(response.Value.ContentCrc, Is.Null); - - byte[] downloadedData; - using (MemoryStream ms = new()) - { - await response.Value.Content.CopyToAsync(ms); - downloadedData = ms.ToArray(); - } - - Assert.That(response.Value.ContentCrc, Is.EqualTo(dataCrc)); - Assert.That(downloadedData, Is.EqualTo(data)); - } } } diff --git a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj index 4d0334255f041..e0a6fab3c753b 100644 --- a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj +++ b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj @@ -21,7 +21,6 @@ - From 2a77534c98b38220cfbcde7e3bc78844322d18ff Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Fri, 20 Sep 2024 13:17:59 -0400 Subject: [PATCH 10/25] [Storage] DataLake download APIs feature parity MERGE (#45600) --- .../Azure.Storage.Files.DataLake/CHANGELOG.md | 1 + .../Azure.Storage.Files.DataLake/README.md | 12 + .../Azure.Storage.Files.DataLake.net6.0.cs | 34 + ...e.Storage.Files.DataLake.netstandard2.0.cs | 34 + .../Azure.Storage.Files.DataLake/assets.json | 2 +- .../samples/Sample01a_HelloWorld.cs | 109 ++++ .../samples/Sample01b_HelloWorldAsync.cs | 105 +++ .../src/DataLakeExtensions.cs | 23 + .../src/DataLakeFileClient.cs | 611 +++++++++++++++++- .../src/Models/DataLakeFileReadResult.cs | 25 + .../Models/DataLakeFileReadStreamingResult.cs | 36 ++ .../src/Models/DataLakeModelFactory.cs | 29 + .../tests/FileClientTests.cs | 320 +++++++++ 13 files changed, 1334 insertions(+), 7 deletions(-) create mode 100644 sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadResult.cs create mode 100644 sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadStreamingResult.cs diff --git a/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md b/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md index 3fe9b88537cb6..786a5ff1c0da6 100644 --- 
a/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md @@ -3,6 +3,7 @@ ## 12.20.0-beta.2 (Unreleased) ### Features Added +- Deprecated Read()/ReadAsync() in favor of ReadStreaming()/ReadStreamingAsync() and ReadContent()/ReadContentAsync() for DataLake #45418 ### Breaking Changes diff --git a/sdk/storage/Azure.Storage.Files.DataLake/README.md b/sdk/storage/Azure.Storage.Files.DataLake/README.md index 249a5c0dced06..e5136a042ff56 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/README.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/README.md @@ -156,6 +156,18 @@ file.Flush(SampleFileContent.Length); Response fileContents = file.Read(); ``` +### Reading Streaming Data from a DataLake File +```C# Snippet:SampleSnippetDataLakeFileClient_ReadStreaming +Response fileContents = file.ReadStreaming(); +Stream readStream = fileContents.Value.Content; +``` + +### Reading Content Data from a DataLake File +```C# Snippet:SampleSnippetDataLakeFileClient_ReadContent +Response fileContents = file.ReadContent(); +BinaryData readData = fileContents.Value.Content; +``` + ### Listing/Traversing through a DataLake Filesystem ```C# Snippet:SampleSnippetDataLakeFileClient_List foreach (PathItem pathItem in filesystem.GetPaths()) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index d2ced44d996eb..884a12eb570c8 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -171,16 +171,34 @@ public DataLakeFileClient(System.Uri fileUri, Azure.Storage.StorageSharedKeyCred public virtual System.Threading.Tasks.Task OpenWriteAsync(bool overwrite, Azure.Storage.Files.DataLake.Models.DataLakeFileOpenWriteOptions options = null, System.Threading.CancellationToken 
cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response Query(string querySqlExpression, Azure.Storage.Files.DataLake.Models.DataLakeQueryOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> QueryAsync(string querySqlExpression, Azure.Storage.Files.DataLake.Models.DataLakeQueryOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read() { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read(Azure.HttpRange range, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, bool rangeGetContentHash, System.Threading.CancellationToken cancellationToken) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync() { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> 
ReadAsync(Azure.HttpRange range, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, bool rangeGetContentHash, System.Threading.CancellationToken cancellationToken) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent() { throw null; } + public virtual Azure.Response ReadContent(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync() { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming() { throw null; } + public virtual Azure.Response ReadStreaming(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, 
System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync() { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeFileReadToOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, Azure.Storage.StorageTransferOptions transferOptions, System.Threading.CancellationToken cancellationToken) { throw null; } @@ -645,6 +663,19 @@ public DataLakeFileReadOptions() { } public Azure.HttpRange Range { get { throw null; } set { } } public Azure.Storage.DownloadTransferValidationOptions TransferValidation { get { throw null; } set { } } } + public partial class DataLakeFileReadResult + { + internal DataLakeFileReadResult() { } + public System.BinaryData Content { get { throw null; } } + public Azure.Storage.Files.DataLake.Models.FileDownloadDetails Details { get { throw null; } } + } + public partial class DataLakeFileReadStreamingResult : System.IDisposable + { + internal 
DataLakeFileReadStreamingResult() { } + public System.IO.Stream Content { get { throw null; } } + public Azure.Storage.Files.DataLake.Models.FileDownloadDetails Details { get { throw null; } } + public void Dispose() { } + } public partial class DataLakeFileReadToOptions { public DataLakeFileReadToOptions() { } @@ -731,6 +762,8 @@ public DataLakeMetrics() { } } public static partial class DataLakeModelFactory { + public static Azure.Storage.Files.DataLake.Models.DataLakeFileReadResult DataLakeFileReadResult(System.BinaryData content, Azure.Storage.Files.DataLake.Models.FileDownloadDetails details) { throw null; } + public static Azure.Storage.Files.DataLake.Models.DataLakeFileReadStreamingResult DataLakeFileReadStreamingResult(System.IO.Stream content, Azure.Storage.Files.DataLake.Models.FileDownloadDetails details) { throw null; } public static Azure.Storage.Files.DataLake.Models.DataLakeQueryError DataLakeQueryError(string name = null, string description = null, bool isFatal = false, long position = (long)0) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.DataLake.Models.FileDownloadDetails FileDownloadDetails(System.DateTimeOffset lastModified, System.Collections.Generic.IDictionary metadata, string contentRange, Azure.ETag eTag, string contentEncoding, string cacheControl, string contentDisposition, string contentLanguage, System.DateTimeOffset copyCompletionTime, string copyStatusDescription, string copyId, string copyProgress, System.Uri copySource, Azure.Storage.Files.DataLake.Models.CopyStatus copyStatus, Azure.Storage.Files.DataLake.Models.DataLakeLeaseDuration leaseDuration, Azure.Storage.Files.DataLake.Models.DataLakeLeaseState leaseState, Azure.Storage.Files.DataLake.Models.DataLakeLeaseStatus leaseStatus, string acceptRanges, bool isServerEncrypted, string encryptionKeySha256, byte[] contentHash) { throw null; } @@ -739,6 +772,7 @@ public static partial 
class DataLakeModelFactory [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.DataLake.Models.FileDownloadDetails FileDownloadDetails(System.DateTimeOffset lastModified, System.Collections.Generic.IDictionary metadata, string contentRange, Azure.ETag eTag, string contentEncoding, string cacheControl, string contentDisposition, string contentLanguage, System.DateTimeOffset copyCompletionTime, string copyStatusDescription, string copyId, string copyProgress, System.Uri copySource, Azure.Storage.Files.DataLake.Models.CopyStatus copyStatus, Azure.Storage.Files.DataLake.Models.DataLakeLeaseDuration leaseDuration, Azure.Storage.Files.DataLake.Models.DataLakeLeaseState leaseState, Azure.Storage.Files.DataLake.Models.DataLakeLeaseStatus leaseStatus, string acceptRanges, bool isServerEncrypted, string encryptionKeySha256, byte[] contentHash, System.DateTimeOffset createdOn, string encryptionContext) { throw null; } public static Azure.Storage.Files.DataLake.Models.FileDownloadDetails FileDownloadDetails(System.DateTimeOffset lastModified, System.Collections.Generic.IDictionary metadata, string contentRange, Azure.ETag eTag, string contentEncoding, string cacheControl, string contentDisposition, string contentLanguage, System.DateTimeOffset copyCompletionTime, string copyStatusDescription, string copyId, string copyProgress, System.Uri copySource, Azure.Storage.Files.DataLake.Models.CopyStatus copyStatus, Azure.Storage.Files.DataLake.Models.DataLakeLeaseDuration leaseDuration, Azure.Storage.Files.DataLake.Models.DataLakeLeaseState leaseState, Azure.Storage.Files.DataLake.Models.DataLakeLeaseStatus leaseStatus, string acceptRanges, bool isServerEncrypted, string encryptionKeySha256, byte[] contentHash, System.DateTimeOffset createdOn, string encryptionContext, System.Collections.Generic.IList accessControlList) { throw null; } + 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.DataLake.Models.FileDownloadInfo FileDownloadInfo(long contentLength, System.IO.Stream content, byte[] contentHash, Azure.Storage.Files.DataLake.Models.FileDownloadDetails properties) { throw null; } public static Azure.Storage.Files.DataLake.Models.FileSystemInfo FileSystemInfo(Azure.ETag etag, System.DateTimeOffset lastModified) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index d2ced44d996eb..884a12eb570c8 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -171,16 +171,34 @@ public DataLakeFileClient(System.Uri fileUri, Azure.Storage.StorageSharedKeyCred public virtual System.Threading.Tasks.Task OpenWriteAsync(bool overwrite, Azure.Storage.Files.DataLake.Models.DataLakeFileOpenWriteOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response Query(string querySqlExpression, Azure.Storage.Files.DataLake.Models.DataLakeQueryOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> QueryAsync(string querySqlExpression, Azure.Storage.Files.DataLake.Models.DataLakeQueryOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read() { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read(Azure.HttpRange range, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, bool rangeGetContentHash, System.Threading.CancellationToken cancellationToken) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response Read(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync() { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync(Azure.HttpRange range, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, bool rangeGetContentHash, System.Threading.CancellationToken cancellationToken) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent() { throw null; } + public virtual Azure.Response ReadContent(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync() { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming() { throw null; } + public virtual Azure.Response ReadStreaming(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync() { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = 
default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeFileReadToOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, Azure.Storage.StorageTransferOptions transferOptions, System.Threading.CancellationToken cancellationToken) { throw null; } @@ -645,6 +663,19 @@ public DataLakeFileReadOptions() { } public Azure.HttpRange Range { get { throw null; } set { } } public Azure.Storage.DownloadTransferValidationOptions TransferValidation { get { throw null; } set { } } } + public partial class DataLakeFileReadResult + { + internal DataLakeFileReadResult() { } + public System.BinaryData Content { get { throw null; } } + public Azure.Storage.Files.DataLake.Models.FileDownloadDetails Details { get { throw null; } } + } + public partial class DataLakeFileReadStreamingResult : System.IDisposable + { + internal DataLakeFileReadStreamingResult() { } + public System.IO.Stream Content { get { throw null; } } + public Azure.Storage.Files.DataLake.Models.FileDownloadDetails Details { get { throw null; } } + public void Dispose() { } + } public partial class DataLakeFileReadToOptions { public DataLakeFileReadToOptions() { } @@ -731,6 +762,8 @@ public DataLakeMetrics() { } } public static partial class DataLakeModelFactory { + public static Azure.Storage.Files.DataLake.Models.DataLakeFileReadResult DataLakeFileReadResult(System.BinaryData content, 
Azure.Storage.Files.DataLake.Models.FileDownloadDetails details) { throw null; } + public static Azure.Storage.Files.DataLake.Models.DataLakeFileReadStreamingResult DataLakeFileReadStreamingResult(System.IO.Stream content, Azure.Storage.Files.DataLake.Models.FileDownloadDetails details) { throw null; } public static Azure.Storage.Files.DataLake.Models.DataLakeQueryError DataLakeQueryError(string name = null, string description = null, bool isFatal = false, long position = (long)0) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.DataLake.Models.FileDownloadDetails FileDownloadDetails(System.DateTimeOffset lastModified, System.Collections.Generic.IDictionary metadata, string contentRange, Azure.ETag eTag, string contentEncoding, string cacheControl, string contentDisposition, string contentLanguage, System.DateTimeOffset copyCompletionTime, string copyStatusDescription, string copyId, string copyProgress, System.Uri copySource, Azure.Storage.Files.DataLake.Models.CopyStatus copyStatus, Azure.Storage.Files.DataLake.Models.DataLakeLeaseDuration leaseDuration, Azure.Storage.Files.DataLake.Models.DataLakeLeaseState leaseState, Azure.Storage.Files.DataLake.Models.DataLakeLeaseStatus leaseStatus, string acceptRanges, bool isServerEncrypted, string encryptionKeySha256, byte[] contentHash) { throw null; } @@ -739,6 +772,7 @@ public static partial class DataLakeModelFactory [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.DataLake.Models.FileDownloadDetails FileDownloadDetails(System.DateTimeOffset lastModified, System.Collections.Generic.IDictionary metadata, string contentRange, Azure.ETag eTag, string contentEncoding, string cacheControl, string contentDisposition, string contentLanguage, System.DateTimeOffset copyCompletionTime, string copyStatusDescription, string copyId, string 
copyProgress, System.Uri copySource, Azure.Storage.Files.DataLake.Models.CopyStatus copyStatus, Azure.Storage.Files.DataLake.Models.DataLakeLeaseDuration leaseDuration, Azure.Storage.Files.DataLake.Models.DataLakeLeaseState leaseState, Azure.Storage.Files.DataLake.Models.DataLakeLeaseStatus leaseStatus, string acceptRanges, bool isServerEncrypted, string encryptionKeySha256, byte[] contentHash, System.DateTimeOffset createdOn, string encryptionContext) { throw null; } public static Azure.Storage.Files.DataLake.Models.FileDownloadDetails FileDownloadDetails(System.DateTimeOffset lastModified, System.Collections.Generic.IDictionary metadata, string contentRange, Azure.ETag eTag, string contentEncoding, string cacheControl, string contentDisposition, string contentLanguage, System.DateTimeOffset copyCompletionTime, string copyStatusDescription, string copyId, string copyProgress, System.Uri copySource, Azure.Storage.Files.DataLake.Models.CopyStatus copyStatus, Azure.Storage.Files.DataLake.Models.DataLakeLeaseDuration leaseDuration, Azure.Storage.Files.DataLake.Models.DataLakeLeaseState leaseState, Azure.Storage.Files.DataLake.Models.DataLakeLeaseStatus leaseStatus, string acceptRanges, bool isServerEncrypted, string encryptionKeySha256, byte[] contentHash, System.DateTimeOffset createdOn, string encryptionContext, System.Collections.Generic.IList accessControlList) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.DataLake.Models.FileDownloadInfo FileDownloadInfo(long contentLength, System.IO.Stream content, byte[] contentHash, Azure.Storage.Files.DataLake.Models.FileDownloadDetails properties) { throw null; } public static Azure.Storage.Files.DataLake.Models.FileSystemInfo FileSystemInfo(Azure.ETag etag, System.DateTimeOffset lastModified) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] diff --git 
a/sdk/storage/Azure.Storage.Files.DataLake/assets.json b/sdk/storage/Azure.Storage.Files.DataLake/assets.json index 442889d04be63..556652aaba663 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/assets.json +++ b/sdk/storage/Azure.Storage.Files.DataLake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.DataLake", - "Tag": "net/storage/Azure.Storage.Files.DataLake_186c14971d" + "Tag": "net/storage/Azure.Storage.Files.DataLake_c09a71b442" } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01a_HelloWorld.cs b/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01a_HelloWorld.cs index 3f8cfdd32c7c5..f8369e587453a 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01a_HelloWorld.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01a_HelloWorld.cs @@ -325,6 +325,115 @@ public void Read() } } + /// + /// Download a DataLake File's streaming data to a file. 
+ /// + [Test] + public void ReadStreaming() + { + // Create a temporary Lorem Ipsum file on disk that we can upload + string originalPath = CreateTempFile(SampleFileContent); + + // Get a temporary path on disk where we can download the file + string downloadPath = CreateTempPath(); + + // Make StorageSharedKeyCredential to pass to the serviceClient + string storageAccountName = StorageAccountName; + string storageAccountKey = StorageAccountKey; + Uri serviceUri = StorageAccountBlobUri; + StorageSharedKeyCredential sharedKeyCredential = new StorageSharedKeyCredential(storageAccountName, storageAccountKey); + + // Create DataLakeServiceClient using StorageSharedKeyCredentials + DataLakeServiceClient serviceClient = new DataLakeServiceClient(serviceUri, sharedKeyCredential); + + // Get a reference to a filesystem named "sample-filesystem-read" and then create it + DataLakeFileSystemClient filesystem = serviceClient.GetFileSystemClient("sample-filesystem-read"); + filesystem.Create(); + try + { + // Get a reference to a file named "sample-file" in a filesystem + DataLakeFileClient file = filesystem.GetFileClient("sample-file"); + + // First upload something to the DataLake file so we have something to download + file.Upload(File.OpenRead(originalPath)); + + // Download the DataLake file's contents and save it to a file + // The ReadStreaming() API downloads a file in a single request.
+ // For large files, it may be faster to call ReadTo() + #region Snippet:SampleSnippetDataLakeFileClient_ReadStreaming + Response fileContents = file.ReadStreaming(); + Stream readStream = fileContents.Value.Content; + #endregion Snippet:SampleSnippetDataLakeFileClient_ReadStreaming + using (FileStream stream = File.OpenWrite(downloadPath)) + { + readStream.CopyTo(stream); + } + + // Verify the contents + Assert.AreEqual(SampleFileContent, File.ReadAllText(downloadPath)); + } + finally + { + // Clean up after the test when we're finished + filesystem.Delete(); + } + } + + /// + /// Download a DataLake File's content data to a file. + /// + [Test] + public void ReadContent() + { + // Create a temporary Lorem Ipsum file on disk that we can upload + string originalPath = CreateTempFile(SampleFileContent); + + // Get a temporary path on disk where we can download the file + string downloadPath = CreateTempPath(); + + // Make StorageSharedKeyCredential to pass to the serviceClient + string storageAccountName = StorageAccountName; + string storageAccountKey = StorageAccountKey; + Uri serviceUri = StorageAccountBlobUri; + StorageSharedKeyCredential sharedKeyCredential = new StorageSharedKeyCredential(storageAccountName, storageAccountKey); + + // Create DataLakeServiceClient using StorageSharedKeyCredentials + DataLakeServiceClient serviceClient = new DataLakeServiceClient(serviceUri, sharedKeyCredential); + + // Get a reference to a filesystem named "sample-filesystem-read" and then create it + DataLakeFileSystemClient filesystem = serviceClient.GetFileSystemClient("sample-filesystem-read"); + filesystem.Create(); + try + { + // Get a reference to a file named "sample-file" in a filesystem + DataLakeFileClient file = filesystem.GetFileClient("sample-file"); + + // First upload something the DataLake file so we have something to download + file.Upload(File.OpenRead(originalPath)); + + // Download the DataLake file's contents and save it to a file + // The 
ReadContent() API downloads a file in a single request. + // For large files, it may be faster to call ReadTo() + #region Snippet:SampleSnippetDataLakeFileClient_ReadContent + Response fileContents = file.ReadContent(); + BinaryData readData = fileContents.Value.Content; + #endregion Snippet:SampleSnippetDataLakeFileClient_ReadContent + byte[] data = readData.ToArray(); + using (FileStream stream = File.OpenWrite(downloadPath)) + { + stream.Write(data, 0, data.Length); + } + + // Verify the contents + Assert.AreEqual(SampleFileContent, File.ReadAllText(downloadPath)); + } + finally + { + // Clean up after the test when we're finished + filesystem.Delete(); + } + } + /// /// Download a DataLake File to a file. /// diff --git a/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01b_HelloWorldAsync.cs b/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01b_HelloWorldAsync.cs index c00d412854193..227cdfaf27a7b 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01b_HelloWorldAsync.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/samples/Sample01b_HelloWorldAsync.cs @@ -327,6 +327,111 @@ public async Task ReadAsync() } } + /// + /// Download a DataLake File's streaming data to a file. 
+ /// + [Test] + public async Task ReadStreamingAsync() + { + // Create a temporary Lorem Ipsum file on disk that we can upload + string originalPath = CreateTempFile(SampleFileContent); + + // Get a temporary path on disk where we can download the file + string downloadPath = CreateTempPath(); + + // Make StorageSharedKeyCredential to pass to the serviceClient + string storageAccountName = StorageAccountName; + string storageAccountKey = StorageAccountKey; + Uri serviceUri = StorageAccountBlobUri; + StorageSharedKeyCredential sharedKeyCredential = new StorageSharedKeyCredential(storageAccountName, storageAccountKey); + + // Create DataLakeServiceClient using StorageSharedKeyCredentials + DataLakeServiceClient serviceClient = new DataLakeServiceClient(serviceUri, sharedKeyCredential); + + // Get a reference to a filesystem named "sample-filesystem-readasync" and then create it + DataLakeFileSystemClient filesystem = serviceClient.GetFileSystemClient(Randomize("sample-filesystem-read")); + await filesystem.CreateAsync(); + try + { + // Get a reference to a file named "sample-file" in a filesystem + DataLakeFileClient file = filesystem.GetFileClient(Randomize("sample-file")); + + // First upload something to the DataLake file so we have something to download + await file.UploadAsync(File.OpenRead(originalPath)); + + // Download the DataLake file's contents and save it to a file + // The ReadStreamingAsync() API downloads a file in a single request. 
+ // For large files, it may be faster to call ReadToAsync() + Response fileContents = await file.ReadStreamingAsync(); + Stream readStream = fileContents.Value.Content; + using (FileStream stream = File.OpenWrite(downloadPath)) + { + readStream.CopyTo(stream); + } + + // Verify the contents + Assert.AreEqual(SampleFileContent, File.ReadAllText(downloadPath)); + } + finally + { + // Clean up after the test when we're finished + await filesystem.DeleteAsync(); + } + } + + /// + /// Download a DataLake File's content data to a file. + /// + [Test] + public async Task ReadContentAsync() + { + // Create a temporary Lorem Ipsum file on disk that we can upload + string originalPath = CreateTempFile(SampleFileContent); + + // Get a temporary path on disk where we can download the file + string downloadPath = CreateTempPath(); + + // Make StorageSharedKeyCredential to pass to the serviceClient + string storageAccountName = StorageAccountName; + string storageAccountKey = StorageAccountKey; + Uri serviceUri = StorageAccountBlobUri; + StorageSharedKeyCredential sharedKeyCredential = new StorageSharedKeyCredential(storageAccountName, storageAccountKey); + + // Create DataLakeServiceClient using StorageSharedKeyCredentials + DataLakeServiceClient serviceClient = new DataLakeServiceClient(serviceUri, sharedKeyCredential); + + // Get a reference to a filesystem named "sample-filesystem-readasync" and then create it + DataLakeFileSystemClient filesystem = serviceClient.GetFileSystemClient(Randomize("sample-filesystem-read")); + await filesystem.CreateAsync(); + try + { + // Get a reference to a file named "sample-file" in a filesystem + DataLakeFileClient file = filesystem.GetFileClient(Randomize("sample-file")); + + // First upload something the DataLake file so we have something to download + await file.UploadAsync(File.OpenRead(originalPath)); + + // Download the DataLake file's contents and save it to a file + // The ReadContentAsync() API downloads a file in a single 
request. + // For large files, it may be faster to call ReadToAsync() + Response fileContents = await file.ReadContentAsync(); + BinaryData readData = fileContents.Value.Content; + byte[] data = readData.ToArray(); + using (FileStream stream = File.OpenWrite(downloadPath)) + { + stream.Write(data, 0, data.Length); + } + + // Verify the contents + Assert.AreEqual(SampleFileContent, File.ReadAllText(downloadPath)); + } + finally + { + // Clean up after the test when we're finished + await filesystem.DeleteAsync(); + } + } + /// /// Download a DataLake File directly to file. /// diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeExtensions.cs index 9d50525a1120c..77f5257c56923 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeExtensions.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeExtensions.cs @@ -99,6 +99,29 @@ internal static FileDownloadInfo ToFileDownloadInfo(this Response blobDownloadStreamingResultResponse) + { + blobDownloadStreamingResultResponse.GetRawResponse().Headers.TryGetValue(Constants.DataLake.EncryptionContextHeaderName, out string encryptionContext); + blobDownloadStreamingResultResponse.GetRawResponse().Headers.TryGetValue(Constants.DataLake.AclHeaderName, out string accessControlList); + DataLakeFileReadStreamingResult dataLakeFileReadStreamingResult = new DataLakeFileReadStreamingResult() + { + Content = blobDownloadStreamingResultResponse.Value.Content, + Details = blobDownloadStreamingResultResponse.Value.Details.ToFileDownloadDetails(encryptionContext, accessControlList) + }; + return dataLakeFileReadStreamingResult; + } + + internal static DataLakeFileReadResult ToDataLakeFileReadResult(this Response blobDownloadResult) + { + blobDownloadResult.GetRawResponse().Headers.TryGetValue(Constants.DataLake.EncryptionContextHeaderName, out string encryptionContext); + 
blobDownloadResult.GetRawResponse().Headers.TryGetValue(Constants.DataLake.AclHeaderName, out string accessControlList); + DataLakeFileReadResult dataLakeFileReadResult = new DataLakeFileReadResult() + { + Content = blobDownloadResult.Value.Content, + Details = blobDownloadResult.Value.Details.ToFileDownloadDetails(encryptionContext, accessControlList) + }; + return dataLakeFileReadResult; + } internal static PathProperties ToPathProperties(this Response blobPropertiesResponse) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs index 3d2bd710e25aa..ef00d2b9c4ac0 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs @@ -2827,6 +2827,8 @@ internal virtual async Task> FlushInternal( #endregion #region Read Data + + #region Deprecated /// /// The operation downloads a file from /// the service, including its metadata and properties. @@ -2844,6 +2846,7 @@ internal virtual async Task> FlushInternal( /// A will be thrown if /// a failure occurs. /// + [EditorBrowsable(EditorBrowsableState.Never)] public virtual Response Read() { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(Read)}"); @@ -2886,6 +2889,7 @@ public virtual Response Read() /// A will be thrown if /// a failure occurs. /// + [EditorBrowsable(EditorBrowsableState.Never)] public virtual async Task> ReadAsync() { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(Read)}"); @@ -2933,6 +2937,7 @@ public virtual async Task> ReadAsync() /// A will be thrown if /// a failure occurs. 
/// + [EditorBrowsable(EditorBrowsableState.Never)] public virtual Response Read( CancellationToken cancellationToken = default) { @@ -2980,6 +2985,7 @@ public virtual Response Read( /// A will be thrown if /// a failure occurs. /// + [EditorBrowsable(EditorBrowsableState.Never)] public virtual async Task> ReadAsync( CancellationToken cancellationToken = default) { @@ -3017,12 +3023,12 @@ public virtual async Task> ReadAsync( /// Get Blob. /// /// - /// If provided, only donwload the bytes of the file in the specified + /// If provided, only download the bytes of the file in the specified /// range. If not provided, download the entire file. /// /// /// Optional to add conditions on - /// donwloading this file. + /// downloading this file. /// /// /// When set to true and specified together with the , @@ -3091,12 +3097,12 @@ public virtual Response Read( /// Get Blob. /// /// - /// If provided, only donwload the bytes of the file in the specified + /// If provided, only download the bytes of the file in the specified /// range. If not provided, download the entire file. /// /// /// Optional to add conditions on - /// donwloading this file. + /// downloading this file. /// /// /// When set to true and specified together with the , @@ -3157,7 +3163,7 @@ public virtual async Task> ReadAsync( } /// - /// The + /// The /// operation downloads a file from the service, including its metadata /// and properties. /// @@ -3181,6 +3187,7 @@ public virtual async Task> ReadAsync( /// A will be thrown if /// a failure occurs. /// + [EditorBrowsable(EditorBrowsableState.Never)] public virtual Response Read( DataLakeFileReadOptions options = default, CancellationToken cancellationToken = default) @@ -3211,7 +3218,7 @@ public virtual Response Read( } /// - /// The + /// The /// operation downloads a file from the service, including its metadata /// and properties. /// @@ -3235,6 +3242,7 @@ public virtual Response Read( /// A will be thrown if /// a failure occurs. 
/// + [EditorBrowsable(EditorBrowsableState.Never)] public virtual async Task> ReadAsync( DataLakeFileReadOptions options = default, CancellationToken cancellationToken = default) @@ -3264,6 +3272,597 @@ public virtual async Task> ReadAsync( scope.Dispose(); } } + #endregion Deprecated + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual Response ReadStreaming() + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); + + try + { + scope.Start(); + + Response response = _blockBlobClient.DownloadStreaming(); + + return Response.FromValue( + response.ToDataLakeFileReadStreamingResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. 
+ /// + public virtual async Task> ReadStreamingAsync() + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); + + try + { + scope.Start(); + + Response response = await _blockBlobClient.DownloadStreamingAsync() + .ConfigureAwait(false); + + return Response.FromValue( + response.ToDataLakeFileReadStreamingResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual Response ReadStreaming( + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); + + try + { + scope.Start(); + + Response response = _blockBlobClient.DownloadStreaming( + cancellationToken: cancellationToken); + + return Response.FromValue( + response.ToDataLakeFileReadStreamingResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. 
+ /// + public virtual async Task> ReadStreamingAsync( + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); + + try + { + scope.Start(); + + Response response = await _blockBlobClient.DownloadStreamingAsync( + cancellationToken: cancellationToken) + .ConfigureAwait(false); + + return Response.FromValue( + response.ToDataLakeFileReadStreamingResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional parameters. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual Response ReadStreaming( + DataLakeFileReadOptions options = default, + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); + + try + { + scope.Start(); + + Response response = _blockBlobClient.DownloadStreaming( + options: options.ToBlobBaseDownloadOptions(), + cancellationToken: cancellationToken); + + return Response.FromValue( + response.ToDataLakeFileReadStreamingResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional parameters. 
+ /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual async Task> ReadStreamingAsync( + DataLakeFileReadOptions options = default, + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); + + try + { + scope.Start(); + + Response response = await _blockBlobClient.DownloadStreamingAsync( + options: options.ToBlobBaseDownloadOptions(), + cancellationToken: cancellationToken) + .ConfigureAwait(false); + + return Response.FromValue( + response.ToDataLakeFileReadStreamingResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual Response ReadContent() + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); + + try + { + scope.Start(); + + Response response = _blockBlobClient.DownloadContent(); + + return Response.FromValue( + response.ToDataLakeFileReadResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. 
+ /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual async Task> ReadContentAsync() + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); + + try + { + scope.Start(); + + Response response = await _blockBlobClient.DownloadContentAsync() + .ConfigureAwait(false); + + return Response.FromValue( + response.ToDataLakeFileReadResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual Response ReadContent( + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); + + try + { + scope.Start(); + + Response response = _blockBlobClient.DownloadContent( + cancellationToken: cancellationToken); + + return Response.FromValue( + response.ToDataLakeFileReadResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. 
contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual async Task> ReadContentAsync( + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); + + try + { + scope.Start(); + + Response response = await _blockBlobClient.DownloadContentAsync( + cancellationToken: cancellationToken) + .ConfigureAwait(false); + + return Response.FromValue( + response.ToDataLakeFileReadResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. + /// + /// + /// Optional parameters. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual Response ReadContent( + DataLakeFileReadOptions options = default, + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); + + try + { + scope.Start(); + + Response response = _blockBlobClient.DownloadContent( + options: options.ToBlobBaseDownloadOptions(), + cancellationToken: cancellationToken); + + return Response.FromValue( + response.ToDataLakeFileReadResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } + + /// + /// The + /// operation downloads a file from the service, including its metadata + /// and properties. + /// + /// For more information, see + /// + /// Get Blob. 
+ /// + /// + /// Optional parameters. + /// + /// + /// Optional to propagate + /// notifications that the operation should be cancelled. + /// + /// + /// A describing the + /// downloaded file. contains + /// the file's data. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + public virtual async Task> ReadContentAsync( + DataLakeFileReadOptions options = default, + CancellationToken cancellationToken = default) + { + DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); + + try + { + scope.Start(); + + Response response = await _blockBlobClient.DownloadContentAsync( + options: options.ToBlobBaseDownloadOptions(), + cancellationToken: cancellationToken) + .ConfigureAwait(false); + + return Response.FromValue( + response.ToDataLakeFileReadResult(), + response.GetRawResponse()); + } + catch (Exception ex) + { + scope.Failed(ex); + throw; + } + finally + { + scope.Dispose(); + } + } #endregion Read Data #region Read To diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadResult.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadResult.cs new file mode 100644 index 0000000000000..6059019eb6cdd --- /dev/null +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadResult.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; + +namespace Azure.Storage.Files.DataLake.Models +{ + /// + /// The details and content returned from reading a DataLake File. + /// + public class DataLakeFileReadResult + { + internal DataLakeFileReadResult() { } + + /// + /// Details returned when reading a DataLake file + /// + public FileDownloadDetails Details { get; internal set; } + + /// + /// Content. 
+ /// + public BinaryData Content { get; internal set; } + } +} diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadStreamingResult.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadStreamingResult.cs new file mode 100644 index 0000000000000..985b1e9ab8374 --- /dev/null +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeFileReadStreamingResult.cs @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.IO; +using Azure.Storage.Shared; + +namespace Azure.Storage.Files.DataLake.Models +{ + /// + /// The details and content returned from reading a datalake file. + /// + public class DataLakeFileReadStreamingResult : IDisposable + { + internal DataLakeFileReadStreamingResult() { } + + /// + /// Details returned when reading a datalake file. + /// + public FileDownloadDetails Details { get; internal set; } + + /// + /// Content. + /// + public Stream Content { get; internal set; } + + /// + /// Disposes the by calling Dispose on the underlying stream. + /// + public void Dispose() + { + Content?.Dispose(); + GC.SuppressFinalize(this); + } + } +} diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs index f4b7370f69f4e..34e280c2b2f63 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs @@ -13,6 +13,34 @@ namespace Azure.Storage.Files.DataLake.Models /// public static partial class DataLakeModelFactory { + #region DataLakeFileReadResult + /// + /// Creates a new instance for mocking. 
+ /// + public static DataLakeFileReadResult DataLakeFileReadResult( + BinaryData content, + FileDownloadDetails details) + => new DataLakeFileReadResult() + { + Content = content, + Details = details + }; + #endregion DataLakeFileReadResult + + #region DataLakeFileReadStreamingResult + /// + /// Creates a new instance for mocking. + /// + public static DataLakeFileReadStreamingResult DataLakeFileReadStreamingResult( + Stream content, + FileDownloadDetails details) + => new DataLakeFileReadStreamingResult() + { + Content = content, + Details = details + }; + #endregion DataLakeFileReadStreamingResult + #region FileDownloadDetails /// /// Creates a new FileDownloadDetails instance for mocking. @@ -235,6 +263,7 @@ public static FileDownloadDetails FileDownloadDetails( /// /// Creates a new instance for mocking. /// + [EditorBrowsable(EditorBrowsableState.Never)] public static FileDownloadInfo FileDownloadInfo( long contentLength, Stream content, diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs index 6d9f0d5750800..d43891039bdaf 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs @@ -3455,6 +3455,71 @@ public async Task ReadAsync() TestHelper.AssertSequenceEqual(data, actual.ToArray()); } + [RecordedTest] + public async Task ReadStreamingAsync() + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + byte[] data = GetRandomBuffer(Constants.KB); + DataLakeFileClient fileClient = await test.FileSystem.CreateFileAsync(GetNewFileName()); + using (MemoryStream stream = new MemoryStream(data)) + { + await fileClient.AppendAsync(stream, 0); + } + + await fileClient.FlushAsync(Constants.KB); + + // Act + Response response = await fileClient.ReadStreamingAsync(); + + // Assert + Assert.IsNotNull(response.Value.Details.LastModified); + 
Assert.IsNotNull(response.Value.Details.AcceptRanges); + Assert.IsNotNull(response.Value.Details.ETag); + Assert.IsNotNull(response.Value.Details.LeaseStatus); + Assert.IsNotNull(response.Value.Details.LeaseState); + Assert.IsNotNull(response.Value.Details.IsServerEncrypted); + Assert.IsNotNull(response.Value.Details.CreatedOn); + Assert.IsNotNull(response.Value.Details.Metadata); + + MemoryStream actual = new MemoryStream(); + await response.Value.Content.CopyToAsync(actual); + TestHelper.AssertSequenceEqual(data, actual.ToArray()); + } + + [RecordedTest] + public async Task ReadContentAsync() + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + byte[] data = GetRandomBuffer(Constants.KB); + DataLakeFileClient fileClient = await test.FileSystem.CreateFileAsync(GetNewFileName()); + using (MemoryStream stream = new MemoryStream(data)) + { + await fileClient.AppendAsync(stream, 0); + } + + await fileClient.FlushAsync(Constants.KB); + + // Act + Response response = await fileClient.ReadContentAsync(); + + // Assert + Assert.IsNotNull(response.Value.Details.LastModified); + Assert.IsNotNull(response.Value.Details.AcceptRanges); + Assert.IsNotNull(response.Value.Details.ETag); + Assert.IsNotNull(response.Value.Details.LeaseStatus); + Assert.IsNotNull(response.Value.Details.LeaseState); + Assert.IsNotNull(response.Value.Details.IsServerEncrypted); + Assert.IsNotNull(response.Value.Details.CreatedOn); + Assert.IsNotNull(response.Value.Details.Metadata); + + byte[] actual = response.Value.Content.ToArray(); + TestHelper.AssertSequenceEqual(data, actual); + } + [RecordedTest] [ServiceVersion(Min = DataLakeClientOptions.ServiceVersion.V2024_05_04)] public async Task ReadAsyncACL() @@ -3502,6 +3567,97 @@ public async Task ReadAsyncACL() TestHelper.AssertSequenceEqual(data, actual.ToArray()); } + [RecordedTest] + [ServiceVersion(Min = DataLakeClientOptions.ServiceVersion.V2024_05_04)] + public async Task ReadStreamingAsyncACL() + { + await 
using DisposingFileSystem test = await GetNewFileSystem(publicAccessType: PublicAccessType.None); + DataLakeDirectoryClient directory = await test.FileSystem.CreateDirectoryAsync(GetNewDirectoryName()); + + DataLakeFileClient fileClient = InstrumentClient(directory.GetFileClient(GetNewFileName())); + + DataLakePathCreateOptions options = new DataLakePathCreateOptions + { + AccessOptions = new DataLakeAccessOptions + { + AccessControlList = AccessControlList + } + }; + + await fileClient.CreateAsync(options: options); + + // Arrange + var data = GetRandomBuffer(Constants.KB); + using (var stream = new MemoryStream(data)) + { + await fileClient.AppendAsync(stream, 0); + } + + await fileClient.FlushAsync(Constants.KB); + + // Act + Response response = await fileClient.ReadStreamingAsync(); + + // Assert + Assert.IsNotNull(response.Value.Details.LastModified); + Assert.IsNotNull(response.Value.Details.AcceptRanges); + Assert.IsNotNull(response.Value.Details.ETag); + Assert.IsNotNull(response.Value.Details.LeaseStatus); + Assert.IsNotNull(response.Value.Details.LeaseState); + Assert.IsNotNull(response.Value.Details.IsServerEncrypted); + Assert.IsNotNull(response.Value.Details.CreatedOn); + AssertAccessControlListEquality(AccessControlList, response.Value.Details.AccessControlList.ToList()); + + var actual = new MemoryStream(); + await response.Value.Content.CopyToAsync(actual); + TestHelper.AssertSequenceEqual(data, actual.ToArray()); + } + + [RecordedTest] + [ServiceVersion(Min = DataLakeClientOptions.ServiceVersion.V2024_05_04)] + public async Task ReadContentAsyncACL() + { + await using DisposingFileSystem test = await GetNewFileSystem(publicAccessType: PublicAccessType.None); + DataLakeDirectoryClient directory = await test.FileSystem.CreateDirectoryAsync(GetNewDirectoryName()); + + DataLakeFileClient fileClient = InstrumentClient(directory.GetFileClient(GetNewFileName())); + + DataLakePathCreateOptions options = new DataLakePathCreateOptions + { + AccessOptions = 
new DataLakeAccessOptions + { + AccessControlList = AccessControlList + } + }; + + await fileClient.CreateAsync(options: options); + + // Arrange + var data = GetRandomBuffer(Constants.KB); + using (var stream = new MemoryStream(data)) + { + await fileClient.AppendAsync(stream, 0); + } + + await fileClient.FlushAsync(Constants.KB); + + // Act + Response response = await fileClient.ReadContentAsync(); + + // Assert + Assert.IsNotNull(response.Value.Details.LastModified); + Assert.IsNotNull(response.Value.Details.AcceptRanges); + Assert.IsNotNull(response.Value.Details.ETag); + Assert.IsNotNull(response.Value.Details.LeaseStatus); + Assert.IsNotNull(response.Value.Details.LeaseState); + Assert.IsNotNull(response.Value.Details.IsServerEncrypted); + Assert.IsNotNull(response.Value.Details.CreatedOn); + AssertAccessControlListEquality(AccessControlList, response.Value.Details.AccessControlList.ToList()); + + byte[] actual = response.Value.Content.ToArray(); + TestHelper.AssertSequenceEqual(data, actual); + } + [RecordedTest] [ServiceVersion(Min = DataLakeClientOptions.ServiceVersion.V2024_05_04)] public async Task GetPropertiesAsyncACL() @@ -3630,6 +3786,76 @@ public async Task ReadAsync_Conditions() } } + [RecordedTest] + public async Task ReadStreamingAsync_Conditions() + { + var garbageLeaseId = GetGarbageLeaseId(); + foreach (AccessConditionParameters parameters in Conditions_Data) + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + var data = GetRandomBuffer(Constants.KB); + DataLakeFileClient file = await test.FileSystem.CreateFileAsync(GetNewFileName()); + using (var stream = new MemoryStream(data)) + { + await file.AppendAsync(stream, 0); + } + + await file.FlushAsync(Constants.KB); + + parameters.Match = await SetupPathMatchCondition(file, parameters.Match); + parameters.LeaseId = await SetupPathLeaseCondition(file, parameters.LeaseId, garbageLeaseId); + DataLakeRequestConditions conditions = BuildDataLakeRequestConditions( + 
parameters: parameters, + lease: true); + + // Act + Response response = await file.ReadStreamingAsync(new DataLakeFileReadOptions + { + Conditions = conditions + }); + + // Assert + Assert.IsNotNull(response.GetRawResponse().Headers.RequestId); + } + } + + [RecordedTest] + public async Task ReadContentAsync_Conditions() + { + var garbageLeaseId = GetGarbageLeaseId(); + foreach (AccessConditionParameters parameters in Conditions_Data) + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + var data = GetRandomBuffer(Constants.KB); + DataLakeFileClient file = await test.FileSystem.CreateFileAsync(GetNewFileName()); + using (var stream = new MemoryStream(data)) + { + await file.AppendAsync(stream, 0); + } + + await file.FlushAsync(Constants.KB); + + parameters.Match = await SetupPathMatchCondition(file, parameters.Match); + parameters.LeaseId = await SetupPathLeaseCondition(file, parameters.LeaseId, garbageLeaseId); + DataLakeRequestConditions conditions = BuildDataLakeRequestConditions( + parameters: parameters, + lease: true); + + // Act + Response response = await file.ReadContentAsync(new DataLakeFileReadOptions + { + Conditions = conditions + }); + + // Assert + Assert.IsNotNull(response.GetRawResponse().Headers.RequestId); + } + } + [RecordedTest] public async Task ReadAsync_ConditionsFail() { @@ -3663,6 +3889,72 @@ await TestHelper.CatchAsync( } } + [RecordedTest] + public async Task ReadStreamingAsync_ConditionsFail() + { + var garbageLeaseId = GetGarbageLeaseId(); + foreach (AccessConditionParameters parameters in GetConditionsFail_Data(garbageLeaseId)) + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + var data = GetRandomBuffer(Constants.KB); + DataLakeFileClient file = await test.FileSystem.CreateFileAsync(GetNewFileName()); + using (var stream = new MemoryStream(data)) + { + await file.AppendAsync(stream, 0); + } + + await file.FlushAsync(Constants.KB); + + parameters.NoneMatch = await 
SetupPathMatchCondition(file, parameters.NoneMatch); + DataLakeRequestConditions conditions = BuildDataLakeRequestConditions(parameters); + + // Act + await TestHelper.CatchAsync( + async () => + { + var _ = (await file.ReadStreamingAsync(new DataLakeFileReadOptions + { + Conditions = conditions + })).Value; + }); + } + } + + [RecordedTest] + public async Task ReadContentAsync_ConditionsFail() + { + var garbageLeaseId = GetGarbageLeaseId(); + foreach (AccessConditionParameters parameters in GetConditionsFail_Data(garbageLeaseId)) + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + var data = GetRandomBuffer(Constants.KB); + DataLakeFileClient file = await test.FileSystem.CreateFileAsync(GetNewFileName()); + using (var stream = new MemoryStream(data)) + { + await file.AppendAsync(stream, 0); + } + + await file.FlushAsync(Constants.KB); + + parameters.NoneMatch = await SetupPathMatchCondition(file, parameters.NoneMatch); + DataLakeRequestConditions conditions = BuildDataLakeRequestConditions(parameters); + + // Act + await TestHelper.CatchAsync( + async () => + { + var _ = (await file.ReadContentAsync(new DataLakeFileReadOptions + { + Conditions = conditions + })).Value; + }); + } + } + [RecordedTest] public async Task ReadAsync_Error() { @@ -3677,6 +3969,34 @@ await TestHelper.AssertExpectedExceptionAsync( e => Assert.AreEqual("BlobNotFound", e.ErrorCode)); } + [RecordedTest] + public async Task ReadStreamingAsync_Error() + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + DataLakeFileClient file = InstrumentClient(test.FileSystem.GetFileClient(GetNewFileName())); + + // Act + await TestHelper.AssertExpectedExceptionAsync( + file.ReadStreamingAsync(), + e => Assert.AreEqual("BlobNotFound", e.ErrorCode)); + } + + [RecordedTest] + public async Task ReadContentAsync_Error() + { + await using DisposingFileSystem test = await GetNewFileSystem(); + + // Arrange + DataLakeFileClient file = 
InstrumentClient(test.FileSystem.GetFileClient(GetNewFileName())); + + // Act + await TestHelper.AssertExpectedExceptionAsync( + file.ReadContentAsync(), + e => Assert.AreEqual("BlobNotFound", e.ErrorCode)); + } + [RecordedTest] public async Task AcquireLeaseAsync() { From 82f24fbb8f60a2982589554d240b5580800d695a Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Fri, 20 Sep 2024 13:19:24 -0400 Subject: [PATCH 11/25] [Storage] Added GenerateUserDelegationSasUri() for Blobs and BlobContainers (#45646) --- .../AzureStorageNetMigrationV12.md | 48 ++ sdk/storage/Azure.Storage.Blobs/CHANGELOG.md | 1 + .../api/Azure.Storage.Blobs.net6.0.cs | 12 + .../api/Azure.Storage.Blobs.netstandard2.0.cs | 12 + .../api/Azure.Storage.Blobs.netstandard2.1.cs | 12 + sdk/storage/Azure.Storage.Blobs/assets.json | 2 +- .../samples/Sample03_Migrations.cs | 133 ++++ .../Azure.Storage.Blobs/src/BlobBaseClient.cs | 223 ++++++- .../src/BlobContainerClient.cs | 186 +++++- .../tests/BlobBaseClientTests.cs | 599 ++++++++++++++++++ .../tests/ContainerClientTests.cs | 263 ++++++++ 11 files changed, 1445 insertions(+), 46 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/AzureStorageNetMigrationV12.md b/sdk/storage/Azure.Storage.Blobs/AzureStorageNetMigrationV12.md index 68f523f715716..8faad4f14059e 100644 --- a/sdk/storage/Azure.Storage.Blobs/AzureStorageNetMigrationV12.md +++ b/sdk/storage/Azure.Storage.Blobs/AzureStorageNetMigrationV12.md @@ -611,6 +611,54 @@ BlobSasBuilder sasBuilder = new BlobSasBuilder }; ``` +To create a simple User Delegation SAS with any optional parameters, use the convenience overload of GenerateUserDelegationSas which only requires taking in permissions and the expiry time. 
+ +```C# Snippet:SampleSnippetsBlobMigration_GenerateUserDelegationSas +// Create a BlobClient +BlobClient blobClient = new BlobClient(blobUri); + +// Create full, self-authenticating URI to the resource from the BlobClient +Uri sasUri = blobClient.GenerateUserDelegationSasUri(BlobSasPermissions.Read, DateTimeOffset.UtcNow.AddHours(1), userDelegationKey); +``` + +To create a more complex User Delegation SAS, pass the SAS builder to the GenerateUserDelegationSas method. + +```C# Snippet:SampleSnippetsBlobMigration_GenerateUserDelegationSas_Builder +// Create a BlobClient +BlobClient blobClient = new BlobClient(blobUri); +// Create BlobSasBuilder and specify parameters +BlobSasBuilder sasBuilder = new BlobSasBuilder(BlobSasPermissions.Read, DateTimeOffset.UtcNow.AddHours(1)) +{ + // Since we are generating from the client, the client will have the container and blob name + // Specify any optional paremeters here + StartsOn = DateTimeOffset.UtcNow.AddHours(-1) +}; + +// Create full, self-authenticating URI to the resource from the BlobClient +Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); +``` + +You can also generate an User Delegation SAS without use of the client. 
+ +```C# Snippet:SampleSnippetsBlobMigration_UserDelegationSasBuilder +// Create BlobSasBuilder and specify parameters +BlobSasBuilder sasBuilder = new BlobSasBuilder(BlobSasPermissions.Read, DateTimeOffset.UtcNow.AddHours(1)) +{ + // with no url in a client to read from, container and blob name must be provided if applicable + BlobContainerName = containerName, + BlobName = blobName +}; + +// Create full, self-authenticating URI to the resource +BlobUriBuilder uriBuilder = new BlobUriBuilder(StorageAccountBlobUri) +{ + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder.ToSasQueryParameters(userDelegationKey, accountName) +}; +Uri sasUri = uriBuilder.ToUri(); +``` + ### Content Hashes #### Blob Content MD5 diff --git a/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md b/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md index e23e02933f961..ced8fcd9c14eb 100644 --- a/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md @@ -3,6 +3,7 @@ ## 12.22.0-beta.2 (Unreleased) ### Features Added +- Added GenerateUserDelegationSasUri() for BlobBaseClient and BlobContainerClient ### Breaking Changes diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index 05cdde6988050..6e618343e0aee 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -137,6 +137,12 @@ public BlobContainerClient(System.Uri blobContainerUri, Azure.Storage.StorageSha public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasBuilder builder) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasBuilder builder, out string stringToSign) { throw null; } + public virtual System.Uri 
GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobContainerSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobContainerSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccessPolicy(Azure.Storage.Blobs.Models.BlobRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessPolicyAsync(Azure.Storage.Blobs.Models.BlobRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response GetAccountInfo(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -1636,6 +1642,12 @@ public BlobBaseClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccountInfo(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccountInfoAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } protected internal virtual Azure.Storage.Blobs.Specialized.BlobLeaseClient GetBlobLeaseClientCore(string leaseId) { throw null; } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index 05cdde6988050..6e618343e0aee 100644 --- 
a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -137,6 +137,12 @@ public BlobContainerClient(System.Uri blobContainerUri, Azure.Storage.StorageSha public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasBuilder builder) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasBuilder builder, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobContainerSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobContainerSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccessPolicy(Azure.Storage.Blobs.Models.BlobRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessPolicyAsync(Azure.Storage.Blobs.Models.BlobRequestConditions 
conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response GetAccountInfo(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -1636,6 +1642,12 @@ public BlobBaseClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccountInfo(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) 
{ throw null; } public virtual System.Threading.Tasks.Task> GetAccountInfoAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } protected internal virtual Azure.Storage.Blobs.Specialized.BlobLeaseClient GetBlobLeaseClientCore(string leaseId) { throw null; } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index 05cdde6988050..6e618343e0aee 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -137,6 +137,12 @@ public BlobContainerClient(System.Uri blobContainerUri, Azure.Storage.StorageSha public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasBuilder builder) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasBuilder builder, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobContainerSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobContainerSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual 
System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccessPolicy(Azure.Storage.Blobs.Models.BlobRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessPolicyAsync(Azure.Storage.Blobs.Models.BlobRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response GetAccountInfo(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -1636,6 +1642,12 @@ public BlobBaseClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasBuilder builder, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, 
Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.BlobSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Blobs.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccountInfo(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccountInfoAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } protected internal virtual Azure.Storage.Blobs.Specialized.BlobLeaseClient GetBlobLeaseClientCore(string leaseId) { throw null; } diff --git a/sdk/storage/Azure.Storage.Blobs/assets.json b/sdk/storage/Azure.Storage.Blobs/assets.json index 328a7707c7101..377294f47b993 100644 --- a/sdk/storage/Azure.Storage.Blobs/assets.json +++ b/sdk/storage/Azure.Storage.Blobs/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Blobs", - "Tag": "net/storage/Azure.Storage.Blobs_f805dd22f1" + "Tag": "net/storage/Azure.Storage.Blobs_730bf5e40e" } diff --git a/sdk/storage/Azure.Storage.Blobs/samples/Sample03_Migrations.cs b/sdk/storage/Azure.Storage.Blobs/samples/Sample03_Migrations.cs index d1f81e4c461e0..198c3ba8f9731 100644 --- a/sdk/storage/Azure.Storage.Blobs/samples/Sample03_Migrations.cs +++ b/sdk/storage/Azure.Storage.Blobs/samples/Sample03_Migrations.cs @@ -859,6 +859,139 @@ public async Task SasBuilderIdentifier() } } + [Test] + public async Task UserDelegationSasBuilder() + { + string accountName = StorageAccountName; + string containerName = Randomize("sample-container"); + string blobName = Randomize("sample-blob"); + BlobServiceClient 
client = new BlobServiceClient(ConnectionString); + Response userDelegationKeyResponse = await client.GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: DateTimeOffset.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // setup blob + BlobContainerClient container = new BlobContainerClient(ConnectionString, containerName); + + try + { + await container.CreateAsync(); + await container.GetBlobClient(blobName).UploadAsync(BinaryData.FromString("hello world")); + + #region Snippet:SampleSnippetsBlobMigration_UserDelegationSasBuilder + // Create BlobSasBuilder and specify parameters + BlobSasBuilder sasBuilder = new BlobSasBuilder(BlobSasPermissions.Read, DateTimeOffset.UtcNow.AddHours(1)) + { + // with no url in a client to read from, container and blob name must be provided if applicable + BlobContainerName = containerName, + BlobName = blobName + }; + + // Create full, self-authenticating URI to the resource + BlobUriBuilder uriBuilder = new BlobUriBuilder(StorageAccountBlobUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder.ToSasQueryParameters(userDelegationKey, accountName) + }; + Uri sasUri = uriBuilder.ToUri(); + #endregion + + // successful download indicates pass + await new BlobClient(sasUri).DownloadToAsync(new MemoryStream()); + } + finally + { + await container.DeleteIfExistsAsync(); + } + } + + [Test] + public async Task GenerateUserDelegationSas() + { + string accountName = StorageAccountName; + string containerName = Randomize("sample-container"); + string blobName = Randomize("sample-blob"); + BlobServiceClient client = new BlobServiceClient(ConnectionString); + Response userDelegationKeyResponse = await client.GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: DateTimeOffset.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // setup blob + BlobContainerClient container = new 
BlobContainerClient(ConnectionString, containerName); + BlobUriBuilder uriBuilder = new BlobUriBuilder(container.Uri) { BlobName = blobName }; + Uri blobUri = uriBuilder.ToUri(); + + try + { + await container.CreateAsync(); + await container.GetBlobClient(blobName).UploadAsync(BinaryData.FromString("hello world")); + + #region Snippet:SampleSnippetsBlobMigration_GenerateUserDelegationSas + // Create a BlobClient + BlobClient blobClient = new BlobClient(blobUri); + + // Create full, self-authenticating URI to the resource from the BlobClient + Uri sasUri = blobClient.GenerateUserDelegationSasUri(BlobSasPermissions.Read, DateTimeOffset.UtcNow.AddHours(1), userDelegationKey); + #endregion + + // Use newly made SAS URI to download the blob + await new BlobClient(sasUri).DownloadToAsync(new MemoryStream()); + } + finally + { + await container.DeleteIfExistsAsync(); + } + } + + [Test] + public async Task GenerateUserDelegationSas_Builder() + { + string accountName = StorageAccountName; + string containerName = Randomize("sample-container"); + string blobName = Randomize("sample-blob"); + BlobServiceClient client = new BlobServiceClient(ConnectionString); + Response userDelegationKeyResponse = await client.GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: DateTimeOffset.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // setup blob + BlobContainerClient container = new BlobContainerClient(ConnectionString, containerName); + BlobUriBuilder uriBuilder = new BlobUriBuilder(container.Uri) { BlobName = blobName }; + Uri blobUri = uriBuilder.ToUri(); + + try + { + await container.CreateAsync(); + await container.GetBlobClient(blobName).UploadAsync(BinaryData.FromString("hello world")); + + #region Snippet:SampleSnippetsBlobMigration_GenerateUserDelegationSas_Builder + // Create a BlobClient + BlobClient blobClient = new BlobClient(blobUri); + // Create BlobSasBuilder and specify parameters + BlobSasBuilder sasBuilder = 
new BlobSasBuilder(BlobSasPermissions.Read, DateTimeOffset.UtcNow.AddHours(1)) + { + // Since we are generating from the client, the client will have the container and blob name + // Specify any optional parameters here + StartsOn = DateTimeOffset.UtcNow.AddHours(-1) + }; + + // Create full, self-authenticating URI to the resource from the BlobClient + Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + #endregion + + // Use newly made SAS URI to download the blob + await new BlobClient(sasUri).DownloadToAsync(new MemoryStream()); + } + finally + { + await container.DeleteIfExistsAsync(); + } + } + [Test] public async Task BlobContentHash() { diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs index c1416524f0221..50b3eec280712 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs @@ -6792,42 +6792,167 @@ public virtual Uri GenerateSasUri(BlobSasBuilder builder, out string stringToSig // Deep copy of builder so we don't modify the user's original BlobSasBuilder. builder = BlobSasBuilder.DeepCopy(builder); - // Assign builder's ContainerName, BlobName, Snapshot, BlobVersionId, and EncryptionScope if they are null. 
- builder.BlobContainerName ??= BlobContainerName; - builder.BlobName ??= Name; - builder.Snapshot ??= _snapshot; - builder.BlobVersionId ??= _blobVersionId; - builder.EncryptionScope ??= _clientConfiguration.EncryptionScope; - - if (!builder.BlobContainerName.Equals(BlobContainerName, StringComparison.InvariantCulture)) - { - throw Errors.SasNamesNotMatching( - nameof(builder.BlobContainerName), - nameof(BlobSasBuilder), - nameof(BlobContainerName)); - } - if (!builder.BlobName.Equals(Name, StringComparison.InvariantCulture)) + SetBuilderAndValidate(builder); + BlobUriBuilder sasUri = new BlobUriBuilder(Uri, ClientConfiguration.TrimBlobNameSlashes) { - throw Errors.SasNamesNotMatching( - nameof(builder.BlobName), - nameof(BlobSasBuilder), - nameof(Name)); - } - if (string.Compare(_snapshot, builder.Snapshot, StringComparison.InvariantCulture) != 0) + Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + }; + return sasUri.ToUri(); + } + #endregion + + #region GenerateUserDelegationSas + /// + /// The + /// returns a representing a Blob Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and parameters passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. 
+ /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey) => + GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out _); + + /// + /// The + /// returns a representing a Blob Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and parameters passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. 
+ /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey, out string stringToSign) => + GenerateUserDelegationSasUri(new BlobSasBuilder(permissions, expiresOn) { - throw Errors.SasNamesNotMatching( - nameof(builder.Snapshot), - nameof(BlobSasBuilder)); - } - if (string.Compare(_blobVersionId, builder.BlobVersionId, StringComparison.InvariantCulture) != 0) + BlobContainerName = BlobContainerName, + BlobName = Name, + Snapshot = _snapshot, + BlobVersionId = _blobVersionId, + EncryptionScope = _clientConfiguration.EncryptionScope + }, userDelegationKey, out stringToSign); + + /// + /// The + /// returns a representing a Blob Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and builder passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobSasBuilder builder, UserDelegationKey userDelegationKey) => + GenerateUserDelegationSasUri(builder, userDelegationKey, out _); + + /// + /// The + /// returns a representing a Blob Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and builder passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). 
+ /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobSasBuilder builder, UserDelegationKey userDelegationKey, out string stringToSign) + { + builder = builder ?? throw Errors.ArgumentNull(nameof(builder)); + userDelegationKey = userDelegationKey ?? throw Errors.ArgumentNull(nameof(userDelegationKey)); + + // Deep copy of builder so we don't modify the user's original BlobSasBuilder. + builder = BlobSasBuilder.DeepCopy(builder); + + SetBuilderAndValidate(builder); + if (string.IsNullOrEmpty(AccountName)) { - throw Errors.SasNamesNotMatching( - nameof(builder.BlobVersionId), - nameof(BlobSasBuilder)); + throw Errors.SasClientMissingData(nameof(AccountName)); } + BlobUriBuilder sasUri = new BlobUriBuilder(Uri, ClientConfiguration.TrimBlobNameSlashes) { - Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + Sas = builder.ToSasQueryParameters(userDelegationKey, AccountName, out stringToSign) }; return sasUri.ToUri(); } @@ -6865,6 +6990,44 @@ protected internal virtual BlobContainerClient GetParentBlobContainerClientCore( return _parentBlobContainerClient; } #endregion + + private void SetBuilderAndValidate(BlobSasBuilder builder) + { + // Assign builder's ContainerName, BlobName, Snapshot, BlobVersionId, and EncryptionScope if they are null. 
+ builder.BlobContainerName ??= BlobContainerName; + builder.BlobName ??= Name; + builder.Snapshot ??= _snapshot; + builder.BlobVersionId ??= _blobVersionId; + builder.EncryptionScope ??= _clientConfiguration.EncryptionScope; + + // Validate that builder is properly set + if (!builder.BlobContainerName.Equals(BlobContainerName, StringComparison.InvariantCulture)) + { + throw Errors.SasNamesNotMatching( + nameof(builder.BlobContainerName), + nameof(BlobSasBuilder), + nameof(BlobContainerName)); + } + if (!builder.BlobName.Equals(Name, StringComparison.InvariantCulture)) + { + throw Errors.SasNamesNotMatching( + nameof(builder.BlobName), + nameof(BlobSasBuilder), + nameof(Name)); + } + if (string.Compare(_snapshot, builder.Snapshot, StringComparison.InvariantCulture) != 0) + { + throw Errors.SasNamesNotMatching( + nameof(builder.Snapshot), + nameof(BlobSasBuilder)); + } + if (string.Compare(_blobVersionId, builder.BlobVersionId, StringComparison.InvariantCulture) != 0) + { + throw Errors.SasNamesNotMatching( + nameof(builder.BlobVersionId), + nameof(BlobSasBuilder)); + } + } } /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobContainerClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobContainerClient.cs index 97b6985c51918..8b641d58d2fee 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobContainerClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobContainerClient.cs @@ -3801,26 +3801,160 @@ public virtual Uri GenerateSasUri(BlobSasBuilder builder, out string stringToSig // Deep copy of builder so we don't modify the user's origial BlobSasBuilder. builder = BlobSasBuilder.DeepCopy(builder); - // Assign builder's ContainerName if it is null. 
- builder.BlobContainerName ??= Name; - - if (!builder.BlobContainerName.Equals(Name, StringComparison.InvariantCulture)) + SetBuilderAndValidate(builder); + BlobUriBuilder sasUri = new BlobUriBuilder(Uri, ClientConfiguration.TrimBlobNameSlashes) { - throw Errors.SasNamesNotMatching( - nameof(builder.BlobContainerName), - nameof(BlobSasBuilder), - nameof(Name)); - } - if (!string.IsNullOrEmpty(builder.BlobName)) + Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + }; + return sasUri.ToUri(); + } + #endregion + + #region GenerateUserDelegationSas + /// + /// The + /// returns a representing a Blob Container Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and parameters passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobContainerSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey) => + GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out _); + + /// + /// The + /// returns a representing a Blob Container Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and parameters passed. The SAS is signed by the user delegation key + /// that is passed in. 
+ /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobContainerSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey, out string stringToSign) => + GenerateUserDelegationSasUri(new BlobSasBuilder(permissions, expiresOn) { BlobContainerName = Name }, userDelegationKey, out stringToSign); + + /// + /// The + /// returns a representing a Blob Container Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and builder passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. 
+ /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobSasBuilder builder, UserDelegationKey userDelegationKey) => + GenerateUserDelegationSasUri(builder, userDelegationKey, out _); + + /// + /// The + /// returns a representing a Blob Container Service + /// Shared Access Signature (SAS) Uri based on the Client properties + /// and builder passed. The SAS is signed by the user delegation key + /// that is passed in. + /// + /// For more information, see + /// + /// Creating a user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-blobs")] + public virtual Uri GenerateUserDelegationSasUri(BlobSasBuilder builder, UserDelegationKey userDelegationKey, out string stringToSign) + { + builder = builder ?? throw Errors.ArgumentNull(nameof(builder)); + userDelegationKey = userDelegationKey ?? throw Errors.ArgumentNull(nameof(userDelegationKey)); + + // Deep copy of builder so we don't modify the user's original BlobSasBuilder. 
+ builder = BlobSasBuilder.DeepCopy(builder); + + SetBuilderAndValidate(builder); + if (string.IsNullOrEmpty(AccountName)) { - throw Errors.SasBuilderEmptyParam( - nameof(builder), - nameof(builder.BlobName), - nameof(Constants.Blob.Container.Name)); + throw Errors.SasClientMissingData(nameof(AccountName)); } + BlobUriBuilder sasUri = new BlobUriBuilder(Uri, ClientConfiguration.TrimBlobNameSlashes) { - Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + Sas = builder.ToSasQueryParameters(userDelegationKey, AccountName, out stringToSign) }; return sasUri.ToUri(); } @@ -3860,6 +3994,28 @@ protected internal virtual BlobServiceClient GetParentBlobServiceClientCore() return _parentBlobServiceClient; } #endregion + + private void SetBuilderAndValidate(BlobSasBuilder builder) + { + // Assign builder's ContainerName if it is null. + builder.BlobContainerName ??= Name; + + // Validate that builder is properly set + if (!builder.BlobContainerName.Equals(Name, StringComparison.InvariantCulture)) + { + throw Errors.SasNamesNotMatching( + nameof(builder.BlobContainerName), + nameof(BlobSasBuilder), + nameof(Name)); + } + if (!string.IsNullOrEmpty(builder.BlobName)) + { + throw Errors.SasBuilderEmptyParam( + nameof(builder), + nameof(builder.BlobName), + nameof(Constants.Blob.Container.Name)); + } + } } namespace Specialized diff --git a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTests.cs index 0fbabc019e76c..787f4e44b0c46 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTests.cs @@ -7668,6 +7668,605 @@ public async Task GenerateSas_TrimBlobSlashes() } #endregion + #region GenerateUserDelegationSasTests + [RecordedTest] + public async Task GenerateUserDelegationSas_RequiredParameters() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string containerName = 
GetNewContainerName(); + string blobName = GetNewBlobName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = blobClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_Builder() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + string blobName = GetNewBlobName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + BlobBaseClient 
blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + StartsOn = startsOn + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNull() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + string blobName = GetNewBlobName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => 
blobClient.GenerateUserDelegationSasUri(null, userDelegationKey, out stringToSign), + new ArgumentNullException("builder")); + } + + [RecordedTest] + public void GenerateUserDelegationSas_UserDelegationKeyNull() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + string blobName = GetNewBlobName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + StartsOn = startsOn + }; + + string stringToSign = null; + + // Act + TestHelper.AssertExpectedException( + () => blobClient.GenerateUserDelegationSasUri(sasBuilder, null, out stringToSign), + new ArgumentNullException("userDelegationKey")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullContainerName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string blobName = GetNewBlobName(); + string containerName = GetNewContainerName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new 
BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = null, + BlobName = blobName, + Resource = "b" + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongContainerName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string blobName = GetNewBlobName(); + string containerName = GetNewContainerName(); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(new Uri($"https://{constants.Sas.Account}.blob.core.windows.net")) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = GetNewContainerName(), // set a different containerName + BlobName = blobName, + Resource = "b" + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: 
Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign), + new InvalidOperationException("SAS Uri cannot be generated. BlobSasBuilder.BlobContainerName does not match BlobContainerName in the Client. BlobSasBuilder.BlobContainerName must either be left empty or match the BlobContainerName in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullBlobName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string blobName = GetNewBlobName(); + string containerName = GetNewContainerName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = null, + Resource = "b", + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + StartsOn = startsOn + 
}; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongBlobName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(new Uri($"https://{constants.Sas.Account}.blob.core.windows.net")) + { + BlobContainerName = containerName, + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = GetNewBlobName(), // set a different blobName + Resource = "b" + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign), + new InvalidOperationException("SAS Uri cannot be generated. BlobSasBuilder.BlobName does not match Name in the Client. 
BlobSasBuilder.BlobName must either be left empty or match the Name in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullSnapshot() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string blobName = GetNewBlobName(); + string containerName = GetNewContainerName(); + string snapshot = "2020-07-03T12:45:46.1234567Z"; + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Snapshot = snapshot + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + Snapshot = null, + Resource = "b", + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + Snapshot = snapshot, + StartsOn = startsOn + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + Snapshot = snapshot, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + 
Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongSnapshot() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string snapshot = "2020-07-03T12:45:46.1234567Z"; + string differentSnapshot = "2019-07-03T12:45:46.1234567Z"; + string containerName = GetNewContainerName(); + string blobName = GetNewBlobName(); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(new Uri($"https://{constants.Sas.Account}.blob.core.windows.net")) + { + BlobContainerName = containerName, + BlobName = blobName, + Snapshot = snapshot + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + Resource = "bs", + Snapshot = differentSnapshot + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign), + new InvalidOperationException("SAS Uri cannot be generated. BlobSasBuilder.Snapshot does not match snapshot value in the URI in the Client. 
BlobSasBuilder.Snapshot must either be left empty or match the snapshot value in the URI in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullVersion() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string blobName = GetNewBlobName(); + string containerName = GetNewContainerName(); + string versionId = "2020-07-03T12:45:46.1234567Z"; + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + VersionId = versionId + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + BlobVersionId = null, + Resource = "b", + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + BlobVersionId = versionId, + StartsOn = startsOn + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName, + VersionId = versionId, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + 
Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongVersion() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string blobVersionId = "2020-07-03T12:45:46.1234567Z"; + string diffBlobVersionId = "2019-07-03T12:45:46.1234567Z"; + string containerName = GetNewContainerName(); + string blobName = GetNewBlobName(); + Uri blobEndpoint = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(blobEndpoint) + { + BlobContainerName = containerName, + BlobName = blobName, + VersionId = blobVersionId + }; + + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName, + Resource = "bs", + BlobVersionId = diffBlobVersionId, + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => blobClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign), + new InvalidOperationException("SAS Uri cannot be generated. BlobSasBuilder.BlobVersionId does not match snapshot value in the URI in the Client. 
BlobSasBuilder.BlobVersionId must either be left empty or match the snapshot value in the URI in the Client")); + } + + [LiveOnly] + public async Task GenerateUserDelegationSas_TrimBlobSlashes() + { + // Arrange + BlobServiceClient serviceClient = GetServiceClient_OAuth(); + await using DisposingContainer test = await GetTestContainerAsync( + service: serviceClient); + string containerName = test.Container.Name; + string blobName = $"/{GetNewBlobName()}"; + + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(test.Container.Uri, false) + { + BlobContainerName = containerName, + BlobName = blobName, + }; + + // Set up options with TrimBlobNameSlashes set to false + BlobClientOptions options = GetOptions(); + options.TrimBlobNameSlashes = false; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + AppendBlobClient createClient = InstrumentClient(new AppendBlobClient( + blobUriBuilder.ToUri(), + TestEnvironment.Credential, + options)); + + await createClient.CreateAsync(); + + string stringToSign = null; + Response userDelegationKeyResponse = await serviceClient.GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + BlobBaseClient blobClient = InstrumentClient(new BlobBaseClient( + blobUriBuilder.ToUri(), + options)); + + Uri sasUri = blobClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(test.Container.Uri, false) + { + BlobContainerName = containerName, + BlobName = blobName, + Sas = sasBuilder.ToSasQueryParameters(userDelegationKey, blobClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + 
Assert.IsNotNull(stringToSign); + + BlobBaseClient sasClient = InstrumentClient(new BlobBaseClient(sasUri, options)); + Assert.IsTrue(await sasClient.ExistsAsync()); + } + #endregion + //[Test] //public async Task SetTierAsync_Batch() //{ diff --git a/sdk/storage/Azure.Storage.Blobs/tests/ContainerClientTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/ContainerClientTests.cs index f1e69e70bb4e5..451586836669f 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/ContainerClientTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/ContainerClientTests.cs @@ -3659,6 +3659,269 @@ public void GenerateSas_BuilderWrongName() } #endregion + #region GenerateUserDelegationSas + [RecordedTest] + public async Task GenerateUserDelegationSas_RequiredParameters() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + }; + BlobContainerSasPermissions permissions = BlobContainerSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobContainerClient containerClient = InstrumentClient( + new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + //Act + Uri sasUri = containerClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + Sas = 
sasBuilder.ToSasQueryParameters(userDelegationKey, containerClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_Builder() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName + }; + BlobContainerSasPermissions permissions = BlobContainerSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobContainerClient containerClient = + InstrumentClient(new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = containerClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, containerClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNull() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + Uri serviceUri = new 
Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName + }; + BlobContainerClient containerClient = + InstrumentClient(new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => containerClient.GenerateUserDelegationSasUri(null, userDelegationKey, out stringToSign), + new ArgumentNullException("builder")); + } + + [RecordedTest] + public void GenerateUserDelegationSas_UserDelegationKeyNull() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName + }; + BlobContainerSasPermissions permissions = BlobContainerSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobContainerClient containerClient = + InstrumentClient(new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName + }; + + string stringToSign = null; + + // Act + TestHelper.AssertExpectedException( + () => containerClient.GenerateUserDelegationSasUri(sasBuilder, null, out stringToSign), + new ArgumentNullException("userDelegationKey")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullName() + { + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + Uri serviceUri = new 
Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName + }; + BlobContainerSasPermissions permissions = BlobContainerSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobContainerClient containerClient = + InstrumentClient(new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = null + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = containerClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + BlobSasBuilder sasBuilder2 = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName + }; + BlobUriBuilder expectedUri = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, containerClient.AccountName) + }; + + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobContainerClient containerClient = InstrumentClient(new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); 
+ + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = GetNewContainerName(), // set a different containerName + Resource = "b" + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => containerClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign), + new InvalidOperationException("SAS Uri cannot be generated. BlobSasBuilder.BlobContainerName does not match Name in the Client. BlobSasBuilder.BlobContainerName must either be left empty or match the Name in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderIncorrectlySettingBlobName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string containerName = GetNewContainerName(); + string blobName = GetNewBlobName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.blob.core.windows.net"); + BlobUriBuilder blobUriBuilder = new BlobUriBuilder(serviceUri) + { + BlobContainerName = containerName, + BlobName = blobName + }; + BlobSasPermissions permissions = BlobSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + BlobContainerClient containerClient = InstrumentClient(new BlobContainerClient( + blobUriBuilder.ToUri(), + GetOptions())); + + BlobSasBuilder sasBuilder = new BlobSasBuilder(permissions, expiresOn) + { + BlobContainerName = containerName, + BlobName = blobName + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + 
TestHelper.AssertExpectedException( + () => containerClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign), + new InvalidOperationException("SAS Uri cannot be generated. builder.BlobName cannot be set to create a Name SAS.")); + } + #endregion + [RecordedTest] public void CanMockBlobClientsRetrieval() { From b7b09dc83d8d6459593388391eb7f61ca1f8b8fc Mon Sep 17 00:00:00 2001 From: Nick Liu Date: Fri, 20 Sep 2024 14:25:27 -0400 Subject: [PATCH 12/25] [Storage] Added GenerateUserDelegationSasUri() for DataLake (#45732) --- .../src/Shared/Errors.Clients.cs | 2 +- .../Azure.Storage.Files.DataLake/CHANGELOG.md | 1 + .../Azure.Storage.Files.DataLake.net6.0.cs | 18 + ...e.Storage.Files.DataLake.netstandard2.0.cs | 18 + .../Azure.Storage.Files.DataLake/assets.json | 2 +- .../src/DataLakeDirectoryClient.cs | 165 ++++++- .../src/DataLakeFileSystemClient.cs | 184 +++++++- .../src/DataLakePathClient.cs | 210 +++++++-- .../tests/DirectoryClientTests.cs | 419 +++++++++++++++++- .../tests/FileClientTests.cs | 404 ++++++++++++++++- .../tests/FileSystemClientTests.cs | 266 +++++++++++ .../tests/PathClientTests.cs | 362 +++++++++++++++ 12 files changed, 1998 insertions(+), 53 deletions(-) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs index 4e5464fa17e6e..2a5fe38668104 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs @@ -78,7 +78,7 @@ public static InvalidOperationException SasBuilderEmptyParam(string builderName, => new InvalidOperationException($"SAS Uri cannot be generated. {builderName}.{paramName} cannot be set to create a {sasType} SAS."); public static InvalidOperationException SasIncorrectResourceType(string builderName, string builderParam, string value, string clientName) - => new InvalidOperationException($"SAS Uri cannot be generated. 
Expected {builderName}.{builderParam} to be set to {value} to generate" + + => new InvalidOperationException($"SAS Uri cannot be generated. Expected {builderName}.{builderParam} to be set to {value} to generate " + $"the respective SAS for the client, {clientName}"); public static ArgumentException InvalidPermission(char s) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md b/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md index 786a5ff1c0da6..d04d43370d5eb 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md @@ -3,6 +3,7 @@ ## 12.20.0-beta.2 (Unreleased) ### Features Added +- Added GenerateUserDelegationSasUri() for DataLakePathClient, DataLakeFileSystemClient, and DataLakeDirectoryClient - Deprecated Read()/ReadAsync() in favor of ReadStreaming()/ReadStreamingAsync() and ReadContent()/ReadContentAsync() for DataLake #45418 ### Breaking Changes diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index 884a12eb570c8..a202d6300f50e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -89,6 +89,12 @@ public DataLakeDirectoryClient(System.Uri directoryUri, Azure.Storage.StorageSha public override System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public override System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, out string stringToSign) { throw null; } + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, 
Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public override Azure.Response GetAccessControl(bool? userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public override System.Threading.Tasks.Task> GetAccessControlAsync(bool? 
userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Storage.Files.DataLake.DataLakeFileClient GetFileClient(string fileName) { throw null; } @@ -302,6 +308,12 @@ public DataLakeFileSystemClient(System.Uri fileSystemUri, Azure.Storage.StorageS public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeFileSystemSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeFileSystemSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response 
GetAccessPolicy(Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessPolicyAsync(Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Pageable GetDeletedPaths(string pathPrefix = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -388,6 +400,12 @@ public DataLakePathClient(System.Uri pathUri, Azure.Storage.StorageSharedKeyCred public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccessControl(bool? userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessControlAsync(bool? userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } protected internal virtual Azure.Storage.Files.DataLake.DataLakeDirectoryClient GetParentDirectoryClientCore() { throw null; } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index 884a12eb570c8..a202d6300f50e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -89,6 +89,12 @@ public DataLakeDirectoryClient(System.Uri directoryUri, Azure.Storage.StorageSha public override System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public override System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, out string 
stringToSign) { throw null; } + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public override Azure.Response GetAccessControl(bool? userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public override System.Threading.Tasks.Task> GetAccessControlAsync(bool? 
userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Storage.Files.DataLake.DataLakeFileClient GetFileClient(string fileName) { throw null; } @@ -302,6 +308,12 @@ public DataLakeFileSystemClient(System.Uri fileSystemUri, Azure.Storage.StorageS public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeFileSystemSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeFileSystemSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response 
GetAccessPolicy(Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessPolicyAsync(Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Pageable GetDeletedPaths(string pathPrefix = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -388,6 +400,12 @@ public DataLakePathClient(System.Uri pathUri, Azure.Storage.StorageSharedKeyCred public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Uri GenerateSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasBuilder builder, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey) { throw null; } + 
[System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Uri GenerateUserDelegationSasUri(Azure.Storage.Sas.DataLakeSasPermissions permissions, System.DateTimeOffset expiresOn, Azure.Storage.Files.DataLake.Models.UserDelegationKey userDelegationKey, out string stringToSign) { throw null; } public virtual Azure.Response GetAccessControl(bool? userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> GetAccessControlAsync(bool? userPrincipalName = default(bool?), Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } protected internal virtual Azure.Storage.Files.DataLake.DataLakeDirectoryClient GetParentDirectoryClientCore() { throw null; } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/assets.json b/sdk/storage/Azure.Storage.Files.DataLake/assets.json index 556652aaba663..8bb5dc2c5e400 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/assets.json +++ b/sdk/storage/Azure.Storage.Files.DataLake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.DataLake", - "Tag": "net/storage/Azure.Storage.Files.DataLake_c09a71b442" + "Tag": "net/storage/Azure.Storage.Files.DataLake_aaedf543aa" } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeDirectoryClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeDirectoryClient.cs index 41b75adc88d1f..7b2ba5c7cee3b 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeDirectoryClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeDirectoryClient.cs @@ -3059,11 
+3059,170 @@ public override Uri GenerateSasUri(DataLakeSasBuilder builder, out string string // Deep copy of builder so we don't modify the user's original DataLakeSasBuilder. builder = DataLakeSasBuilder.DeepCopy(builder); + SetBuilderAndValidate(builder); + DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) + { + Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + }; + return sasUri.ToUri(); + } + #endregion + + #region GenerateUserDelegationSas + /// + /// The + /// returns a that generates a DataLake Directory Service Shared Access Signature (SAS) + /// Uri based on the Client properties and parameter passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public override Uri GenerateUserDelegationSasUri(DataLakeSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey) + => GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out _); + + /// + /// The + /// returns a that generates a DataLake Directory Service Shared Access Signature (SAS) + /// Uri based on the Client properties and parameter passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . 
+ /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public override Uri GenerateUserDelegationSasUri(DataLakeSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey, out string stringToSign) => + GenerateUserDelegationSasUri(new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = FileSystemName, + Path = Path, + IsDirectory = true + }, userDelegationKey, out stringToSign); + + /// + /// The + /// returns a that generates a DataLake Directory Service Shared Access Signature (SAS) + /// Uri based on the Client properties and builder passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public override Uri GenerateUserDelegationSasUri(DataLakeSasBuilder builder, UserDelegationKey userDelegationKey) + => GenerateUserDelegationSasUri(builder, userDelegationKey, out _); + + /// + /// The + /// returns a that generates a DataLake Directory Service Shared Access Signature (SAS) + /// Uri based on the Client properties and builder passed. 
The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public override Uri GenerateUserDelegationSasUri(DataLakeSasBuilder builder, UserDelegationKey userDelegationKey, out string stringToSign) + { + builder = builder ?? throw Errors.ArgumentNull(nameof(builder)); + userDelegationKey = userDelegationKey ?? throw Errors.ArgumentNull(nameof(userDelegationKey)); + + // Deep copy of builder so we don't modify the user's original DataLakeSasBuilder. + builder = DataLakeSasBuilder.DeepCopy(builder); + + SetBuilderAndValidate(builder); + if (string.IsNullOrEmpty(AccountName)) + { + throw Errors.SasClientMissingData(nameof(AccountName)); + } + + DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) + { + Sas = builder.ToSasQueryParameters(userDelegationKey, AccountName, out stringToSign) + }; + return sasUri.ToUri(); + } + #endregion + + private void SetBuilderAndValidate(DataLakeSasBuilder builder) + { // Assign builder's IsDirectory, FileSystemName, and Path, if they are null. 
builder.IsDirectory ??= GetType() == typeof(DataLakeDirectoryClient); builder.FileSystemName ??= FileSystemName; builder.Path ??= Path; + // Validate that builder is properly set if (!builder.IsDirectory.GetValueOrDefault(false)) { throw Errors.SasIncorrectResourceType( @@ -3086,12 +3245,6 @@ public override Uri GenerateSasUri(DataLakeSasBuilder builder, out string string nameof(DataLakeSasBuilder), nameof(Path)); } - DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) - { - Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) - }; - return sasUri.ToUri(); } - #endregion } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileSystemClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileSystemClient.cs index 96cccd792d8cc..5a5e9ca8650cc 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileSystemClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileSystemClient.cs @@ -3287,26 +3287,158 @@ public virtual Uri GenerateSasUri( // Deep copy of builder so we don't modify the user's original DataLakeSasBuilder. builder = DataLakeSasBuilder.DeepCopy(builder); - // Assign builder's FileSystemName, if it is null. - builder.FileSystemName ??= Name; - - if (!builder.FileSystemName.Equals(Name, StringComparison.InvariantCulture)) + SetBuilderAndValidate(builder); + DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) { - throw Errors.SasNamesNotMatching( - nameof(builder.FileSystemName), - nameof(DataLakeSasBuilder), - nameof(Name)); - } - if (!string.IsNullOrEmpty(builder.Path)) + Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + }; + return sasUri.ToUri(); + } + #endregion + + #region GenerateUserDelegationSas + /// + /// The + /// returns a that generates a DataLake FileSystem Service + /// Shared Access Signature (SAS) Uri based on the Client properties and parameters passed. 
+ /// The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeFileSystemSasPermissions permissions, DateTimeOffset expiresOn, Models.UserDelegationKey userDelegationKey) => + GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out _); + + /// + /// The + /// returns a that generates a DataLake FileSystem Service + /// Shared Access Signature (SAS) Uri based on the Client properties and parameters passed. + /// The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. 
+ /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeFileSystemSasPermissions permissions, DateTimeOffset expiresOn, Models.UserDelegationKey userDelegationKey, out string stringToSign) => + GenerateUserDelegationSasUri(new DataLakeSasBuilder(permissions, expiresOn) { FileSystemName = Name }, userDelegationKey, out stringToSign); + + /// + /// The returns a + /// that generates a DataLake FileSystem Service Shared Access Signature (SAS) + /// Uri based on the Client properties and builder passed. + /// The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A on successfully deleting. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeSasBuilder builder, Models.UserDelegationKey userDelegationKey) + => GenerateUserDelegationSasUri(builder, userDelegationKey, out _); + + /// + /// The returns a + /// that generates a DataLake FileSystem Service Shared Access Signature (SAS) + /// Uri based on the Client properties and builder passed. + /// The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A on successfully deleting. 
+ /// + /// + /// A will be thrown if + /// a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeSasBuilder builder, Models.UserDelegationKey userDelegationKey, out string stringToSign) + { + builder = builder ?? throw Errors.ArgumentNull(nameof(builder)); + userDelegationKey = userDelegationKey ?? throw Errors.ArgumentNull(nameof(userDelegationKey)); + + // Deep copy of builder so we don't modify the user's original DataLakeSasBuilder. + builder = DataLakeSasBuilder.DeepCopy(builder); + + SetBuilderAndValidate(builder); + if (string.IsNullOrEmpty(AccountName)) { - throw Errors.SasBuilderEmptyParam( - nameof(builder), - nameof(builder.Path), - nameof(Constants.DataLake.FileSystemName)); + throw Errors.SasClientMissingData(nameof(AccountName)); } + DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) { - Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + Sas = builder.ToSasQueryParameters(userDelegationKey, AccountName, out stringToSign) }; return sasUri.ToUri(); } @@ -3622,6 +3754,28 @@ protected internal virtual DataLakeServiceClient GetParentServiceClientCore() return _parentServiceClient; } #endregion + + private void SetBuilderAndValidate(DataLakeSasBuilder builder) + { + // Assign builder's FileSystemName, if it is null. 
+ builder.FileSystemName ??= Name; + + // Validate that builder is properly set + if (!builder.FileSystemName.Equals(Name, StringComparison.InvariantCulture)) + { + throw Errors.SasNamesNotMatching( + nameof(builder.FileSystemName), + nameof(DataLakeSasBuilder), + nameof(Name)); + } + if (!string.IsNullOrEmpty(builder.Path)) + { + throw Errors.SasBuilderEmptyParam( + nameof(builder), + nameof(builder.Path), + nameof(Constants.DataLake.FileSystemName)); + } + } } namespace Specialized diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakePathClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakePathClient.cs index c876715c2f7b0..cd7d7e0ec75d3 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakePathClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakePathClient.cs @@ -529,7 +529,7 @@ internal DataLakePathClient( (PathRestClient dfsPathRestClient, PathRestClient blobPathRestClient) = BuildPathRestClients(_dfsUri, _blobUri); _pathRestClient = dfsPathRestClient; - _blobPathRestClient = blobPathRestClient; + _blobPathRestClient = blobPathRestClient; DataLakeErrors.VerifyHttpsCustomerProvidedKey(_uri, _clientConfiguration.CustomerProvidedKey); } @@ -1166,7 +1166,7 @@ internal virtual async Task> CreateInternal( if (expiresOn.HasValue && timeToExpire.HasValue) { - throw new ArgumentException($"{nameof(DataLakePathCreateOptions)}.{nameof(DataLakePathCreateOptions.ScheduleDeletionOptions.ExpiresOn)} and {nameof(DataLakePathCreateOptions)}.{nameof(DataLakePathCreateOptions.ScheduleDeletionOptions.TimeToExpire)} cannot both be set."); + throw new ArgumentException($"{nameof(DataLakePathCreateOptions)}.{nameof(DataLakePathCreateOptions.ScheduleDeletionOptions.ExpiresOn)} and {nameof(DataLakePathCreateOptions)}.{nameof(DataLakePathCreateOptions.ScheduleDeletionOptions.TimeToExpire)} cannot both be set."); } try @@ -1418,10 +1418,10 @@ public virtual async Task> CreateIfNotExistsAsync( public virtual Response CreateIfNotExists( 
#pragma warning restore AZC0002 // DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken. PathResourceType resourceType, - PathHttpHeaders httpHeaders , + PathHttpHeaders httpHeaders, Metadata metadata, string permissions, - string umask , + string umask, CancellationToken cancellationToken) => CreateIfNotExistsInternal( resourceType: resourceType, @@ -3946,36 +3946,158 @@ public virtual Uri GenerateSasUri(DataLakeSasBuilder builder, out string stringT // Deep copy of builder so we don't modify the user's original DataLakeSasBuilder. builder = DataLakeSasBuilder.DeepCopy(builder); - // Assign builder's IsDirectory, FileSystemName, and Path, if they are null. - builder.IsDirectory ??= GetType() == typeof(DataLakeDirectoryClient); - builder.FileSystemName ??= FileSystemName; - builder.Path ??= Path; - - if (builder.IsDirectory.GetValueOrDefault(false)) + SetBuilderAndValidate(builder); + DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) { - throw Errors.SasIncorrectResourceType( - nameof(builder), - nameof(builder.IsDirectory), - nameof(Constants.FalseName), - nameof(this.GetType)); - } - if (!builder.FileSystemName.Equals(FileSystemName, StringComparison.InvariantCulture)) + Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + }; + return sasUri.ToUri(); + } + #endregion + + #region GenerateUserDelegationSas + /// + /// The + /// returns a that generates a DataLake Path Service Shared Access Signature (SAS) + /// Uri based on the Client properties and parameter passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. 
This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey) + => GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out _); + + /// + /// The + /// returns a that generates a DataLake Path Service Shared Access Signature (SAS) + /// Uri based on the Client properties and parameter passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Specifies the list of permissions to be associated with the SAS. + /// See . + /// + /// + /// Required. Specifies the time at which the SAS becomes invalid. This field + /// must be omitted if it has been specified in an associated stored access policy. + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if a failure occurs. 
+ /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeSasPermissions permissions, DateTimeOffset expiresOn, UserDelegationKey userDelegationKey, out string stringToSign) => + GenerateUserDelegationSasUri(new DataLakeSasBuilder(permissions, expiresOn) { - throw Errors.SasNamesNotMatching( - nameof(builder.FileSystemName), - nameof(DataLakeSasBuilder), - nameof(FileSystemName)); - } - if (!builder.Path.Equals(Path, StringComparison.InvariantCulture)) + FileSystemName = FileSystemName, + Path = Path + }, userDelegationKey, out stringToSign); + + /// + /// The + /// returns a that generates a DataLake Path Service Shared Access Signature (SAS) + /// Uri based on the Client properties and builder passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeSasBuilder builder, UserDelegationKey userDelegationKey) + => GenerateUserDelegationSasUri(builder, userDelegationKey, out _); + + /// + /// The + /// returns a that generates a DataLake Path Service Shared Access Signature (SAS) + /// Uri based on the Client properties and builder passed. The SAS is signed by the user delegation key passed in. + /// + /// For more information, see + /// + /// Creating an user delegation SAS. + /// + /// + /// Required. Used to generate a Shared Access Signature (SAS). + /// + /// + /// Required. A returned from + /// . + /// + /// + /// For debugging purposes only. 
This string will be overwritten with the string to sign that was used to generate the SAS Uri. + /// + /// + /// A containing the SAS Uri. + /// + /// + /// A will be thrown if + /// a failure occurs. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + [CallerShouldAudit("https://aka.ms/azsdk/callershouldaudit/storage-files-datalake")] + public virtual Uri GenerateUserDelegationSasUri(DataLakeSasBuilder builder, UserDelegationKey userDelegationKey, out string stringToSign) + { + builder = builder ?? throw Errors.ArgumentNull(nameof(builder)); + userDelegationKey = userDelegationKey ?? throw Errors.ArgumentNull(nameof(userDelegationKey)); + + // Deep copy of builder so we don't modify the user's original DataLakeSasBuilder. + builder = DataLakeSasBuilder.DeepCopy(builder); + + SetBuilderAndValidate(builder); + if (string.IsNullOrEmpty(AccountName)) { - throw Errors.SasNamesNotMatching( - nameof(builder.Path), - nameof(DataLakeSasBuilder), - nameof(Path)); + throw Errors.SasClientMissingData(nameof(AccountName)); } + DataLakeUriBuilder sasUri = new DataLakeUriBuilder(Uri) { - Sas = builder.ToSasQueryParameters(ClientConfiguration.SharedKeyCredential, out stringToSign) + Sas = builder.ToSasQueryParameters(userDelegationKey, AccountName, out stringToSign) }; return sasUri.ToUri(); } @@ -4042,6 +4164,38 @@ protected internal virtual DataLakeDirectoryClient GetParentDirectoryClientCore( return _parentDirectoryClient; } #endregion + + private void SetBuilderAndValidate(DataLakeSasBuilder builder) + { + // Assign builder's IsDirectory, FileSystemName, and Path, if they are null. 
+ builder.IsDirectory ??= GetType() == typeof(DataLakeDirectoryClient); + builder.FileSystemName ??= FileSystemName; + builder.Path ??= Path; + + // Validate that builder is properly set + if (builder.IsDirectory.GetValueOrDefault(false)) + { + throw Errors.SasIncorrectResourceType( + nameof(builder), + nameof(builder.IsDirectory), + Constants.FalseName, + nameof(this.GetType)); + } + if (!builder.FileSystemName.Equals(FileSystemName, StringComparison.InvariantCulture)) + { + throw Errors.SasNamesNotMatching( + nameof(builder.FileSystemName), + nameof(DataLakeSasBuilder), + nameof(FileSystemName)); + } + if (!builder.Path.Equals(Path, StringComparison.InvariantCulture)) + { + throw Errors.SasNamesNotMatching( + nameof(builder.Path), + nameof(DataLakeSasBuilder), + nameof(Path)); + } + } } namespace Specialized diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs index 44f5b378e26df..83938361fb0d0 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs @@ -6470,7 +6470,424 @@ public void GenerateSas_BuilderIsDirectoryError() // Act TestHelper.AssertExpectedException( () => directoryClient.GenerateSasUri(sasBuilder), - new InvalidOperationException("SAS Uri cannot be generated. Expected builder.IsDirectory to be set to true to generatethe respective SAS for the client, GetType")); + new InvalidOperationException("SAS Uri cannot be generated. 
Expected builder.IsDirectory to be set to true to generate the respective SAS for the client, GetType")); + } + #endregion + + #region GenerateUserDelegationSasTests + [RecordedTest] + public async Task GenerateUserDelegationSas_RequiredParameters() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = directoryClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out stringToSign); + + // Assert + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path, + Sas = sasBuilder.ToSasQueryParameters(userDelegationKey, directoryClient.AccountName) + }; + + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_Builder() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string 
path = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true, + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true, + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, directoryClient.AccountName); + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNull() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName 
= fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => directoryClient.GenerateUserDelegationSasUri(null, userDelegationKey, out stringToSign), + new ArgumentNullException("builder")); + } + + [RecordedTest] + public void GenerateUserDelegationSas_UserDelegationKeyNull() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true, + }; + + string stringToSign = null; + + // Assert + TestHelper.AssertExpectedException( + () => directoryClient.GenerateUserDelegationSasUri(sasBuilder, null, out stringToSign), + new ArgumentNullException("userDelegationKey")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullFileSystemName() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = 
GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = null, + Path = path, + IsDirectory = true, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true, + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, directoryClient.AccountName); + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongFileSystemName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string directoryName = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = directoryName + }; + 
DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = GetNewFileSystemName(), // different filesytem name + Path = directoryName, + IsDirectory = true + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.FileSystemName does not match FileSystemName in the Client. DataLakeSasBuilder.FileSystemName must either be left empty or match the FileSystemName in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullPath() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = null, + IsDirectory = true, + }; + + 
Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true, + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, directoryClient.AccountName); + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongPathName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string directoryName = GetNewDirectoryName(); + string fileSystemName = GetNewFileSystemName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = directoryName + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = GetNewDirectoryName(), // different directory name + IsDirectory = true, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = 
userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.Path does not match Path in the Client. DataLakeSasBuilder.Path must either be left empty or match the Path in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderIsDirectoryNull() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = null, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = true, + }; + expectedUri.Sas = 
sasBuilder2.ToSasQueryParameters(userDelegationKey, directoryClient.AccountName); + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderIsDirectoryError() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string directoryName = GetNewDirectoryName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = directoryName + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeDirectoryClient directoryClient = InstrumentClient(new DataLakeDirectoryClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = directoryName, + IsDirectory = false, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => directoryClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. 
Expected builder.IsDirectory to be set to true to generate the respective SAS for the client, GetType")); } #endregion diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs index d43891039bdaf..ef9f73e40e18d 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs @@ -6356,7 +6356,409 @@ public void GenerateSas_BuilderIsDirectoryError() // Act TestHelper.AssertExpectedException( () => fileClient.GenerateSasUri(sasBuilder), - new InvalidOperationException("SAS Uri cannot be generated. Expected builder.IsDirectory to be set to FalseName to generatethe respective SAS for the client, GetType")); + new InvalidOperationException("SAS Uri cannot be generated. Expected builder.IsDirectory to be set to false to generate the respective SAS for the client, GetType")); + } + #endregion + + #region GenerateUserDelegationSasTests + [RecordedTest] + public async Task GenerateUserDelegationSas_RequiredParameters() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri 
= fileClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, fileClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_Builder() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path, + Sas = 
sasBuilder2.ToSasQueryParameters(userDelegationKey, fileClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNull() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => fileClient.GenerateUserDelegationSasUri(null, userDelegationKey), + new ArgumentNullException("builder")); + } + + [RecordedTest] + public void GenerateUserDelegationSas_UserDelegationKeyNull() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path + }; + + // Act + 
TestHelper.AssertExpectedException( + () => fileClient.GenerateUserDelegationSasUri(sasBuilder, null), + new ArgumentNullException("userDelegationKey")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullFileSystemName() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = null, + Path = path + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, fileClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongFileSystemName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + 
string path = GetNewFileName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = GetNewFileSystemName(), // different filesystem name + Path = path, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.FileSystemName does not match FileSystemName in the Client. 
DataLakeSasBuilder.FileSystemName must either be left empty or match the FileSystemName in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullFileName() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = null + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, fileClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongFileName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + Uri serviceUri = 
new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = GetNewFileName(), // different path + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.Path does not match Path in the Client. 
DataLakeSasBuilder.Path must either be left empty or match the Path in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullIsDirectory() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path + }; + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = null + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + IsDirectory = false + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = path, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, fileClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderIsDirectoryError() + { + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string fileName = 
GetNewFileName(); + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + DirectoryOrFilePath = fileName + }; + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + + DataLakeFileClient fileClient = InstrumentClient(new DataLakeFileClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = GetNewFileName(), + IsDirectory = true, + IPRange = new SasIPRange(System.Net.IPAddress.None, System.Net.IPAddress.None), + ExpiresOn = Recording.UtcNow.AddHours(+1) + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => fileClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. 
Expected builder.IsDirectory to be set to false to generate the respective SAS for the client, GetType")); } #endregion diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/FileSystemClientTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/FileSystemClientTests.cs index 2c0af9ceaf4eb..6f719cb694eba 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/FileSystemClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/FileSystemClientTests.cs @@ -3329,6 +3329,272 @@ public void GenerateSas_BuilderWrongName() } #endregion + #region GenerateUserDelegationSasTests + [RecordedTest] + public async Task GenerateUserDelegationSas_RequiredParameters() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + DataLakeFileSystemSasPermissions permissions = DataLakeFileSystemSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemClient fileSystemClient = InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileSystemClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out stringToSign); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + Sas = 
sasBuilder2.ToSasQueryParameters(userDelegationKey, fileSystemClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_Builder() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + DataLakeFileSystemSasPermissions permissions = DataLakeFileSystemSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemClient fileSystemClient = InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemClient.Name + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileSystemClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, fileSystemClient.AccountName) + }; + + Assert.AreEqual(expectedUri.ToUri(), sasUri); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNull() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + + Uri serviceUri = new 
Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemClient fileSystemClient = InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => fileSystemClient.GenerateUserDelegationSasUri(null, userDelegationKey, out stringToSign), + new ArgumentNullException("builder")); + } + + [RecordedTest] + public void GenerateUserDelegationSas_UserDelegationKeyNull() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + DataLakeFileSystemSasPermissions permissions = DataLakeFileSystemSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemClient fileSystemClient = InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemClient.Name + }; + + string stringToSign = null; + + // Act + TestHelper.AssertExpectedException( + () => fileSystemClient.GenerateUserDelegationSasUri(sasBuilder, null, out stringToSign), + new ArgumentNullException("userDelegationKey")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullName() + { + var constants = TestConstants.Create(this); + string fileSystemName = 
GetNewFileSystemName(); + DataLakeFileSystemSasPermissions permissions = DataLakeFileSystemSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemClient fileSystemClient = InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = null + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = fileSystemClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName, + Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, fileSystemClient.AccountName) + }; + + Assert.AreEqual(expectedUri.ToUri(), sasUri); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongName() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemSasPermissions permissions = DataLakeFileSystemSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeFileSystemClient fileSystemClient = 
InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = GetNewFileSystemName(), // different filesystem name + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => fileSystemClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.FileSystemName does not match Name in the Client. DataLakeSasBuilder.FileSystemName must either be left empty or match the Name in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderIncorrectlySetPath() + { + // Arrange + TestConstants constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + + Uri serviceUri = new Uri($"https://{constants.Sas.Account}.dfs.core.windows.net"); + DataLakeUriBuilder dataLakeUriBuilder = new DataLakeUriBuilder(serviceUri) + { + FileSystemName = fileSystemName + }; + + DataLakeFileSystemSasPermissions permissions = DataLakeFileSystemSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DataLakeFileSystemClient fileSystemClient = InstrumentClient(new DataLakeFileSystemClient( + dataLakeUriBuilder.ToUri(), + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = GetNewFileName() + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + 
TestHelper.AssertExpectedException( + () => fileSystemClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. builder.Path cannot be set to create a FileSystemName SAS.")); + } + #endregion + [RecordedTest] public void CanMockClientConstructors() { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/PathClientTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/PathClientTests.cs index 64d119eb3c860..f95ecaeac8d59 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/PathClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/PathClientTests.cs @@ -409,6 +409,368 @@ public void GenerateSas_BuilderIsDirectoryError() } #endregion + #region GenerateUserDelegationSasTests + [RecordedTest] + public async Task GenerateUserDelegationSas_RequiredParameters() + { + // Arrange + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = pathClient.GenerateUserDelegationSasUri(permissions, expiresOn, userDelegationKey, out stringToSign); + + // Assert + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path + }; + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(blobEndpoint) + { + Sas = 
sasBuilder2.ToSasQueryParameters(userDelegationKey, pathClient.AccountName) + }; + Assert.AreEqual(expectedUri.ToUri().ToString(), sasUri.ToString()); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_Builder() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(blobEndpoint); + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, pathClient.AccountName); + Assert.AreEqual(expectedUri.ToUri().ToString(), sasUri.ToString()); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNull() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = 
GetNewFileName(); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => pathClient.GenerateUserDelegationSasUri(null, userDelegationKey, out stringToSign), + new ArgumentNullException("builder")); + } + + [RecordedTest] + public void GenerateUserDelegationSas_UserDelegationKeyNull() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn + }; + + string stringToSign = null; + + // Act + TestHelper.AssertExpectedException( + () => pathClient.GenerateUserDelegationSasUri(sasBuilder, null, out stringToSign), + new ArgumentNullException("userDelegationKey")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullFileSystemName() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + 
DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = null, + Path = path, + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(blobEndpoint); + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, pathClient.AccountName); + Assert.AreEqual(expectedUri.ToUri().ToString(), sasUri.ToString()); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongFileSystemName() + { + // Arrange + var constants = TestConstants.Create(this); + var blobEndpoint = new Uri("http://127.0.0.1/"); + UriBuilder blobUriBuilder = new UriBuilder(blobEndpoint); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + blobUriBuilder.Path += constants.Sas.Account + "/" + GetNewFileSystemName() + "/" + path; + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobUriBuilder.Uri, + GetOptions())); + + DataLakeSasBuilder sasBuilder = 
new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = GetNewFileSystemName(), // different filesystem name + Path = path, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.FileSystemName does not match FileSystemName in the Client. DataLakeSasBuilder.FileSystemName must either be left empty or match the FileSystemName in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullPath() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = null, + StartsOn = startsOn + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + DataLakeUriBuilder expectedUri = new 
DataLakeUriBuilder(blobEndpoint); + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, pathClient.AccountName); + Assert.AreEqual(expectedUri.ToUri().ToString(), sasUri.ToString()); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderWrongPath() + { + // Arrange + var constants = TestConstants.Create(this); + var blobEndpoint = new Uri("http://127.0.0.1/"); + UriBuilder blobUriBuilder = new UriBuilder(blobEndpoint); + string fileSystemName = GetNewFileSystemName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + blobUriBuilder.Path += constants.Sas.Account + "/" + fileSystemName + "/" + GetNewFileName(); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobUriBuilder.Uri, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = GetNewFileName(), // different path + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. DataLakeSasBuilder.Path does not match Path in the Client. 
DataLakeSasBuilder.Path must either be left empty or match the Path in the Client")); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderNullIsDirectory() + { + var constants = TestConstants.Create(this); + string fileSystemName = GetNewFileSystemName(); + string path = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + DateTimeOffset startsOn = Recording.UtcNow.AddHours(-1); + var blobEndpoint = new Uri("http://127.0.0.1/" + constants.Sas.Account + "/" + fileSystemName + "/" + path); + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobEndpoint, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn, + IsDirectory = null + }; + + string stringToSign = null; + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + Uri sasUri = pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey, out stringToSign); + + // Assert + DataLakeUriBuilder expectedUri = new DataLakeUriBuilder(blobEndpoint); + DataLakeSasBuilder sasBuilder2 = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = path, + StartsOn = startsOn, + IsDirectory = false + }; + expectedUri.Sas = sasBuilder2.ToSasQueryParameters(userDelegationKey, pathClient.AccountName); + Assert.AreEqual(expectedUri.ToUri().ToString(), sasUri.ToString()); + Assert.IsNotNull(stringToSign); + } + + [RecordedTest] + public async Task GenerateUserDelegationSas_BuilderIsDirectoryError() + { + var constants = TestConstants.Create(this); + var blobEndpoint = new Uri("http://127.0.0.1/"); + UriBuilder blobUriBuilder = new 
UriBuilder(blobEndpoint); + string fileSystemName = GetNewFileSystemName(); + string fileName = GetNewFileName(); + DataLakeSasPermissions permissions = DataLakeSasPermissions.Read; + DateTimeOffset expiresOn = Recording.UtcNow.AddHours(+1); + blobUriBuilder.Path += constants.Sas.Account + "/" + fileSystemName + "/" + fileName; + DataLakePathClient pathClient = InstrumentClient(new DataLakePathClient( + blobUriBuilder.Uri, + GetOptions())); + + DataLakeSasBuilder sasBuilder = new DataLakeSasBuilder(permissions, expiresOn) + { + FileSystemName = fileSystemName, + Path = fileName, + IsDirectory = true, + }; + + Response userDelegationKeyResponse = await GetServiceClient_OAuth().GetUserDelegationKeyAsync( + startsOn: null, + expiresOn: Recording.UtcNow.AddHours(1)); + UserDelegationKey userDelegationKey = userDelegationKeyResponse.Value; + + // Act + TestHelper.AssertExpectedException( + () => pathClient.GenerateUserDelegationSasUri(sasBuilder, userDelegationKey), + new InvalidOperationException("SAS Uri cannot be generated. 
Expected builder.IsDirectory to be set to false to generate the respective SAS for the client, GetType")); + } + #endregion + [RecordedTest] public void CanMockClientConstructors() { From 683daccef0b210321adbdd9a0abeb73606093b15 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Fri, 20 Sep 2024 13:54:09 -0500 Subject: [PATCH 13/25] Added BlobAccessTierNotSupportedForAccountType to BlobErrorCode (#45594) --- .../Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs | 1 + .../api/Azure.Storage.Blobs.netstandard2.0.cs | 1 + .../api/Azure.Storage.Blobs.netstandard2.1.cs | 1 + .../Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs | 2 +- .../Azure.Storage.Blobs/src/Generated/BlobRestClient.cs | 2 +- .../Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs | 2 +- .../Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs | 2 +- .../Azure.Storage.Blobs/src/Generated/Models/BlobErrorCode.cs | 3 +++ .../Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs | 2 +- .../Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs | 2 +- sdk/storage/Azure.Storage.Blobs/src/autorest.md | 2 +- 11 files changed, 13 insertions(+), 7 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index 6e618343e0aee..183edccf1578f 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -615,6 +615,7 @@ public BlobDownloadToOptions() { } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationResourceTypeMismatch { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationServiceMismatch { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationSourceIPMismatch { get { throw null; } } + public static Azure.Storage.Blobs.Models.BlobErrorCode 
BlobAccessTierNotSupportedForAccountType { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobAlreadyExists { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobArchived { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobBeingRehydrated { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index 6e618343e0aee..183edccf1578f 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -615,6 +615,7 @@ public BlobDownloadToOptions() { } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationResourceTypeMismatch { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationServiceMismatch { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationSourceIPMismatch { get { throw null; } } + public static Azure.Storage.Blobs.Models.BlobErrorCode BlobAccessTierNotSupportedForAccountType { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobAlreadyExists { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobArchived { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobBeingRehydrated { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index 6e618343e0aee..183edccf1578f 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -615,6 +615,7 @@ public BlobDownloadToOptions() { } public static Azure.Storage.Blobs.Models.BlobErrorCode 
AuthorizationResourceTypeMismatch { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationServiceMismatch { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode AuthorizationSourceIPMismatch { get { throw null; } } + public static Azure.Storage.Blobs.Models.BlobErrorCode BlobAccessTierNotSupportedForAccountType { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobAlreadyExists { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobArchived { get { throw null; } } public static Azure.Storage.Blobs.Models.BlobErrorCode BlobBeingRehydrated { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs index 88104aa95bb00..62c45c554783a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs @@ -29,7 +29,7 @@ internal partial class AppendBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. 
public AppendBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs index 615257741b781..4cb72cdf210a6 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class BlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public BlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs index 0723c07204ac2..3f4241dfce9b6 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class BlockBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. 
public BlockBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs index 024bfecd4e90b..9dd20ee7e1811 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/ContainerRestClient.cs @@ -31,7 +31,7 @@ internal partial class ContainerRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public ContainerRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/Models/BlobErrorCode.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/Models/BlobErrorCode.cs index 6bb7d0cb30494..d26702b46e111 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/Models/BlobErrorCode.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/Models/BlobErrorCode.cs @@ -135,6 +135,7 @@ public BlobErrorCode(string value) private const string AuthorizationPermissionMismatchValue = "AuthorizationPermissionMismatch"; private const string AuthorizationServiceMismatchValue = "AuthorizationServiceMismatch"; private const string AuthorizationResourceTypeMismatchValue = "AuthorizationResourceTypeMismatch"; + private const string BlobAccessTierNotSupportedForAccountTypeValue = "BlobAccessTierNotSupportedForAccountType"; /// AccountAlreadyExists. 
public static BlobErrorCode AccountAlreadyExists { get; } = new BlobErrorCode(AccountAlreadyExistsValue); @@ -362,6 +363,8 @@ public BlobErrorCode(string value) public static BlobErrorCode AuthorizationServiceMismatch { get; } = new BlobErrorCode(AuthorizationServiceMismatchValue); /// AuthorizationResourceTypeMismatch. public static BlobErrorCode AuthorizationResourceTypeMismatch { get; } = new BlobErrorCode(AuthorizationResourceTypeMismatchValue); + /// BlobAccessTierNotSupportedForAccountType. + public static BlobErrorCode BlobAccessTierNotSupportedForAccountType { get; } = new BlobErrorCode(BlobAccessTierNotSupportedForAccountTypeValue); /// Determines if two values are the same. public static bool operator ==(BlobErrorCode left, BlobErrorCode right) => left.Equals(right); /// Determines if two values are not the same. diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs index 260d8021543e2..0aea4f28d32ff 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs @@ -30,7 +30,7 @@ internal partial class PageBlobRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. 
public PageBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs index e274940f81e8d..2abac369c0cae 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/ServiceRestClient.cs @@ -31,7 +31,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-08-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 85fb92c2349cd..cda269a135e88 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/f6f50c6388fd5836fa142384641b8353a99874ef/specification/storage/data-plane/Microsoft.BlobStorage/stable/2024-08-04/blob.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/7c58ab44b9cdd08fa5aae782a34bc42bd67ff59b/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true From d209893a57627be602d43166346bebc97750a394 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:26:14 -0500 Subject: [PATCH 14/25] Added Premium to Share Access Tier (#45603) * Added Premium to Share Access Tier * Updated autorest --- .../api/Azure.Storage.Files.Shares.net6.0.cs | 1 + ...ure.Storage.Files.Shares.netstandard2.0.cs | 1 + .../Azure.Storage.Files.Shares/assets.json | 2 +- .../src/Generated/DirectoryRestClient.cs | 2 +- .../src/Generated/FileRestClient.cs | 2 +- .../src/Generated/Models/ShareAccessTier.cs | 3 ++ .../src/Generated/ServiceRestClient.cs | 2 +- .../src/Generated/ShareRestClient.cs | 2 +- .../src/autorest.md | 2 +- .../tests/ShareClientTests.cs | 45 +++++++++++++++++++ 10 files changed, 56 insertions(+), 6 deletions(-) diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index 88fbd1326e018..fc2404a47f0af 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -563,6 +563,7 @@ public ShareAccessPolicy() { } public ShareAccessTier(string value) { throw null; } public static Azure.Storage.Files.Shares.Models.ShareAccessTier Cool { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareAccessTier Hot { 
get { throw null; } } + public static Azure.Storage.Files.Shares.Models.ShareAccessTier Premium { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareAccessTier TransactionOptimized { get { throw null; } } public bool Equals(Azure.Storage.Files.Shares.Models.ShareAccessTier other) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index 88fbd1326e018..fc2404a47f0af 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -563,6 +563,7 @@ public ShareAccessPolicy() { } public ShareAccessTier(string value) { throw null; } public static Azure.Storage.Files.Shares.Models.ShareAccessTier Cool { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareAccessTier Hot { get { throw null; } } + public static Azure.Storage.Files.Shares.Models.ShareAccessTier Premium { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareAccessTier TransactionOptimized { get { throw null; } } public bool Equals(Azure.Storage.Files.Shares.Models.ShareAccessTier other) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index 9ca749681b79e..5b5f810958b23 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": 
"net/storage/Azure.Storage.Files.Shares_14e0fa0c22" + "Tag": "net/storage/Azure.Storage.Files.Shares_95c2a36dbc" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs index 961c6ff47ce59..8a2edb8b99134 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/DirectoryRestClient.cs @@ -33,7 +33,7 @@ internal partial class DirectoryRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// If true, the trailing dot will not be trimmed from the target URI. /// Valid value is backup. /// If true, the trailing dot will not be trimmed from the source URI. diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs index d4b584e6660ee..231f5a494c94e 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs @@ -34,7 +34,7 @@ internal partial class FileRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. 
The default value is "2025-01-05". /// Only update is supported: - Update: Writes the bytes downloaded from the source url into the specified range. The default value is "update". /// If true, the trailing dot will not be trimmed from the target URI. /// Valid value is backup. diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareAccessTier.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareAccessTier.cs index ae732b82353e7..4881d694a6146 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareAccessTier.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareAccessTier.cs @@ -25,6 +25,7 @@ public ShareAccessTier(string value) private const string TransactionOptimizedValue = "TransactionOptimized"; private const string HotValue = "Hot"; private const string CoolValue = "Cool"; + private const string PremiumValue = "Premium"; /// TransactionOptimized. public static ShareAccessTier TransactionOptimized { get; } = new ShareAccessTier(TransactionOptimizedValue); @@ -32,6 +33,8 @@ public ShareAccessTier(string value) public static ShareAccessTier Hot { get; } = new ShareAccessTier(HotValue); /// Cool. public static ShareAccessTier Cool { get; } = new ShareAccessTier(CoolValue); + /// Premium. + public static ShareAccessTier Premium { get; } = new ShareAccessTier(PremiumValue); /// Determines if two values are the same. public static bool operator ==(ShareAccessTier left, ShareAccessTier right) => left.Equals(right); /// Determines if two values are not the same. 
diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs index ef4c21b9a33c7..fe5ea495a7a15 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ServiceRestClient.cs @@ -31,7 +31,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// Valid value is backup. /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, ShareTokenIntent? fileRequestIntent = null) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs index 599aacf2c6287..3012d3d8735b1 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs @@ -32,7 +32,7 @@ internal partial class ShareRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, share, directory or file that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2024-11-04". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// Valid value is backup. /// , , or is null. 
public ShareRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, ShareTokenIntent? fileRequestIntent = null) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 43022bc56d1c1..facaa61c21784 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. ``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/98b600498947073c18c2ac5eb7c3c658db5a1a59/specification/storage/data-plane/Microsoft.FileStorage/stable/2024-11-04/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/718996a0c4435626d2f55d4ab6e65da5ac48916c/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs index 0640fc504c39e..36c7a115d8ecc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs @@ -364,6 +364,30 @@ public async Task CreateAsync_AccessTier() await share.DeleteAsync(); } + [RecordedTest] + [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2019_12_12)] + public async Task CreateAsync_AccessTier_Premium() + { + // Arrange + var shareName = GetNewShareName(); + ShareServiceClient service = SharesClientBuilder.GetServiceClient_PremiumFile(); + ShareClient share = InstrumentClient(service.GetShareClient(shareName)); + ShareCreateOptions options = new ShareCreateOptions + { + AccessTier = ShareAccessTier.Premium + }; + + // Act + await share.CreateAsync(options); + + // Assert + Response 
propertiesResponse = await share.GetPropertiesAsync(); + Assert.AreEqual(ShareAccessTier.Premium.ToString(), propertiesResponse.Value.AccessTier); + + // Cleanup + await share.DeleteAsync(); + } + [RecordedTest] public async Task CreateAsync_Error() { @@ -1774,6 +1798,27 @@ public async Task SetPropertiesAsync_AccessTier() Assert.IsNotNull(response.Value.AccessTierChangeTime); } + [RecordedTest] + [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2019_12_12)] + public async Task SetPropertiesAsync_AccessTier_Premium() + { + // Arrange + await using DisposingShare test = await GetTestShareAsync(SharesClientBuilder.GetServiceClient_PremiumFile()); + ShareClient share = test.Share; + + ShareSetPropertiesOptions options = new ShareSetPropertiesOptions + { + AccessTier = ShareAccessTier.Premium + }; + + // Act + await share.SetPropertiesAsync(options); + + // Assert + Response response = await share.GetPropertiesAsync(); + Assert.AreEqual(ShareAccessTier.Premium.ToString(), response.Value.AccessTier); + } + [RecordedTest] [PlaybackOnly("https://github.com/Azure/azure-sdk-for-net/issues/17262")] [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2020_04_08)] From 62194c3f17767c274bc4ef190c558eb3e2da1c34 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Fri, 20 Sep 2024 18:27:14 -0500 Subject: [PATCH 15/25] Added Binary ACE to Copy File (#45611) --- .../api/Azure.Storage.Files.Shares.net6.0.cs | 1 + ...ure.Storage.Files.Shares.netstandard2.0.cs | 1 + .../Azure.Storage.Files.Shares/assets.json | 2 +- .../src/Generated/FileRestClient.cs | 16 ++++--- .../src/Models/ShareFileCopyOptions.cs | 7 +++ .../src/ShareFileClient.cs | 12 +++++ .../src/autorest.md | 2 +- .../tests/FileClientTests.cs | 44 +++++++++++++++++++ .../tests/ShareClientTestFixtureAttribute.cs | 4 +- 9 files changed, 80 insertions(+), 9 deletions(-) diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs 
b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index fc2404a47f0af..48e367ecc2190 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -757,6 +757,7 @@ public ShareFileCopyOptions() { } public Azure.Storage.Files.Shares.Models.PermissionCopyMode? FilePermissionCopyMode { get { throw null; } set { } } public bool? IgnoreReadOnly { get { throw null; } set { } } public System.Collections.Generic.IDictionary Metadata { get { throw null; } set { } } + public Azure.Storage.Files.Shares.Models.FilePermissionFormat? PermissionFormat { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.FileSmbProperties SmbProperties { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.CopyableFileSmbProperties SmbPropertiesToCopy { get { throw null; } set { } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index fc2404a47f0af..48e367ecc2190 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -757,6 +757,7 @@ public ShareFileCopyOptions() { } public Azure.Storage.Files.Shares.Models.PermissionCopyMode? FilePermissionCopyMode { get { throw null; } set { } } public bool? IgnoreReadOnly { get { throw null; } set { } } public System.Collections.Generic.IDictionary Metadata { get { throw null; } set { } } + public Azure.Storage.Files.Shares.Models.FilePermissionFormat? 
PermissionFormat { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.FileSmbProperties SmbProperties { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.CopyableFileSmbProperties SmbPropertiesToCopy { get { throw null; } set { } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index 5b5f810958b23..4bb5fc7bb03c9 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": "net/storage/Azure.Storage.Files.Shares_95c2a36dbc" + "Tag": "net/storage/Azure.Storage.Files.Shares_27dac02512" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs index 231f5a494c94e..32484654eb2be 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs @@ -1290,7 +1290,7 @@ public ResponseWithHeaders GetRange } } - internal HttpMessage CreateStartCopyRequest(string copySource, int? timeout, IDictionary metadata, string filePermission, string filePermissionKey, CopyFileSmbInfo copyFileSmbInfo, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateStartCopyRequest(string copySource, int? timeout, IDictionary metadata, string filePermission, FilePermissionFormat? filePermissionFormat, string filePermissionKey, CopyFileSmbInfo copyFileSmbInfo, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -1312,6 +1312,10 @@ internal HttpMessage CreateStartCopyRequest(string copySource, int? 
timeout, IDi { request.Headers.Add("x-ms-file-permission", filePermission); } + if (filePermissionFormat != null) + { + request.Headers.Add("x-ms-file-permission-format", filePermissionFormat.Value.ToSerialString()); + } if (filePermissionKey != null) { request.Headers.Add("x-ms-file-permission-key", filePermissionKey); @@ -1369,19 +1373,20 @@ internal HttpMessage CreateStartCopyRequest(string copySource, int? timeout, IDi /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// A name-value pair to associate with a file storage object. /// If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + /// Optional. Available for version 2023-06-01 and later. Specifies the format in which the permission is returned. Acceptable values are SDDL or binary. If x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the permission is returned as a base64 string representing the binary encoding of the permission. /// Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. /// Parameter group. /// Parameter group. /// The cancellation token to use. /// is null. - public async Task> StartCopyAsync(string copySource, int? 
timeout = null, IDictionary metadata = null, string filePermission = null, string filePermissionKey = null, CopyFileSmbInfo copyFileSmbInfo = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> StartCopyAsync(string copySource, int? timeout = null, IDictionary metadata = null, string filePermission = null, FilePermissionFormat? filePermissionFormat = null, string filePermissionKey = null, CopyFileSmbInfo copyFileSmbInfo = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (copySource == null) { throw new ArgumentNullException(nameof(copySource)); } - using var message = CreateStartCopyRequest(copySource, timeout, metadata, filePermission, filePermissionKey, copyFileSmbInfo, shareFileRequestConditions); + using var message = CreateStartCopyRequest(copySource, timeout, metadata, filePermission, filePermissionFormat, filePermissionKey, copyFileSmbInfo, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileStartCopyHeaders(message.Response); switch (message.Response.Status) @@ -1398,19 +1403,20 @@ public async Task> StartCopyAsync(stri /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// A name-value pair to associate with a file storage object. /// If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + /// Optional. 
Available for version 2023-06-01 and later. Specifies the format in which the permission is returned. Acceptable values are SDDL or binary. If x-ms-file-permission-format is unspecified or explicitly set to SDDL, the permission is returned in SDDL format. If x-ms-file-permission-format is explicitly set to binary, the permission is returned as a base64 string representing the binary encoding of the permission. /// Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. /// Parameter group. /// Parameter group. /// The cancellation token to use. /// is null. - public ResponseWithHeaders StartCopy(string copySource, int? timeout = null, IDictionary metadata = null, string filePermission = null, string filePermissionKey = null, CopyFileSmbInfo copyFileSmbInfo = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders StartCopy(string copySource, int? timeout = null, IDictionary metadata = null, string filePermission = null, FilePermissionFormat? 
filePermissionFormat = null, string filePermissionKey = null, CopyFileSmbInfo copyFileSmbInfo = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (copySource == null) { throw new ArgumentNullException(nameof(copySource)); } - using var message = CreateStartCopyRequest(copySource, timeout, metadata, filePermission, filePermissionKey, copyFileSmbInfo, shareFileRequestConditions); + using var message = CreateStartCopyRequest(copySource, timeout, metadata, filePermission, filePermissionFormat, filePermissionKey, copyFileSmbInfo, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileStartCopyHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileCopyOptions.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileCopyOptions.cs index db7f3bc017ab9..d74113002f182 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileCopyOptions.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileCopyOptions.cs @@ -27,6 +27,13 @@ public class ShareFileCopyOptions /// public string FilePermission { get; set; } + /// + /// Specifies the format in which the file permission is returned. If unspecified or explicitly set to SDDL, + /// the permission is returned in SDDL format. If explicitly set to binary, the permission is returned as a base64 + /// string representing the binary encoding of the permission. + /// + public FilePermissionFormat? 
PermissionFormat { get; set; } + /// /// Specifies the option to copy file security descriptor from source file or /// to set it using the value which is defined by the header value of FilePermission diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs index 2d58482950b9a..dd1b5bf8633f3 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs @@ -1302,6 +1302,7 @@ public virtual Response StartCopy( metadata: options?.Metadata, smbProperties: options?.SmbProperties, filePermission: options?.FilePermission, + filePermissionFormat: options?.PermissionFormat, filePermissionCopyMode: options?.FilePermissionCopyMode, ignoreReadOnly: options?.IgnoreReadOnly, setArchiveAttribute: options?.Archive, @@ -1378,6 +1379,7 @@ public virtual Response StartCopy( metadata, smbProperties, filePermission, + filePermissionFormat: default, filePermissionCopyMode, ignoreReadOnly, setArchiveAttribute, @@ -1424,6 +1426,7 @@ public virtual Response StartCopy( metadata, smbProperties: default, filePermission: default, + filePermissionFormat: default, filePermissionCopyMode: default, ignoreReadOnly: default, setArchiveAttribute: default, @@ -1467,6 +1470,7 @@ await StartCopyInternal( metadata: options?.Metadata, smbProperties: options?.SmbProperties, filePermission: options?.FilePermission, + filePermissionFormat: options?.PermissionFormat, filePermissionCopyMode: options?.FilePermissionCopyMode, ignoreReadOnly: options?.IgnoreReadOnly, setArchiveAttribute: options?.Archive, @@ -1543,6 +1547,7 @@ await StartCopyInternal( metadata, smbProperties, filePermission, + filePermissionFormat: default, filePermissionCopyMode, ignoreReadOnly, setArchiveAttribute, @@ -1589,6 +1594,7 @@ await StartCopyInternal( metadata, smbProperties: default, filePermission: default, + filePermissionFormat: default, filePermissionCopyMode: default, 
ignoreReadOnly: default, setArchiveAttribute: default, @@ -1617,6 +1623,9 @@ await StartCopyInternal( /// /// Optional file permission to set for the file. /// + /// + /// Optional file permission format. + /// /// /// Specifies the option to copy file security descriptor from source file or /// to set it using the value which is defined by the header value of FilePermission @@ -1658,6 +1667,7 @@ private async Task> StartCopyInternal( Metadata metadata, FileSmbProperties smbProperties, string filePermission, + FilePermissionFormat? filePermissionFormat, PermissionCopyMode? filePermissionCopyMode, bool? ignoreReadOnly, bool? setArchiveAttribute, @@ -1768,6 +1778,7 @@ private async Task> StartCopyInternal( copySource: uriBuilder.ToString(), metadata: metadata, filePermission: filePermission, + filePermissionFormat: filePermissionFormat, filePermissionKey: smbProperties?.FilePermissionKey, copyFileSmbInfo: copyFileSmbInfo, shareFileRequestConditions: conditions, @@ -1780,6 +1791,7 @@ private async Task> StartCopyInternal( copySource: uriBuilder.ToString(), metadata: metadata, filePermission: filePermission, + filePermissionFormat: filePermissionFormat, filePermissionKey: smbProperties?.FilePermissionKey, copyFileSmbInfo: copyFileSmbInfo, shareFileRequestConditions: conditions, diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index facaa61c21784..9e0fc07591065 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/718996a0c4435626d2f55d4ab6e65da5ac48916c/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/09a930a7adfb8676e54f714d7ea8973caecb9667/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/FileClientTests.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/FileClientTests.cs index 19bd51da71f56..b1ce4cc25987a 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/FileClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/FileClientTests.cs @@ -1952,6 +1952,50 @@ await dest.StartCopyAsync( Assert.AreEqual(smbProperties.FileLastWrittenOn, propertiesResponse.Value.SmbProperties.FileLastWrittenOn); } + [RecordedTest] + [TestCase(null)] + [TestCase(FilePermissionFormat.Sddl)] + [TestCase(FilePermissionFormat.Binary)] + [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2025_01_05)] + public async Task StartCopyAsync_FilePermission_Format(FilePermissionFormat? 
filePermissionFormat) + { + // Arrange + await using DisposingFile testSource = await SharesClientBuilder.GetTestFileAsync(); + ShareFileClient source = testSource.File; + await using DisposingFile testDest = await SharesClientBuilder.GetTestFileAsync(); + ShareFileClient dest = testDest.File; + + var data = GetRandomBuffer(Constants.KB); + using (var stream = new MemoryStream(data)) + { + await source.UploadRangeAsync( + writeType: ShareFileRangeWriteType.Update, + range: new HttpRange(0, Constants.KB), + content: stream); + } + string filePermission; + if (filePermissionFormat == null || filePermissionFormat == FilePermissionFormat.Sddl) + { + filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL"; + } + else + { + filePermission = "AQAUhGwAAACIAAAAAAAAABQAAAACAFgAAwAAAAAAFAD/AR8AAQEAAAAAAAUSAAAAAAAYAP8BHwABAgAAAAAABSAAAAAgAgAAAAAkAKkAEgABBQAAAAAABRUAAABZUbgXZnJdJWRjOwuMmS4AAQUAAAAAAAUVAAAAoGXPfnhLm1/nfIdwr/1IAQEFAAAAAAAFFQAAAKBlz354S5tf53yHcAECAAA="; + } + + ShareFileCopyOptions options = new ShareFileCopyOptions + { + FilePermission = filePermission, + PermissionFormat = filePermissionFormat, + FilePermissionCopyMode = PermissionCopyMode.Override + }; + + // Act + await dest.StartCopyAsync( + sourceUri: source.Uri, + options: options); + } + [RecordedTest] [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2021_06_08)] public async Task StartCopyAsync_ChangeTime() diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs index eb73ce7ea2859..7d078484201bc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTestFixtureAttribute.cs @@ -43,8 +43,8 @@ public 
ShareClientTestFixtureAttribute(params object[] additionalParameters) }, additionalParameters: additionalParameters) { - RecordingServiceVersion = StorageVersionExtensions.LatestVersion; - LiveServiceVersions = new object[] { StorageVersionExtensions.MaxVersion, }; + RecordingServiceVersion = StorageVersionExtensions.MaxVersion; + LiveServiceVersions = new object[] { StorageVersionExtensions.LatestVersion, }; } } } From 65fb5655ec093112330adacdb7011258095e32f6 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Fri, 20 Sep 2024 19:15:42 -0500 Subject: [PATCH 16/25] Added tests for Set/Delete Immutability Policy and Set Legal Hold on Blob Snapshots and Versions (#46104) --- sdk/storage/Azure.Storage.Blobs/assets.json | 2 +- .../src/Generated/BlobRestClient.cs | 66 ++++++-- .../Azure.Storage.Blobs/src/autorest.md | 2 +- .../ImmutableStorageWithVersioningTests.cs | 147 ++++++++++++++++++ 4 files changed, 200 insertions(+), 17 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/assets.json b/sdk/storage/Azure.Storage.Blobs/assets.json index 377294f47b993..355b8279e12f5 100644 --- a/sdk/storage/Azure.Storage.Blobs/assets.json +++ b/sdk/storage/Azure.Storage.Blobs/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Blobs", - "Tag": "net/storage/Azure.Storage.Blobs_730bf5e40e" + "Tag": "net/storage/Azure.Storage.Blobs_be439321c1" } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs index 4cb72cdf210a6..d627d63506ca4 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs @@ -778,7 +778,7 @@ public ResponseWithHeaders SetHttpHeaders(int? timeou } } - internal HttpMessage CreateSetImmutabilityPolicyRequest(int? timeout, DateTimeOffset? 
ifUnmodifiedSince, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode) + internal HttpMessage CreateSetImmutabilityPolicyRequest(int? timeout, DateTimeOffset? ifUnmodifiedSince, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, string snapshot, string versionId) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -790,6 +790,14 @@ internal HttpMessage CreateSetImmutabilityPolicyRequest(int? timeout, DateTimeOf { uri.AppendQuery("timeout", timeout.Value, true); } + if (snapshot != null) + { + uri.AppendQuery("snapshot", snapshot, true); + } + if (versionId != null) + { + uri.AppendQuery("versionid", versionId, true); + } request.Uri = uri; request.Headers.Add("x-ms-version", _version); if (ifUnmodifiedSince != null) @@ -813,10 +821,12 @@ internal HttpMessage CreateSetImmutabilityPolicyRequest(int? timeout, DateTimeOf /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. /// Specifies the date time when the blobs immutability policy is set to expire. /// Specifies the immutability policy mode to set on the blob. + /// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + /// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. /// The cancellation token to use. - public async Task> SetImmutabilityPolicyAsync(int? timeout = null, DateTimeOffset? ifUnmodifiedSince = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? 
immutabilityPolicyMode = null, CancellationToken cancellationToken = default) + public async Task> SetImmutabilityPolicyAsync(int? timeout = null, DateTimeOffset? ifUnmodifiedSince = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, string snapshot = null, string versionId = null, CancellationToken cancellationToken = default) { - using var message = CreateSetImmutabilityPolicyRequest(timeout, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode); + using var message = CreateSetImmutabilityPolicyRequest(timeout, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode, snapshot, versionId); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlobSetImmutabilityPolicyHeaders(message.Response); switch (message.Response.Status) @@ -833,10 +843,12 @@ public async Task> SetImmu /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. /// Specifies the date time when the blobs immutability policy is set to expire. /// Specifies the immutability policy mode to set on the blob. + /// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + /// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. /// The cancellation token to use. - public ResponseWithHeaders SetImmutabilityPolicy(int? timeout = null, DateTimeOffset? ifUnmodifiedSince = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? 
immutabilityPolicyMode = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders SetImmutabilityPolicy(int? timeout = null, DateTimeOffset? ifUnmodifiedSince = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, string snapshot = null, string versionId = null, CancellationToken cancellationToken = default) { - using var message = CreateSetImmutabilityPolicyRequest(timeout, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode); + using var message = CreateSetImmutabilityPolicyRequest(timeout, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode, snapshot, versionId); _pipeline.Send(message, cancellationToken); var headers = new BlobSetImmutabilityPolicyHeaders(message.Response); switch (message.Response.Status) @@ -848,7 +860,7 @@ public ResponseWithHeaders SetImmutabilityPoli } } - internal HttpMessage CreateDeleteImmutabilityPolicyRequest(int? timeout) + internal HttpMessage CreateDeleteImmutabilityPolicyRequest(int? timeout, string snapshot, string versionId) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -860,6 +872,14 @@ internal HttpMessage CreateDeleteImmutabilityPolicyRequest(int? timeout) { uri.AppendQuery("timeout", timeout.Value, true); } + if (snapshot != null) + { + uri.AppendQuery("snapshot", snapshot, true); + } + if (versionId != null) + { + uri.AppendQuery("versionid", versionId, true); + } request.Uri = uri; request.Headers.Add("x-ms-version", _version); request.Headers.Add("Accept", "application/xml"); @@ -868,10 +888,12 @@ internal HttpMessage CreateDeleteImmutabilityPolicyRequest(int? timeout) /// The Delete Immutability Policy operation deletes the immutability policy on the blob. /// The timeout parameter is expressed in seconds. 
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + /// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + /// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. /// The cancellation token to use. - public async Task> DeleteImmutabilityPolicyAsync(int? timeout = null, CancellationToken cancellationToken = default) + public async Task> DeleteImmutabilityPolicyAsync(int? timeout = null, string snapshot = null, string versionId = null, CancellationToken cancellationToken = default) { - using var message = CreateDeleteImmutabilityPolicyRequest(timeout); + using var message = CreateDeleteImmutabilityPolicyRequest(timeout, snapshot, versionId); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlobDeleteImmutabilityPolicyHeaders(message.Response); switch (message.Response.Status) @@ -885,10 +907,12 @@ public async Task> Dele /// The Delete Immutability Policy operation deletes the immutability policy on the blob. /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + /// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + /// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. /// The cancellation token to use. - public ResponseWithHeaders DeleteImmutabilityPolicy(int? timeout = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders DeleteImmutabilityPolicy(int? timeout = null, string snapshot = null, string versionId = null, CancellationToken cancellationToken = default) { - using var message = CreateDeleteImmutabilityPolicyRequest(timeout); + using var message = CreateDeleteImmutabilityPolicyRequest(timeout, snapshot, versionId); _pipeline.Send(message, cancellationToken); var headers = new BlobDeleteImmutabilityPolicyHeaders(message.Response); switch (message.Response.Status) @@ -900,7 +924,7 @@ public ResponseWithHeaders DeleteImmutabili } } - internal HttpMessage CreateSetLegalHoldRequest(bool legalHold, int? timeout) + internal HttpMessage CreateSetLegalHoldRequest(bool legalHold, int? timeout, string snapshot, string versionId) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -912,6 +936,14 @@ internal HttpMessage CreateSetLegalHoldRequest(bool legalHold, int? timeout) { uri.AppendQuery("timeout", timeout.Value, true); } + if (snapshot != null) + { + uri.AppendQuery("snapshot", snapshot, true); + } + if (versionId != null) + { + uri.AppendQuery("versionid", versionId, true); + } request.Uri = uri; request.Headers.Add("x-ms-version", _version); request.Headers.Add("x-ms-legal-hold", legalHold); @@ -922,10 +954,12 @@ internal HttpMessage CreateSetLegalHoldRequest(bool legalHold, int? timeout) /// The Set Legal Hold operation sets a legal hold on the blob. 
/// Specified if a legal hold should be set on the blob. /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. + /// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + /// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. /// The cancellation token to use. - public async Task> SetLegalHoldAsync(bool legalHold, int? timeout = null, CancellationToken cancellationToken = default) + public async Task> SetLegalHoldAsync(bool legalHold, int? timeout = null, string snapshot = null, string versionId = null, CancellationToken cancellationToken = default) { - using var message = CreateSetLegalHoldRequest(legalHold, timeout); + using var message = CreateSetLegalHoldRequest(legalHold, timeout, snapshot, versionId); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlobSetLegalHoldHeaders(message.Response); switch (message.Response.Status) @@ -940,10 +974,12 @@ public async Task> SetLegalHoldAsyn /// The Set Legal Hold operation sets a legal hold on the blob. /// Specified if a legal hold should be set on the blob. /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>. 
+ /// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob.</a>. + /// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. /// The cancellation token to use. - public ResponseWithHeaders SetLegalHold(bool legalHold, int? timeout = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders SetLegalHold(bool legalHold, int? timeout = null, string snapshot = null, string versionId = null, CancellationToken cancellationToken = default) { - using var message = CreateSetLegalHoldRequest(legalHold, timeout); + using var message = CreateSetLegalHoldRequest(legalHold, timeout, snapshot, versionId); _pipeline.Send(message, cancellationToken); var headers = new BlobSetLegalHoldHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index cda269a135e88..6407445e7bf16 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/7c58ab44b9cdd08fa5aae782a34bc42bd67ff59b/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/2d3b08fe43bc4a573acd166d3d2ba0c631b016fb/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true diff --git a/sdk/storage/Azure.Storage.Blobs/tests/ImmutableStorageWithVersioningTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/ImmutableStorageWithVersioningTests.cs index 3eac647db9cf8..af473b946047f 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/ImmutableStorageWithVersioningTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/ImmutableStorageWithVersioningTests.cs @@ -562,6 +562,92 @@ public async Task DeleteImmutibilityPolicyAsync() Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); } + [Test] + [ServiceVersion(Min = BlobClientOptions.ServiceVersion.V2020_06_12)] + public async Task SetDeleteImmutibilityPolicyAsync_Snapshot() + { + // Arrange + BlobBaseClient blob = await GetNewBlobClient(_containerClient); + + Response createSnapshotResponse = await blob.CreateSnapshotAsync(); + BlobBaseClient snapshotClient = blob.WithSnapshot(createSnapshotResponse.Value.Snapshot); + try + { + BlobImmutabilityPolicy immutabilityPolicy = new BlobImmutabilityPolicy + { + ExpiresOn = Recording.UtcNow.AddSeconds(5), + PolicyMode = BlobImmutabilityPolicyMode.Unlocked + }; + + // Act + await snapshotClient.SetImmutabilityPolicyAsync(immutabilityPolicy); + + // Assert that the base blob does not have an immutability policy. 
+ Response propertiesResponse = await blob.GetPropertiesAsync(); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); + + // Assert that the blob snapshot has an immutability policy. + propertiesResponse = await snapshotClient.GetPropertiesAsync(); + Assert.IsNotNull(propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn); + Assert.IsNotNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); + + await snapshotClient.DeleteImmutabilityPolicyAsync(); + + // Assert + propertiesResponse = await snapshotClient.GetPropertiesAsync(); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); + } + finally + { + await snapshotClient.DeleteAsync(); + } + } + + [Test] + [ServiceVersion(Min = BlobClientOptions.ServiceVersion.V2020_06_12)] + public async Task SetDeleteImmutibilityPolicyAsync_BlobVersion() + { + // Arrange + BlobBaseClient blob = await GetNewBlobClient(_containerClient); + + IDictionary metadata = BuildMetadata(); + + // Create Blob Version + Response setMetadataResponse = await blob.SetMetadataAsync(metadata); + BlobBaseClient versionClient = blob.WithVersion(setMetadataResponse.Value.VersionId); + + // Create another blob Version + await blob.SetMetadataAsync(new Dictionary()); + + BlobImmutabilityPolicy immutabilityPolicy = new BlobImmutabilityPolicy + { + ExpiresOn = Recording.UtcNow.AddSeconds(5), + PolicyMode = BlobImmutabilityPolicyMode.Unlocked + }; + + // Act + await versionClient.SetImmutabilityPolicyAsync(immutabilityPolicy); + + // Assert that the base blob does not have an immutability policy + Response propertiesResponse = await blob.GetPropertiesAsync(); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); + + // Assert that the blob version does have an immutability policy + 
propertiesResponse = await versionClient.GetPropertiesAsync(); + Assert.IsNotNull(propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn); + Assert.IsNotNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); + + await versionClient.DeleteImmutabilityPolicyAsync(); + + // Assert blob version does not have an immutability policy + propertiesResponse = await versionClient.GetPropertiesAsync(); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.ExpiresOn); + Assert.IsNull(propertiesResponse.Value.ImmutabilityPolicy.PolicyMode); + } + [Test] [ServiceVersion(Min = BlobClientOptions.ServiceVersion.V2020_06_12)] public async Task DeleteImmutibilityPolicyAsync_Error() @@ -621,6 +707,67 @@ public async Task SetLegalHoldAsync() Assert.IsFalse(response.Value.HasLegalHold); } + [Test] + [ServiceVersion(Min = BlobClientOptions.ServiceVersion.V2020_06_12)] + public async Task SetLegalHoldAsync_Snapshot() + { + // Arrange + BlobBaseClient blob = await GetNewBlobClient(_containerClient); + + Response createSnapshotResponse = await blob.CreateSnapshotAsync(); + BlobBaseClient snapshotClient = blob.WithSnapshot(createSnapshotResponse.Value.Snapshot); + + try + { + // Act + await snapshotClient.SetLegalHoldAsync(true); + + // Assert the blob snapshot has a legal hold + Response propertiesResponse = await snapshotClient.GetPropertiesAsync(); + Assert.IsTrue(propertiesResponse.Value.HasLegalHold); + + // Assert the base blob does not have a legal hold + propertiesResponse = await blob.GetPropertiesAsync(); + Assert.IsFalse(propertiesResponse.Value.HasLegalHold); + + await snapshotClient.SetLegalHoldAsync(false); + } + finally + { + await snapshotClient.DeleteAsync(); + } + } + + [Test] + [ServiceVersion(Min = BlobClientOptions.ServiceVersion.V2020_06_12)] + public async Task SetLegalHoldAsync_BlobVersion() + { + // Arrange + BlobBaseClient blob = await GetNewBlobClient(_containerClient); + + IDictionary metadata = BuildMetadata(); + + // Create Blob Version + Response 
setMetadataResponse = await blob.SetMetadataAsync(metadata); + BlobBaseClient versionClient = blob.WithVersion(setMetadataResponse.Value.VersionId); + + // Create another blob Version + await blob.SetMetadataAsync(new Dictionary()); + + // Act + await versionClient.SetLegalHoldAsync(true); + + // Assert the blob version has a legal hold + Response propertiesResponse = await versionClient.GetPropertiesAsync(); + Assert.IsTrue(propertiesResponse.Value.HasLegalHold); + + // Assert the base blob does not have a legal hold + propertiesResponse = await blob.GetPropertiesAsync(); + Assert.IsFalse(propertiesResponse.Value.HasLegalHold); + + await versionClient.SetLegalHoldAsync(false); + } + [Test] [ServiceVersion(Min = BlobClientOptions.ServiceVersion.V2020_06_12)] public async Task SetLegalHoldAsync_Error() From 5b77b5e2adb4a1c86faf5c0d4180f695fffe690d Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Fri, 20 Sep 2024 21:15:10 -0500 Subject: [PATCH 17/25] Files Provisioned Billing v2 (#45645) --- .../api/Azure.Storage.Files.Shares.net6.0.cs | 23 +++-- ...ure.Storage.Files.Shares.netstandard2.0.cs | 23 +++-- .../Azure.Storage.Files.Shares/assets.json | 2 +- .../src/Generated/Models/ShareErrorCode.cs | 6 ++ .../SharePropertiesInternal.Serialization.cs | 26 +++++- .../Models/SharePropertiesInternal.cs | 18 +++- .../src/Generated/ShareCreateHeaders.cs | 8 ++ .../src/Generated/ShareDeleteHeaders.cs | 4 + .../Generated/ShareGetPropertiesHeaders.cs | 8 ++ .../src/Generated/ShareRestClient.cs | 44 +++++++--- .../src/Generated/ShareRestoreHeaders.cs | 10 +++ .../Generated/ShareSetPropertiesHeaders.cs | 16 ++++ .../src/Models/ShareCreateOptions.cs | 13 +++ .../src/Models/ShareModelFactory.cs | 63 ++++++++++++- .../src/Models/ShareProperties.cs | 24 +++++ .../src/Models/ShareSetPropertiesOptions.cs | 14 +++ .../src/ShareClient.cs | 88 +++++++++++++++++-- .../src/ShareExtensions.cs | 12 ++- .../src/ShareServiceClient.cs | 8 ++ 
.../src/autorest.md | 2 +- .../tests/DisposingShare.cs | 7 +- .../tests/ServiceClientTests.cs | 27 ++++++ .../tests/ShareClientTests.cs | 75 ++++++++++++++-- 23 files changed, 479 insertions(+), 42 deletions(-) diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index 48e367ecc2190..04c126afb38cc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -25,11 +25,12 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential public virtual System.Threading.Tasks.Task> CreateDirectoryAsync(string directoryName, Azure.Storage.Files.Shares.Models.ShareDirectoryCreateOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> CreateDirectoryAsync(string directoryName, System.Collections.Generic.IDictionary metadata, Azure.Storage.Files.Shares.Models.FileSmbProperties smbProperties, string filePermission, System.Threading.CancellationToken cancellationToken) { throw null; } - public virtual Azure.Response CreateIfNotExists(Azure.Storage.Files.Shares.Models.ShareCreateOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response CreateIfNotExists(Azure.Storage.Files.Shares.Models.ShareCreateOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public virtual Azure.Response 
CreateIfNotExists(System.Collections.Generic.IDictionary metadata = null, int? quotaInGB = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(Azure.Storage.Files.Shares.Models.ShareCreateOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(System.Collections.Generic.IDictionary metadata = null, int? quotaInGB = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response CreateIfNotExists(System.Collections.Generic.IDictionary metadata, int? quotaInGB, System.Threading.CancellationToken cancellationToken) { throw null; } + public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(Azure.Storage.Files.Shares.Models.ShareCreateOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(System.Collections.Generic.IDictionary metadata, int? 
quotaInGB, System.Threading.CancellationToken cancellationToken) { throw null; } public virtual Azure.Response CreatePermission(Azure.Storage.Files.Shares.Models.ShareFilePermission permission, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response CreatePermission(string permission, System.Threading.CancellationToken cancellationToken) { throw null; } @@ -612,6 +613,8 @@ public ShareCreateOptions() { } public long? PaidBurstingMaxBandwidthMibps { get { throw null; } set { } } public long? PaidBurstingMaxIops { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareProtocols? Protocols { get { throw null; } set { } } + public long? ProvisionedMaxBandwidthMibps { get { throw null; } set { } } + public long? ProvisionedMaxIops { get { throw null; } set { } } public int? QuotaInGB { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareRootSquash? 
RootSquash { get { throw null; } set { } } } @@ -683,6 +686,8 @@ public ShareDirectorySetHttpHeadersOptions() { } public static Azure.Storage.Files.Shares.Models.ShareErrorCode EmptyMetadataKey { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode FeatureVersionMismatch { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode FileLockConflict { get { throw null; } } + public static Azure.Storage.Files.Shares.Models.ShareErrorCode FileShareProvisionedBandwidthDowngradeNotAllowed { get { throw null; } } + public static Azure.Storage.Files.Shares.Models.ShareErrorCode FileShareProvisionedIopsDowngradeNotAllowed { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode InsufficientAccountPermissions { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode InternalError { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode InvalidAuthenticationInfo { get { throw null; } } @@ -1113,7 +1118,9 @@ public static partial class ShareModelFactory public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), int? provisionedIops = default(int?), int? provisionedIngressMBps = default(int?), int? provisionedEgressMBps = default(int?), System.DateTimeOffset? nextAllowedQuotaDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? deletedOn = default(System.DateTimeOffset?), int? remainingRetentionDays = default(int?), Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? accessTierChangeTime = default(System.DateTimeOffset?), string accessTierTransitionState = null, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus = default(Azure.Storage.Files.Shares.Models.ShareLeaseStatus?), Azure.Storage.Files.Shares.Models.ShareLeaseState? 
leaseState = default(Azure.Storage.Files.Shares.Models.ShareLeaseState?), Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration = default(Azure.Storage.Files.Shares.Models.ShareLeaseDuration?), int? quotaInGB = default(int?), System.Collections.Generic.IDictionary metadata = null, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols = default(Azure.Storage.Files.Shares.Models.ShareProtocols?), Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash = default(Azure.Storage.Files.Shares.Models.ShareRootSquash?)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier, System.DateTimeOffset? lastModified, int? provisionedIops, int? provisionedIngressMBps, int? provisionedEgressMBps, System.DateTimeOffset? nextAllowedQuotaDowngradeTime, System.DateTimeOffset? deletedOn, int? remainingRetentionDays, Azure.ETag? eTag, System.DateTimeOffset? accessTierChangeTime, string accessTierTransitionState, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus, Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState, Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration, int? quotaInGB, System.Collections.Generic.IDictionary metadata, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols, Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess) { throw null; } - public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), int? provisionedIops = default(int?), int? provisionedIngressMBps = default(int?), int? provisionedEgressMBps = default(int?), System.DateTimeOffset? nextAllowedQuotaDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? deletedOn = default(System.DateTimeOffset?), int? 
remainingRetentionDays = default(int?), Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? accessTierChangeTime = default(System.DateTimeOffset?), string accessTierTransitionState = null, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus = default(Azure.Storage.Files.Shares.Models.ShareLeaseStatus?), Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState = default(Azure.Storage.Files.Shares.Models.ShareLeaseState?), Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration = default(Azure.Storage.Files.Shares.Models.ShareLeaseDuration?), int? quotaInGB = default(int?), System.Collections.Generic.IDictionary metadata = null, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols = default(Azure.Storage.Files.Shares.Models.ShareProtocols?), Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash = default(Azure.Storage.Files.Shares.Models.ShareRootSquash?), bool? enableSnapshotVirtualDirectoryAccess = default(bool?), bool? enablePaidBursting = default(bool?), long? paidBurstingMaxIops = default(long?), long? paidBustingMaxBandwidthMibps = default(long?)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier, System.DateTimeOffset? lastModified, int? provisionedIops, int? provisionedIngressMBps, int? provisionedEgressMBps, System.DateTimeOffset? nextAllowedQuotaDowngradeTime, System.DateTimeOffset? deletedOn, int? remainingRetentionDays, Azure.ETag? eTag, System.DateTimeOffset? accessTierChangeTime, string accessTierTransitionState, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus, Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState, Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration, int? quotaInGB, System.Collections.Generic.IDictionary metadata, Azure.Storage.Files.Shares.Models.ShareProtocols? 
protocols, Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? enablePaidBursting, long? paidBurstingMaxIops, long? paidBustingMaxBandwidthMibps) { throw null; } + public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), int? provisionedIops = default(int?), int? provisionedIngressMBps = default(int?), int? provisionedEgressMBps = default(int?), System.DateTimeOffset? nextAllowedQuotaDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? deletedOn = default(System.DateTimeOffset?), int? remainingRetentionDays = default(int?), Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? accessTierChangeTime = default(System.DateTimeOffset?), string accessTierTransitionState = null, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus = default(Azure.Storage.Files.Shares.Models.ShareLeaseStatus?), Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState = default(Azure.Storage.Files.Shares.Models.ShareLeaseState?), Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration = default(Azure.Storage.Files.Shares.Models.ShareLeaseDuration?), int? quotaInGB = default(int?), System.Collections.Generic.IDictionary metadata = null, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols = default(Azure.Storage.Files.Shares.Models.ShareProtocols?), Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash = default(Azure.Storage.Files.Shares.Models.ShareRootSquash?), bool? enableSnapshotVirtualDirectoryAccess = default(bool?), bool? enablePaidBursting = default(bool?), long? paidBurstingMaxIops = default(long?), long? paidBustingMaxBandwidthMibps = default(long?), long? includedBurstIops = default(long?), long? maxBurstCreditsForIops = default(long?), System.DateTimeOffset? 
nextAllowedProvisionedIopsDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? nextAllowedProvisionedBandwidthDowngradeTime = default(System.DateTimeOffset?)) { throw null; } public static Azure.Storage.Files.Shares.Models.ShareSnapshotInfo ShareSnapshotInfo(string snapshot, Azure.ETag eTag, System.DateTimeOffset lastModified) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.Shares.Models.ShareStatistics ShareStatistics(int shareUsageBytes) { throw null; } @@ -1132,11 +1139,15 @@ internal ShareProperties() { } public bool? EnablePaidBursting { get { throw null; } } public bool? EnableSnapshotVirtualDirectoryAccess { get { throw null; } } public Azure.ETag? ETag { get { throw null; } } + public long? IncludedBurstIops { get { throw null; } } public System.DateTimeOffset? LastModified { get { throw null; } } public Azure.Storage.Files.Shares.Models.ShareLeaseDuration? LeaseDuration { get { throw null; } } public Azure.Storage.Files.Shares.Models.ShareLeaseState? LeaseState { get { throw null; } } public Azure.Storage.Files.Shares.Models.ShareLeaseStatus? LeaseStatus { get { throw null; } } + public long? MaxBurstCreditsForIops { get { throw null; } } public System.Collections.Generic.IDictionary Metadata { get { throw null; } } + public System.DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime { get { throw null; } } + public System.DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime { get { throw null; } } public System.DateTimeOffset? NextAllowedQuotaDowngradeTime { get { throw null; } } public long? PaidBurstingMaxBandwidthMibps { get { throw null; } } public long? PaidBurstingMaxIops { get { throw null; } } @@ -1191,6 +1202,8 @@ public ShareSetPropertiesOptions() { } public bool? EnableSnapshotVirtualDirectoryAccess { get { throw null; } set { } } public long? 
PaidBurstingMaxBandwidthMibps { get { throw null; } set { } } public long? PaidBurstingMaxIops { get { throw null; } set { } } + public long? ProvisionedMaxBandwidthMibps { get { throw null; } set { } } + public long? ProvisionedMaxIops { get { throw null; } set { } } public int? QuotaInGB { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareRootSquash? RootSquash { get { throw null; } set { } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index 48e367ecc2190..04c126afb38cc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -25,11 +25,12 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential public virtual System.Threading.Tasks.Task> CreateDirectoryAsync(string directoryName, Azure.Storage.Files.Shares.Models.ShareDirectoryCreateOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> CreateDirectoryAsync(string directoryName, System.Collections.Generic.IDictionary metadata, Azure.Storage.Files.Shares.Models.FileSmbProperties smbProperties, string filePermission, System.Threading.CancellationToken cancellationToken) { throw null; } - public virtual Azure.Response CreateIfNotExists(Azure.Storage.Files.Shares.Models.ShareCreateOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response CreateIfNotExists(Azure.Storage.Files.Shares.Models.ShareCreateOptions options = null, 
System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - public virtual Azure.Response CreateIfNotExists(System.Collections.Generic.IDictionary metadata = null, int? quotaInGB = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(Azure.Storage.Files.Shares.Models.ShareCreateOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(System.Collections.Generic.IDictionary metadata = null, int? quotaInGB = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response CreateIfNotExists(System.Collections.Generic.IDictionary metadata, int? quotaInGB, System.Threading.CancellationToken cancellationToken) { throw null; } + public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(Azure.Storage.Files.Shares.Models.ShareCreateOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public virtual System.Threading.Tasks.Task> CreateIfNotExistsAsync(System.Collections.Generic.IDictionary metadata, int? 
quotaInGB, System.Threading.CancellationToken cancellationToken) { throw null; } public virtual Azure.Response CreatePermission(Azure.Storage.Files.Shares.Models.ShareFilePermission permission, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response CreatePermission(string permission, System.Threading.CancellationToken cancellationToken) { throw null; } @@ -612,6 +613,8 @@ public ShareCreateOptions() { } public long? PaidBurstingMaxBandwidthMibps { get { throw null; } set { } } public long? PaidBurstingMaxIops { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareProtocols? Protocols { get { throw null; } set { } } + public long? ProvisionedMaxBandwidthMibps { get { throw null; } set { } } + public long? ProvisionedMaxIops { get { throw null; } set { } } public int? QuotaInGB { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareRootSquash? 
RootSquash { get { throw null; } set { } } } @@ -683,6 +686,8 @@ public ShareDirectorySetHttpHeadersOptions() { } public static Azure.Storage.Files.Shares.Models.ShareErrorCode EmptyMetadataKey { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode FeatureVersionMismatch { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode FileLockConflict { get { throw null; } } + public static Azure.Storage.Files.Shares.Models.ShareErrorCode FileShareProvisionedBandwidthDowngradeNotAllowed { get { throw null; } } + public static Azure.Storage.Files.Shares.Models.ShareErrorCode FileShareProvisionedIopsDowngradeNotAllowed { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode InsufficientAccountPermissions { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode InternalError { get { throw null; } } public static Azure.Storage.Files.Shares.Models.ShareErrorCode InvalidAuthenticationInfo { get { throw null; } } @@ -1113,7 +1118,9 @@ public static partial class ShareModelFactory public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), int? provisionedIops = default(int?), int? provisionedIngressMBps = default(int?), int? provisionedEgressMBps = default(int?), System.DateTimeOffset? nextAllowedQuotaDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? deletedOn = default(System.DateTimeOffset?), int? remainingRetentionDays = default(int?), Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? accessTierChangeTime = default(System.DateTimeOffset?), string accessTierTransitionState = null, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus = default(Azure.Storage.Files.Shares.Models.ShareLeaseStatus?), Azure.Storage.Files.Shares.Models.ShareLeaseState? 
leaseState = default(Azure.Storage.Files.Shares.Models.ShareLeaseState?), Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration = default(Azure.Storage.Files.Shares.Models.ShareLeaseDuration?), int? quotaInGB = default(int?), System.Collections.Generic.IDictionary metadata = null, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols = default(Azure.Storage.Files.Shares.Models.ShareProtocols?), Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash = default(Azure.Storage.Files.Shares.Models.ShareRootSquash?)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier, System.DateTimeOffset? lastModified, int? provisionedIops, int? provisionedIngressMBps, int? provisionedEgressMBps, System.DateTimeOffset? nextAllowedQuotaDowngradeTime, System.DateTimeOffset? deletedOn, int? remainingRetentionDays, Azure.ETag? eTag, System.DateTimeOffset? accessTierChangeTime, string accessTierTransitionState, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus, Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState, Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration, int? quotaInGB, System.Collections.Generic.IDictionary metadata, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols, Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess) { throw null; } - public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), int? provisionedIops = default(int?), int? provisionedIngressMBps = default(int?), int? provisionedEgressMBps = default(int?), System.DateTimeOffset? nextAllowedQuotaDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? deletedOn = default(System.DateTimeOffset?), int? 
remainingRetentionDays = default(int?), Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? accessTierChangeTime = default(System.DateTimeOffset?), string accessTierTransitionState = null, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus = default(Azure.Storage.Files.Shares.Models.ShareLeaseStatus?), Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState = default(Azure.Storage.Files.Shares.Models.ShareLeaseState?), Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration = default(Azure.Storage.Files.Shares.Models.ShareLeaseDuration?), int? quotaInGB = default(int?), System.Collections.Generic.IDictionary metadata = null, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols = default(Azure.Storage.Files.Shares.Models.ShareProtocols?), Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash = default(Azure.Storage.Files.Shares.Models.ShareRootSquash?), bool? enableSnapshotVirtualDirectoryAccess = default(bool?), bool? enablePaidBursting = default(bool?), long? paidBurstingMaxIops = default(long?), long? paidBustingMaxBandwidthMibps = default(long?)) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier, System.DateTimeOffset? lastModified, int? provisionedIops, int? provisionedIngressMBps, int? provisionedEgressMBps, System.DateTimeOffset? nextAllowedQuotaDowngradeTime, System.DateTimeOffset? deletedOn, int? remainingRetentionDays, Azure.ETag? eTag, System.DateTimeOffset? accessTierChangeTime, string accessTierTransitionState, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus, Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState, Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration, int? quotaInGB, System.Collections.Generic.IDictionary metadata, Azure.Storage.Files.Shares.Models.ShareProtocols? 
protocols, Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? enablePaidBursting, long? paidBurstingMaxIops, long? paidBustingMaxBandwidthMibps) { throw null; } + public static Azure.Storage.Files.Shares.Models.ShareProperties ShareProperties(string accessTier = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), int? provisionedIops = default(int?), int? provisionedIngressMBps = default(int?), int? provisionedEgressMBps = default(int?), System.DateTimeOffset? nextAllowedQuotaDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? deletedOn = default(System.DateTimeOffset?), int? remainingRetentionDays = default(int?), Azure.ETag? eTag = default(Azure.ETag?), System.DateTimeOffset? accessTierChangeTime = default(System.DateTimeOffset?), string accessTierTransitionState = null, Azure.Storage.Files.Shares.Models.ShareLeaseStatus? leaseStatus = default(Azure.Storage.Files.Shares.Models.ShareLeaseStatus?), Azure.Storage.Files.Shares.Models.ShareLeaseState? leaseState = default(Azure.Storage.Files.Shares.Models.ShareLeaseState?), Azure.Storage.Files.Shares.Models.ShareLeaseDuration? leaseDuration = default(Azure.Storage.Files.Shares.Models.ShareLeaseDuration?), int? quotaInGB = default(int?), System.Collections.Generic.IDictionary metadata = null, Azure.Storage.Files.Shares.Models.ShareProtocols? protocols = default(Azure.Storage.Files.Shares.Models.ShareProtocols?), Azure.Storage.Files.Shares.Models.ShareRootSquash? rootSquash = default(Azure.Storage.Files.Shares.Models.ShareRootSquash?), bool? enableSnapshotVirtualDirectoryAccess = default(bool?), bool? enablePaidBursting = default(bool?), long? paidBurstingMaxIops = default(long?), long? paidBustingMaxBandwidthMibps = default(long?), long? includedBurstIops = default(long?), long? maxBurstCreditsForIops = default(long?), System.DateTimeOffset? 
nextAllowedProvisionedIopsDowngradeTime = default(System.DateTimeOffset?), System.DateTimeOffset? nextAllowedProvisionedBandwidthDowngradeTime = default(System.DateTimeOffset?)) { throw null; } public static Azure.Storage.Files.Shares.Models.ShareSnapshotInfo ShareSnapshotInfo(string snapshot, Azure.ETag eTag, System.DateTimeOffset lastModified) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public static Azure.Storage.Files.Shares.Models.ShareStatistics ShareStatistics(int shareUsageBytes) { throw null; } @@ -1132,11 +1139,15 @@ internal ShareProperties() { } public bool? EnablePaidBursting { get { throw null; } } public bool? EnableSnapshotVirtualDirectoryAccess { get { throw null; } } public Azure.ETag? ETag { get { throw null; } } + public long? IncludedBurstIops { get { throw null; } } public System.DateTimeOffset? LastModified { get { throw null; } } public Azure.Storage.Files.Shares.Models.ShareLeaseDuration? LeaseDuration { get { throw null; } } public Azure.Storage.Files.Shares.Models.ShareLeaseState? LeaseState { get { throw null; } } public Azure.Storage.Files.Shares.Models.ShareLeaseStatus? LeaseStatus { get { throw null; } } + public long? MaxBurstCreditsForIops { get { throw null; } } public System.Collections.Generic.IDictionary Metadata { get { throw null; } } + public System.DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime { get { throw null; } } + public System.DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime { get { throw null; } } public System.DateTimeOffset? NextAllowedQuotaDowngradeTime { get { throw null; } } public long? PaidBurstingMaxBandwidthMibps { get { throw null; } } public long? PaidBurstingMaxIops { get { throw null; } } @@ -1191,6 +1202,8 @@ public ShareSetPropertiesOptions() { } public bool? EnableSnapshotVirtualDirectoryAccess { get { throw null; } set { } } public long? 
PaidBurstingMaxBandwidthMibps { get { throw null; } set { } } public long? PaidBurstingMaxIops { get { throw null; } set { } } + public long? ProvisionedMaxBandwidthMibps { get { throw null; } set { } } + public long? ProvisionedMaxIops { get { throw null; } set { } } public int? QuotaInGB { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareRootSquash? RootSquash { get { throw null; } set { } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index 4bb5fc7bb03c9..36fd5cc8635bc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": "net/storage/Azure.Storage.Files.Shares_27dac02512" + "Tag": "net/storage/Azure.Storage.Files.Shares_164369860b" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareErrorCode.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareErrorCode.cs index 317aac136060d..a0b07bdd181dd 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareErrorCode.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/ShareErrorCode.cs @@ -30,6 +30,8 @@ public ShareErrorCode(string value) private const string ConditionHeadersNotSupportedValue = "ConditionHeadersNotSupported"; private const string ConditionNotMetValue = "ConditionNotMet"; private const string EmptyMetadataKeyValue = "EmptyMetadataKey"; + private const string FileShareProvisionedBandwidthDowngradeNotAllowedValue = "FileShareProvisionedBandwidthDowngradeNotAllowed"; + private const string FileShareProvisionedIopsDowngradeNotAllowedValue = "FileShareProvisionedIopsDowngradeNotAllowed"; private const string InsufficientAccountPermissionsValue = "InsufficientAccountPermissions"; private const string 
InternalErrorValue = "InternalError"; private const string InvalidAuthenticationInfoValue = "InvalidAuthenticationInfo"; @@ -106,6 +108,10 @@ public ShareErrorCode(string value) public static ShareErrorCode ConditionNotMet { get; } = new ShareErrorCode(ConditionNotMetValue); /// EmptyMetadataKey. public static ShareErrorCode EmptyMetadataKey { get; } = new ShareErrorCode(EmptyMetadataKeyValue); + /// FileShareProvisionedBandwidthDowngradeNotAllowed. + public static ShareErrorCode FileShareProvisionedBandwidthDowngradeNotAllowed { get; } = new ShareErrorCode(FileShareProvisionedBandwidthDowngradeNotAllowedValue); + /// FileShareProvisionedIopsDowngradeNotAllowed. + public static ShareErrorCode FileShareProvisionedIopsDowngradeNotAllowed { get; } = new ShareErrorCode(FileShareProvisionedIopsDowngradeNotAllowedValue); /// InsufficientAccountPermissions. public static ShareErrorCode InsufficientAccountPermissions { get; } = new ShareErrorCode(InsufficientAccountPermissionsValue); /// InternalError. diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.Serialization.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.Serialization.cs index bccf5b4b21fba..b01f216bb03d6 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.Serialization.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.Serialization.cs @@ -37,6 +37,10 @@ internal static SharePropertiesInternal DeserializeSharePropertiesInternal(XElem bool? paidBurstingEnabled = default; long? paidBurstingMaxIops = default; long? paidBurstingMaxBandwidthMibps = default; + long? includedBurstIops = default; + long? maxBurstCreditsForIops = default; + DateTimeOffset? nextAllowedProvisionedIopsDowngradeTime = default; + DateTimeOffset? 
nextAllowedProvisionedBandwidthDowngradeTime = default; if (element.Element("Last-Modified") is XElement lastModifiedElement) { lastModified = lastModifiedElement.GetDateTimeOffsetValue("R"); @@ -125,6 +129,22 @@ internal static SharePropertiesInternal DeserializeSharePropertiesInternal(XElem { paidBurstingMaxBandwidthMibps = (long?)paidBurstingMaxBandwidthMibpsElement; } + if (element.Element("IncludedBurstIops") is XElement includedBurstIopsElement) + { + includedBurstIops = (long?)includedBurstIopsElement; + } + if (element.Element("MaxBurstCreditsForIops") is XElement maxBurstCreditsForIopsElement) + { + maxBurstCreditsForIops = (long?)maxBurstCreditsForIopsElement; + } + if (element.Element("NextAllowedProvisionedIopsDowngradeTime") is XElement nextAllowedProvisionedIopsDowngradeTimeElement) + { + nextAllowedProvisionedIopsDowngradeTime = nextAllowedProvisionedIopsDowngradeTimeElement.GetDateTimeOffsetValue("R"); + } + if (element.Element("NextAllowedProvisionedBandwidthDowngradeTime") is XElement nextAllowedProvisionedBandwidthDowngradeTimeElement) + { + nextAllowedProvisionedBandwidthDowngradeTime = nextAllowedProvisionedBandwidthDowngradeTimeElement.GetDateTimeOffsetValue("R"); + } return new SharePropertiesInternal( lastModified, etag, @@ -147,7 +167,11 @@ internal static SharePropertiesInternal DeserializeSharePropertiesInternal(XElem enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxIops, - paidBurstingMaxBandwidthMibps); + paidBurstingMaxBandwidthMibps, + includedBurstIops, + maxBurstCreditsForIops, + nextAllowedProvisionedIopsDowngradeTime, + nextAllowedProvisionedBandwidthDowngradeTime); } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.cs index b550b3e07bd7e..eeb13840f2042 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.cs +++ 
b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/Models/SharePropertiesInternal.cs @@ -50,7 +50,11 @@ internal SharePropertiesInternal(DateTimeOffset lastModified, string etag, int q /// /// /// - internal SharePropertiesInternal(DateTimeOffset lastModified, string etag, int quota, int? provisionedIops, int? provisionedIngressMBps, int? provisionedEgressMBps, int? provisionedBandwidthMiBps, DateTimeOffset? nextAllowedQuotaDowngradeTime, DateTimeOffset? deletedTime, int? remainingRetentionDays, string accessTier, DateTimeOffset? accessTierChangeTime, string accessTierTransitionState, ShareLeaseStatus? leaseStatus, ShareLeaseState? leaseState, ShareLeaseDuration? leaseDuration, string enabledProtocols, ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? paidBurstingEnabled, long? paidBurstingMaxIops, long? paidBurstingMaxBandwidthMibps) + /// + /// + /// + /// + internal SharePropertiesInternal(DateTimeOffset lastModified, string etag, int quota, int? provisionedIops, int? provisionedIngressMBps, int? provisionedEgressMBps, int? provisionedBandwidthMiBps, DateTimeOffset? nextAllowedQuotaDowngradeTime, DateTimeOffset? deletedTime, int? remainingRetentionDays, string accessTier, DateTimeOffset? accessTierChangeTime, string accessTierTransitionState, ShareLeaseStatus? leaseStatus, ShareLeaseState? leaseState, ShareLeaseDuration? leaseDuration, string enabledProtocols, ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? paidBurstingEnabled, long? paidBurstingMaxIops, long? paidBurstingMaxBandwidthMibps, long? includedBurstIops, long? maxBurstCreditsForIops, DateTimeOffset? nextAllowedProvisionedIopsDowngradeTime, DateTimeOffset? 
nextAllowedProvisionedBandwidthDowngradeTime) { LastModified = lastModified; Etag = etag; @@ -74,6 +78,10 @@ internal SharePropertiesInternal(DateTimeOffset lastModified, string etag, int q PaidBurstingEnabled = paidBurstingEnabled; PaidBurstingMaxIops = paidBurstingMaxIops; PaidBurstingMaxBandwidthMibps = paidBurstingMaxBandwidthMibps; + IncludedBurstIops = includedBurstIops; + MaxBurstCreditsForIops = maxBurstCreditsForIops; + NextAllowedProvisionedIopsDowngradeTime = nextAllowedProvisionedIopsDowngradeTime; + NextAllowedProvisionedBandwidthDowngradeTime = nextAllowedProvisionedBandwidthDowngradeTime; } /// Gets the last modified. @@ -120,5 +128,13 @@ internal SharePropertiesInternal(DateTimeOffset lastModified, string etag, int q public long? PaidBurstingMaxIops { get; } /// Gets the paid bursting max bandwidth mibps. public long? PaidBurstingMaxBandwidthMibps { get; } + /// Gets the included burst iops. + public long? IncludedBurstIops { get; } + /// Gets the max burst credits for iops. + public long? MaxBurstCreditsForIops { get; } + /// Gets the next allowed provisioned iops downgrade time. + public DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime { get; } + /// Gets the next allowed provisioned bandwidth downgrade time. + public DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime { get; } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs index 6f22731fa70ca..38995245569cb 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs @@ -21,5 +21,13 @@ public ShareCreateHeaders(Response response) public DateTimeOffset? LastModified => _response.Headers.TryGetValue("Last-Modified", out DateTimeOffset? value) ? value : null; /// Indicates the version of the File service used to execute the request. 
public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; + /// Returns the current share quota in GB. + public long? Quota => _response.Headers.TryGetValue("x-ms-share-quota", out long? value) ? value : null; + /// The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + public long? ShareProvisionedIops => _response.Headers.TryGetValue("x-ms-share-provisioned-iops", out long? value) ? value : null; + /// The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + public long? ShareProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out long? value) ? value : null; + /// ShareIncludedBurstIops. + public long? ShareIncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs index 980991becbeb8..f0831ee19381d 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs @@ -18,5 +18,9 @@ public ShareDeleteHeaders(Response response) } /// Indicates the version of the File service used to execute the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; + /// The "live share" portion of the data that the customer will be billed for in the soft-deleted capacity (logical storage size). + public long? XMsShareUsageBytes => _response.Headers.TryGetValue("x-ms-share-usage-bytes", out long? value) ? 
value : null; + /// The snapshot share portion of the data that the customer will be billed for in the soft-deleted capacity (this is the delta, or "physical storage size"). + public long? XMsShareSnapshotUsageBytes => _response.Headers.TryGetValue("x-ms-share-snapshot-usage-bytes", out long? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs index 86a01b76969d8..0465b6de626d5 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs @@ -61,5 +61,13 @@ public ShareGetPropertiesHeaders(Response response) public long? PaidBurstingMaxIops => _response.Headers.TryGetValue("x-ms-share-paid-bursting-max-iops", out long? value) ? value : null; /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. public long? PaidBurstingMaxBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-paid-bursting-max-bandwidth-mibps", out long? value) ? value : null; + /// Return the calculated burst IOPS of the share. + public long? IncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; + /// Returned the calculated maximum burst credits. This is not the current burst credit level, but the maximum burst credits the share can have. + public long? MaxBurstCreditsForIops => _response.Headers.TryGetValue("x-ms-share-max-burst-credits-for-iops", out long? value) ? value : null; + /// Return timestamp for provisioned IOPS following existing rules for provisioned storage GiB. + public DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-iops-downgrade-time", out DateTimeOffset? value) ? 
value : null; + /// Return timestamp for provisioned throughput following existing rules for provisioned storage GiB. + public DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time", out DateTimeOffset? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs index 3012d3d8735b1..7a9fbec28b317 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs @@ -44,7 +44,7 @@ public ShareRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipelin _fileRequestIntent = fileRequestIntent; } - internal HttpMessage CreateCreateRequest(int? timeout, IDictionary metadata, int? quota, ShareAccessTier? accessTier, string enabledProtocols, ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? paidBurstingEnabled, long? paidBurstingMaxBandwidthMibps, long? paidBurstingMaxIops) + internal HttpMessage CreateCreateRequest(int? timeout, IDictionary metadata, int? quota, ShareAccessTier? accessTier, string enabledProtocols, ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? paidBurstingEnabled, long? paidBurstingMaxBandwidthMibps, long? paidBurstingMaxIops, long? shareProvisionedIops, long? shareProvisionedBandwidthMibps) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -98,6 +98,14 @@ internal HttpMessage CreateCreateRequest(int? timeout, IDictionary Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. 
Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. + /// Optional. The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. + /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). /// The cancellation token to use. - public async Task> CreateAsync(int? timeout = null, IDictionary metadata = null, int? quota = null, ShareAccessTier? accessTier = null, string enabledProtocols = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, CancellationToken cancellationToken = default) + public async Task> CreateAsync(int? timeout = null, IDictionary metadata = null, int? quota = null, ShareAccessTier? accessTier = null, string enabledProtocols = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? 
shareProvisionedBandwidthMibps = null, CancellationToken cancellationToken = default) { - using var message = CreateCreateRequest(timeout, metadata, quota, accessTier, enabledProtocols, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops); + using var message = CreateCreateRequest(timeout, metadata, quota, accessTier, enabledProtocols, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops, shareProvisionedIops, shareProvisionedBandwidthMibps); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new ShareCreateHeaders(message.Response); switch (message.Response.Status) @@ -139,10 +149,12 @@ public async Task> CreateAsync(int? time /// Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. + /// Optional. The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. + /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). /// The cancellation token to use. - public ResponseWithHeaders Create(int? timeout = null, IDictionary metadata = null, int? quota = null, ShareAccessTier? accessTier = null, string enabledProtocols = null, ShareRootSquash? rootSquash = null, bool? 
enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Create(int? timeout = null, IDictionary metadata = null, int? quota = null, ShareAccessTier? accessTier = null, string enabledProtocols = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? shareProvisionedBandwidthMibps = null, CancellationToken cancellationToken = default) { - using var message = CreateCreateRequest(timeout, metadata, quota, accessTier, enabledProtocols, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops); + using var message = CreateCreateRequest(timeout, metadata, quota, accessTier, enabledProtocols, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops, shareProvisionedIops, shareProvisionedBandwidthMibps); _pipeline.Send(message, cancellationToken); var headers = new ShareCreateHeaders(message.Response); switch (message.Response.Status) @@ -917,7 +929,7 @@ public ResponseWithHeaders GetPermis } } - internal HttpMessage CreateSetPropertiesRequest(int? timeout, int? quota, ShareAccessTier? accessTier, ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? paidBurstingEnabled, long? paidBurstingMaxBandwidthMibps, long? paidBurstingMaxIops, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateSetPropertiesRequest(int? timeout, int? quota, ShareAccessTier? accessTier, ShareRootSquash? rootSquash, bool? enableSnapshotVirtualDirectoryAccess, bool? paidBurstingEnabled, long? paidBurstingMaxBandwidthMibps, long? paidBurstingMaxIops, long? 
shareProvisionedIops, long? shareProvisionedBandwidthMibps, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -968,6 +980,14 @@ internal HttpMessage CreateSetPropertiesRequest(int? timeout, int? quota, ShareA { request.Headers.Add("x-ms-file-request-intent", _fileRequestIntent.Value.ToString()); } + if (shareProvisionedIops != null) + { + request.Headers.Add("x-ms-share-provisioned-iops", shareProvisionedIops.Value); + } + if (shareProvisionedBandwidthMibps != null) + { + request.Headers.Add("x-ms-share-provisioned-bandwidth-mibps", shareProvisionedBandwidthMibps.Value); + } request.Headers.Add("Accept", "application/xml"); return message; } @@ -981,11 +1001,13 @@ internal HttpMessage CreateSetPropertiesRequest(int? timeout, int? quota, ShareA /// Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. + /// Optional. The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. + /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). /// Parameter group. /// The cancellation token to use. - public async Task> SetPropertiesAsync(int? timeout = null, int? quota = null, ShareAccessTier? accessTier = null, ShareRootSquash? rootSquash = null, bool? 
enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> SetPropertiesAsync(int? timeout = null, int? quota = null, ShareAccessTier? accessTier = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? shareProvisionedBandwidthMibps = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateSetPropertiesRequest(timeout, quota, accessTier, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops, shareFileRequestConditions); + using var message = CreateSetPropertiesRequest(timeout, quota, accessTier, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops, shareProvisionedIops, shareProvisionedBandwidthMibps, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new ShareSetPropertiesHeaders(message.Response); switch (message.Response.Status) @@ -1006,11 +1028,13 @@ public async Task> SetPropertiesA /// Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. + /// Optional. The provisioned IOPS of the share. 
If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. + /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). /// Parameter group. /// The cancellation token to use. - public ResponseWithHeaders SetProperties(int? timeout = null, int? quota = null, ShareAccessTier? accessTier = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders SetProperties(int? timeout = null, int? quota = null, ShareAccessTier? accessTier = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? 
shareProvisionedBandwidthMibps = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateSetPropertiesRequest(timeout, quota, accessTier, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops, shareFileRequestConditions); + using var message = CreateSetPropertiesRequest(timeout, quota, accessTier, rootSquash, enableSnapshotVirtualDirectoryAccess, paidBurstingEnabled, paidBurstingMaxBandwidthMibps, paidBurstingMaxIops, shareProvisionedIops, shareProvisionedBandwidthMibps, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new ShareSetPropertiesHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs index cd5411eee99d0..38d5ffc7b9409 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs @@ -21,5 +21,15 @@ public ShareRestoreHeaders(Response response) public DateTimeOffset? LastModified => _response.Headers.TryGetValue("Last-Modified", out DateTimeOffset? value) ? value : null; /// Indicates the version of the File service used to execute the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; + /// Returns the current share quota in GB. + public long? Quota => _response.Headers.TryGetValue("x-ms-share-quota", out long? value) ? value : null; + /// Returns the current share provisioned ipos. + public long? ProvisionedIops => _response.Headers.TryGetValue("x-ms-share-provisioned-iops", out long? value) ? value : null; + /// Returns the current share provisioned bandwidth in megabits per second. + public long? 
ProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out long? value) ? value : null; + /// Return the calculated burst IOPS of the share. + public long? IncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; + /// Returned the calculated maximum burst credits. This is not the current burst credit level, but the maximum burst credits the share can have. + public long? MaxBurstCreditsForIops => _response.Headers.TryGetValue("x-ms-share-max-burst-credits-for-iops", out long? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs index 85e942d84cf1f..9bacf8101fd8e 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs @@ -21,5 +21,21 @@ public ShareSetPropertiesHeaders(Response response) public DateTimeOffset? LastModified => _response.Headers.TryGetValue("Last-Modified", out DateTimeOffset? value) ? value : null; /// Indicates the version of the File service used to execute the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; + /// Returns the current share quota in GB. + public long? Quota => _response.Headers.TryGetValue("x-ms-share-quota", out long? value) ? value : null; + /// Returns the current share provisioned ipos. + public long? ProvisionedIops => _response.Headers.TryGetValue("x-ms-share-provisioned-iops", out long? value) ? value : null; + /// Returns the current share provisioned bandwidth in megabits per second. + public long? ProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out long? value) ? 
value : null; + /// Return the calculated burst IOPS of the share. + public long? IncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; + /// Returned the calculated maximum burst credits. This is not the current burst credit level, but the maximum burst credits the share can have. + public long? MaxBurstCreditsForIops => _response.Headers.TryGetValue("x-ms-share-max-burst-credits-for-iops", out long? value) ? value : null; + /// Returns the current share next allowed quota downgrade time. + public DateTimeOffset? NextAllowedQuotaDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-quota-downgrade-time", out DateTimeOffset? value) ? value : null; + /// Return timestamp for provisioned IOPS following existing rules for provisioned storage GiB. + public DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-iops-downgrade-time", out DateTimeOffset? value) ? value : null; + /// Return timestamp for provisioned throughput following existing rules for provisioned storage GiB. + public DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time", out DateTimeOffset? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareCreateOptions.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareCreateOptions.cs index 5da39cf3fb5cc..39ebe5d6ea003 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareCreateOptions.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareCreateOptions.cs @@ -64,5 +64,18 @@ public class ShareCreateOptions /// Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// public long? PaidBurstingMaxBandwidthMibps { get; set; } + + /// + /// Optional. 
Only applicable to provisioned v2 storage accounts. + /// The provisioned IOPS of the share. For SSD, minimum IOPS is 3,000 and maximum is 100,000. For HDD, minimum IOPS is 500 and maximum is 50,000. + /// + public long? ProvisionedMaxIops { get; set; } + + /// + /// Optional. Only applicable to provisioned v2 storage accounts. + /// The provisioned throughput of the share. For SSD, minimum throughput is 125 MiB/sec and maximum is 10,340 MiB/sec. + /// For HDD, minimum throughput is 60 MiB/sec and maximum is 5,125 MiB/sec. + /// + public long? ProvisionedMaxBandwidthMibps { get; set; } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareModelFactory.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareModelFactory.cs index 618fdfc0c6aba..a708316d026fc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareModelFactory.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareModelFactory.cs @@ -48,7 +48,68 @@ public static ShareProperties ShareProperties( bool? enableSnapshotVirtualDirectoryAccess = default, bool? enablePaidBursting = default, long? paidBurstingMaxIops = default, - long? paidBustingMaxBandwidthMibps = default) + long? paidBustingMaxBandwidthMibps = default, + long? includedBurstIops = default, + long? maxBurstCreditsForIops = default, + DateTimeOffset? nextAllowedProvisionedIopsDowngradeTime = default, + DateTimeOffset? 
nextAllowedProvisionedBandwidthDowngradeTime = default) + => new ShareProperties() + { + AccessTier = accessTier, + LastModified = lastModified, + ProvisionedIops = provisionedIops, + ProvisionedIngressMBps = provisionedIngressMBps, + ProvisionedEgressMBps = provisionedEgressMBps, + NextAllowedQuotaDowngradeTime = nextAllowedQuotaDowngradeTime, + DeletedOn = deletedOn, + RemainingRetentionDays = remainingRetentionDays, + ETag = eTag, + AccessTierChangeTime = accessTierChangeTime, + AccessTierTransitionState = accessTierTransitionState, + LeaseStatus = leaseStatus, + LeaseState = leaseState, + LeaseDuration = leaseDuration, + QuotaInGB = quotaInGB, + Metadata = metadata, + Protocols = protocols, + RootSquash = rootSquash, + EnableSnapshotVirtualDirectoryAccess = enableSnapshotVirtualDirectoryAccess, + EnablePaidBursting = enablePaidBursting, + PaidBurstingMaxIops = paidBurstingMaxIops, + PaidBurstingMaxBandwidthMibps = paidBustingMaxBandwidthMibps, + IncludedBurstIops = includedBurstIops, + MaxBurstCreditsForIops = maxBurstCreditsForIops, + NextAllowedProvisionedIopsDowngradeTime = nextAllowedProvisionedIopsDowngradeTime, + NextAllowedProvisionedBandwidthDowngradeTime = nextAllowedProvisionedBandwidthDowngradeTime, + }; + + /// + /// Creates a new ShareProperties instance for mocking. + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public static ShareProperties ShareProperties( + string accessTier, + DateTimeOffset? lastModified, + int? provisionedIops, + int? provisionedIngressMBps, + int? provisionedEgressMBps, + DateTimeOffset? nextAllowedQuotaDowngradeTime, + DateTimeOffset? deletedOn, + int? remainingRetentionDays, + ETag? eTag, + DateTimeOffset? accessTierChangeTime, + string accessTierTransitionState, + ShareLeaseStatus? leaseStatus, + ShareLeaseState? leaseState, + ShareLeaseDuration? leaseDuration, + int? quotaInGB, + IDictionary metadata, + ShareProtocols? protocols, + ShareRootSquash? rootSquash, + bool? 
enableSnapshotVirtualDirectoryAccess, + bool? enablePaidBursting, + long? paidBurstingMaxIops, + long? paidBustingMaxBandwidthMibps) => new ShareProperties() { AccessTier = accessTier, diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareProperties.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareProperties.cs index 743ad88232164..ef2ec2569f595 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareProperties.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareProperties.cs @@ -137,6 +137,30 @@ public class ShareProperties /// public long? PaidBurstingMaxBandwidthMibps { get; internal set; } + /// + /// Only applicable to provisioned v2 storage accounts. + /// The calculated burst IOPS of the share. + /// + public long? IncludedBurstIops { get; internal set; } + + /// + /// Only applicable to provisioned v2 storage accounts. + /// The calculated maximum burst credits. This is not the current burst credit level, but the maximum burst credits the share can have. + /// + public long? MaxBurstCreditsForIops { get; internal set; } + + /// + /// Only applicable to provisioned v2 storage accounts. + /// The time the share can be downgraded to lower provisioned IOPs. + /// + public DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime { get; internal set; } + + /// + /// Only applicable to provisioned v2 storage accounts. + /// The time the share can be downgraded to lower provisioned bandwidth. + /// + public DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime { get; internal set; } + /// /// Internal constructor. 
/// diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareSetPropertiesOptions.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareSetPropertiesOptions.cs index 81dfbf4a5c0be..fd4b4f1b0a061 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareSetPropertiesOptions.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareSetPropertiesOptions.cs @@ -49,6 +49,20 @@ public class ShareSetPropertiesOptions /// public long? PaidBurstingMaxBandwidthMibps { get; set; } + /// + /// Optional. Supported in version 2025-01-05 and above. Only applicable to provisioned v2 storage accounts. + /// Sets the max provisioned IOPs for a share. For SSD, min IOPs is 3,000 and max is 100,000. + /// For HDD, min IOPs is 500 and max is 50,000. + /// + public long? ProvisionedMaxIops { get; set; } + + /// + /// Optional. Supported in version 2025-01-05 and above. Only applicable to provisioned v2 storage accounts. + /// Sets the max provisioned bandwidth for a share. For SSD, min bandwidth is 125 MiB/sec and max is 10,340 MiB/sec. + /// For HDD, min bandwidth is 60 MiB/sec and max is 5,120 MiB/sec. + /// + public long? ProvisionedMaxBandwidthMibps { get; set; } + /// /// Optional to add conditions /// on setting the share's properties. 
diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareClient.cs index 7c683888ee9d0..75d4430e5b26d 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareClient.cs @@ -490,6 +490,8 @@ public virtual Response Create( options?.EnablePaidBursting, options?.PaidBurstingMaxIops, options?.PaidBurstingMaxBandwidthMibps, + options?.ProvisionedMaxIops, + options?.ProvisionedMaxBandwidthMibps, async: false, cancellationToken) .EnsureCompleted(); @@ -531,6 +533,8 @@ await CreateInternal( options?.EnablePaidBursting, options?.PaidBurstingMaxIops, options?.PaidBurstingMaxBandwidthMibps, + options?.ProvisionedMaxIops, + options?.ProvisionedMaxBandwidthMibps, async: true, cancellationToken) .ConfigureAwait(false); @@ -577,6 +581,8 @@ public virtual Response Create( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthMibps: default, async: false, cancellationToken) .EnsureCompleted(); @@ -623,6 +629,8 @@ await CreateInternal( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthMibps: default, async: true, cancellationToken) .ConfigureAwait(false); @@ -668,6 +676,12 @@ await CreateInternal( /// Optional. Supported in version 2024-11-04 and above. Only applicable for premium file storage accounts. /// Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// + /// + /// Provisioned max IOPS. + /// + /// + /// Provisioned max bandwidth MiBps. + /// /// /// Whether to invoke the operation asynchronously. /// @@ -696,6 +710,8 @@ internal async Task> CreateInternal( bool? enablePaidBursting, long? paidBurstingMaxIops, long? paidBurstingMaxBandwidthMibps, + long? 
provisionedMaxIops, + long? provisionedMaxBandwidthMibps, bool async, CancellationToken cancellationToken, string operationName = default) @@ -728,6 +744,8 @@ internal async Task> CreateInternal( paidBurstingEnabled: enablePaidBursting, paidBurstingMaxIops: paidBurstingMaxIops, paidBurstingMaxBandwidthMibps: paidBurstingMaxBandwidthMibps, + shareProvisionedIops: provisionedMaxIops, + shareProvisionedBandwidthMibps: provisionedMaxBandwidthMibps, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -743,6 +761,8 @@ internal async Task> CreateInternal( paidBurstingEnabled: enablePaidBursting, paidBurstingMaxIops: paidBurstingMaxIops, paidBurstingMaxBandwidthMibps: paidBurstingMaxBandwidthMibps, + shareProvisionedIops: provisionedMaxIops, + shareProvisionedBandwidthMibps: provisionedMaxBandwidthMibps, cancellationToken: cancellationToken); } @@ -791,7 +811,7 @@ internal async Task> CreateInternal( /// a failure occurs. /// public virtual Response CreateIfNotExists( - ShareCreateOptions options, + ShareCreateOptions options = default, CancellationToken cancellationToken = default) => CreateIfNotExistsInternal( options?.Metadata, @@ -803,6 +823,8 @@ public virtual Response CreateIfNotExists( options?.EnablePaidBursting, options?.PaidBurstingMaxIops, options?.PaidBurstingMaxBandwidthMibps, + options?.ProvisionedMaxIops, + options?.ProvisionedMaxBandwidthMibps, async: false, cancellationToken).EnsureCompleted(); @@ -831,7 +853,7 @@ public virtual Response CreateIfNotExists( /// a failure occurs. 
/// public virtual async Task> CreateIfNotExistsAsync( - ShareCreateOptions options, + ShareCreateOptions options = default, CancellationToken cancellationToken = default) => await CreateIfNotExistsInternal( options?.Metadata, @@ -843,6 +865,8 @@ await CreateIfNotExistsInternal( options?.EnablePaidBursting, options?.PaidBurstingMaxIops, options?.PaidBurstingMaxBandwidthMibps, + options?.ProvisionedMaxIops, + options?.ProvisionedMaxBandwidthMibps, async: true, cancellationToken).ConfigureAwait(false); @@ -874,10 +898,12 @@ await CreateIfNotExistsInternal( /// a failure occurs. /// [EditorBrowsable(EditorBrowsableState.Never)] +#pragma warning disable AZC0002 // DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken. public virtual Response CreateIfNotExists( - Metadata metadata = default, - int? quotaInGB = default, - CancellationToken cancellationToken = default) => +#pragma warning restore AZC0002 // DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken. + Metadata metadata, + int? quotaInGB, + CancellationToken cancellationToken) => CreateIfNotExistsInternal( metadata, quotaInGB, @@ -888,6 +914,8 @@ public virtual Response CreateIfNotExists( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthMibps: default, async: false, cancellationToken).EnsureCompleted(); @@ -918,10 +946,14 @@ public virtual Response CreateIfNotExists( /// A will be thrown if /// a failure occurs. /// + [EditorBrowsable(EditorBrowsableState.Never)] +#pragma warning disable AZC0002 // DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken. public virtual async Task> CreateIfNotExistsAsync( - Metadata metadata = default, - int? 
quotaInGB = default, - CancellationToken cancellationToken = default) => +#pragma warning restore AZC0002 // DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken. + + Metadata metadata, + int? quotaInGB, + CancellationToken cancellationToken) => await CreateIfNotExistsInternal( metadata, quotaInGB, @@ -932,6 +964,8 @@ await CreateIfNotExistsInternal( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthMibps: default, async: true, cancellationToken).ConfigureAwait(false); @@ -976,6 +1010,12 @@ await CreateIfNotExistsInternal( /// Optional. Supported in version 2024-11-04 and above. Only applicable for premium file storage accounts. /// Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// + /// + /// Provisioned max IOPS. + /// + /// + /// Provisioned max bandwidth MiBps. + /// /// /// Whether to invoke the operation asynchronously. /// @@ -1001,6 +1041,8 @@ private async Task> CreateIfNotExistsInternal( bool? enablePaidBursting, long? paidBurstingMaxIops, long? paidBurstingMaxBandwidthMibps, + long? provisionedMaxIops, + long? 
provisionedMaxBandwidthMibps, bool async, CancellationToken cancellationToken) { @@ -1024,6 +1066,8 @@ private async Task> CreateIfNotExistsInternal( enablePaidBursting, paidBurstingMaxIops, paidBurstingMaxBandwidthMibps, + provisionedMaxIops, + provisionedMaxBandwidthMibps, async, cancellationToken, operationName: $"{nameof(ShareClient)}.{nameof(CreateIfNotExists)}") @@ -1993,6 +2037,8 @@ public virtual Response SetProperties( enablePaidBursting: options?.EnablePaidBursting, paidBurstingMaxIops: options?.PaidBurstingMaxIops, paidBurstingMaxBandwidthMibps: options?.PaidBurstingMaxBandwidthMibps, + provisionedMaxIops: options?.ProvisionedMaxIops, + provisionedMaxBandwidthBandwidthMibps: options?.ProvisionedMaxBandwidthMibps, conditions: options?.Conditions, operationName: $"{nameof(ShareClient)}.{nameof(SetProperties)}", async: false, @@ -2032,6 +2078,8 @@ await SetPropertiesInternal( enablePaidBursting: options?.EnablePaidBursting, paidBurstingMaxIops: options?.PaidBurstingMaxIops, paidBurstingMaxBandwidthMibps: options?.PaidBurstingMaxBandwidthMibps, + provisionedMaxIops: options?.ProvisionedMaxIops, + provisionedMaxBandwidthBandwidthMibps: options?.ProvisionedMaxBandwidthMibps, conditions: options?.Conditions, operationName: $"{nameof(ShareClient)}.{nameof(SetProperties)}", async: true, @@ -2072,6 +2120,16 @@ await SetPropertiesInternal( /// Optional. Supported in version 2024-11-04 and above. Only applicable for premium file storage accounts. /// Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// + /// + /// Optional. Supported in version 2025-01-05 and above. Only applicable to provisioned v2 storage accounts. + /// Sets the max provisioned IOPs for a share. For SSD, min IOPs is 3,000 and max is 100,000. + /// For HDD, min IOPs is 500 and max is 50,000. + /// + /// + /// Optional. Supported in version 2025-01-05 and above. Only applicable to provisioned v2 storage accounts. 
+ /// Sets the max provisioned bandwidth for a share. For SSD, min bandwidth is 125 MiB/sec and max is 10,340 MiB/sec. + /// For HDD, min bandwidth is 60 MiB/sec and max is 5,120 MiB/sec. + /// /// /// Optional to add conditions /// on setting the quota. @@ -2102,6 +2160,8 @@ internal virtual async Task> SetPropertiesInternal( bool? enablePaidBursting, long? paidBurstingMaxIops, long? paidBurstingMaxBandwidthMibps, + long? provisionedMaxIops, + long? provisionedMaxBandwidthBandwidthMibps, ShareFileRequestConditions conditions, string operationName, bool async, @@ -2134,6 +2194,8 @@ internal virtual async Task> SetPropertiesInternal( paidBurstingEnabled: enablePaidBursting, paidBurstingMaxIops: paidBurstingMaxIops, paidBurstingMaxBandwidthMibps: paidBurstingMaxBandwidthMibps, + shareProvisionedIops: provisionedMaxIops, + shareProvisionedBandwidthMibps: provisionedMaxBandwidthBandwidthMibps, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -2148,6 +2210,8 @@ internal virtual async Task> SetPropertiesInternal( paidBurstingEnabled: enablePaidBursting, paidBurstingMaxIops: paidBurstingMaxIops, paidBurstingMaxBandwidthMibps: paidBurstingMaxBandwidthMibps, + shareProvisionedIops: provisionedMaxIops, + shareProvisionedBandwidthMibps: provisionedMaxBandwidthBandwidthMibps, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } @@ -2212,6 +2276,8 @@ public virtual Response SetQuota( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthBandwidthMibps: default, conditions: conditions, operationName: $"{nameof(ShareClient)}.{nameof(SetQuota)}", async: false, @@ -2258,6 +2324,8 @@ await SetPropertiesInternal( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthBandwidthMibps: default, conditions: 
conditions, operationName: $"{nameof(ShareClient)}.{nameof(SetQuota)}", async: true, @@ -2302,6 +2370,8 @@ public virtual Response SetQuota( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthBandwidthMibps: default, conditions: default, operationName: $"{nameof(ShareClient)}.{nameof(SetQuota)}", async: false, @@ -2345,6 +2415,8 @@ await SetPropertiesInternal( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthBandwidthMibps: default, conditions: default, operationName: $"{nameof(ShareClient)}.{nameof(SetQuota)}", async: true, diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareExtensions.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareExtensions.cs index 00f3c357b5746..4d7a0950ab0d6 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareExtensions.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareExtensions.cs @@ -713,7 +713,11 @@ internal static ShareProperties ToShareProperties(this ResponseWithHeaders CreateShare( enablePaidBursting: options?.EnablePaidBursting, paidBurstingMaxIops: options?.PaidBurstingMaxIops, paidBurstingMaxBandwidthMibps: options?.PaidBurstingMaxBandwidthMibps, + provisionedMaxIops: options?.ProvisionedMaxIops, + provisionedMaxBandwidthMibps: options?.ProvisionedMaxBandwidthMibps, async: false, cancellationToken: cancellationToken, operationName: $"{nameof(ShareServiceClient)}.{nameof(CreateShare)}") @@ -944,6 +946,8 @@ public virtual async Task> CreateShareAsync( enablePaidBursting: options?.EnablePaidBursting, paidBurstingMaxIops: options?.PaidBurstingMaxIops, paidBurstingMaxBandwidthMibps: options?.PaidBurstingMaxBandwidthMibps, + provisionedMaxIops: options?.ProvisionedMaxIops, + provisionedMaxBandwidthMibps: options?.ProvisionedMaxBandwidthMibps, async: true, cancellationToken: cancellationToken, 
operationName: $"{nameof(ShareServiceClient)}.{nameof(CreateShare)}") @@ -1001,6 +1005,8 @@ public virtual Response CreateShare( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthMibps: default, async: false, cancellationToken: cancellationToken, operationName: $"{nameof(ShareServiceClient)}.{nameof(CreateShare)}") @@ -1058,6 +1064,8 @@ public virtual async Task> CreateShareAsync( enablePaidBursting: default, paidBurstingMaxIops: default, paidBurstingMaxBandwidthMibps: default, + provisionedMaxIops: default, + provisionedMaxBandwidthMibps: default, async: true, cancellationToken: cancellationToken, operationName: $"{nameof(ShareServiceClient)}.{nameof(CreateShare)}") diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 9e0fc07591065..22f96a9fbc323 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/09a930a7adfb8676e54f714d7ea8973caecb9667/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/133677b644bcae8e8ada9c3af24d6dee63665e66/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/DisposingShare.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/DisposingShare.cs index 32765a7e04f29..4389023663cf5 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/DisposingShare.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/DisposingShare.cs @@ -4,6 +4,7 @@ using System; using System.Collections.Generic; using System.Threading.Tasks; +using Azure.Storage.Files.Shares.Models; using Azure.Storage.Test.Shared; namespace Azure.Storage.Files.Shares.Tests @@ -16,7 +17,11 @@ public class DisposingShare : IDisposingContainer public static async Task CreateAsync(ShareClient share, IDictionary metadata) { - await share.CreateIfNotExistsAsync(metadata: metadata); + ShareCreateOptions options = new ShareCreateOptions + { + Metadata = metadata + }; + await share.CreateIfNotExistsAsync(options); return new DisposingShare(share); } diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ServiceClientTests.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ServiceClientTests.cs index 01f071ca1dafb..e52b1b2da1bc6 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ServiceClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ServiceClientTests.cs @@ -431,6 +431,33 @@ public async Task ListSharesSegmentAsync_OAuth() Assert.IsTrue(shares.All(c => c.Properties.Metadata == null)); } + [RecordedTest] + [PlaybackOnly("https://github.com/Azure/azure-sdk-for-net/issues/45675")] + 
[ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2025_01_05)] + public async Task ListSharesSegmentAsync_ProvisionedBilling() + { + // Arrange + ShareServiceClient service = SharesClientBuilder.GetServiceClient_SharedKey(); + + // Ensure at least one share + await using DisposingShare test = await GetTestShareAsync(service); + ShareClient share = test.Share; + + List shares = new List(); + await foreach (ShareItem item in service.GetSharesAsync()) + { + shares.Add(item); + } + + ShareItem shareItem = shares.FirstOrDefault(); + + // Assert + Assert.IsNotNull(shareItem.Properties.IncludedBurstIops); + Assert.IsNotNull(shareItem.Properties.MaxBurstCreditsForIops); + Assert.IsNotNull(shareItem.Properties.NextAllowedProvisionedIopsDowngradeTime); + Assert.IsNotNull(shareItem.Properties.NextAllowedProvisionedBandwidthDowngradeTime); + } + [RecordedTest] public async Task CreateShareAsync() { diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs index 36c7a115d8ecc..7b004470f00a5 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareClientTests.cs @@ -5,7 +5,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Reflection; using System.Threading.Tasks; using Azure.Core.TestFramework; using Azure.Storage.Files.Shares.Models; @@ -13,8 +12,6 @@ using Azure.Storage.Sas; using Azure.Storage.Test; using NUnit.Framework; -using System.Threading; -using Azure.Identity; using Moq; namespace Azure.Storage.Files.Shares.Tests @@ -364,6 +361,33 @@ public async Task CreateAsync_AccessTier() await share.DeleteAsync(); } + [RecordedTest] + [PlaybackOnly("https://github.com/Azure/azure-sdk-for-net/issues/45675")] + [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2025_01_05)] + public async Task CreateAsync_ProvisionedMaxIopsAndBandwidth() + { + // Arrange + var 
shareName = GetNewShareName(); + ShareServiceClient service = SharesClientBuilder.GetServiceClient_SharedKey(); + ShareClient share = InstrumentClient(service.GetShareClient(shareName)); + ShareCreateOptions options = new ShareCreateOptions + { + ProvisionedMaxIops = 500, + ProvisionedMaxBandwidthMibps = 125 + }; + + // Act + Response response = await share.CreateAsync(options); + + // Assert + Response propertiesResponse = await share.GetPropertiesAsync(); + Assert.AreEqual(500, propertiesResponse.Value.ProvisionedIops); + Assert.AreEqual(125, propertiesResponse.Value.ProvisionedBandwidthMiBps); + + // Cleanup + await share.DeleteAsync(); + } + [RecordedTest] [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2019_12_12)] public async Task CreateAsync_AccessTier_Premium() @@ -1937,6 +1961,33 @@ public async Task SetPropertiesAsync_PaidBursting() Assert.AreEqual(1000, response.Value.PaidBurstingMaxBandwidthMibps); } + [RecordedTest] + [PlaybackOnly("https://github.com/Azure/azure-sdk-for-net/issues/45675")] + [ServiceVersion(Min = ShareClientOptions.ServiceVersion.V2025_01_05)] + public async Task SetPropertiesAsync_ProvisionedBilling() + { + // Arrange + await using DisposingShare test = await GetTestShareAsync(); + + ShareSetPropertiesOptions setPropertiesOptions = new ShareSetPropertiesOptions + { + ProvisionedMaxIops = 3000, + ProvisionedMaxBandwidthMibps = 125 + }; + + // Act + await test.Share.SetPropertiesAsync(setPropertiesOptions); + + // Assert + Response response = await test.Share.GetPropertiesAsync(); + Assert.AreEqual(3000, response.Value.ProvisionedIops); + Assert.AreEqual(125, response.Value.ProvisionedBandwidthMiBps); + Assert.IsNotNull(response.Value.IncludedBurstIops); + Assert.IsNotNull(response.Value.MaxBurstCreditsForIops); + Assert.IsNotNull(response.Value.NextAllowedProvisionedIopsDowngradeTime); + Assert.IsNotNull(response.Value.NextAllowedProvisionedBandwidthDowngradeTime); + } + [RecordedTest] public async Task SetQuotaAsync() { 
@@ -2022,7 +2073,11 @@ public async Task DeleteAsync() var shareName = GetNewShareName(); ShareServiceClient service = SharesClientBuilder.GetServiceClient_SharedKey(); ShareClient share = InstrumentClient(service.GetShareClient(shareName)); - await share.CreateIfNotExistsAsync(quotaInGB: 1); + ShareCreateOptions options = new ShareCreateOptions + { + QuotaInGB = 1 + }; + await share.CreateIfNotExistsAsync(options); // Act Response response = await share.DeleteAsync(false); @@ -2038,7 +2093,11 @@ public async Task DeleteAsync_IncludeLeasedSnapshots() // Arrange ShareServiceClient service = SharesClientBuilder.GetServiceClient_SharedKey(); ShareClient share = InstrumentClient(service.GetShareClient(GetNewShareName())); - await share.CreateIfNotExistsAsync(quotaInGB: 1); + ShareCreateOptions createOptions = new ShareCreateOptions + { + QuotaInGB = 1 + }; + await share.CreateIfNotExistsAsync(createOptions); // Create a snapshot Response snapshotResponse0 = await share.CreateSnapshotAsync(); @@ -2186,7 +2245,11 @@ public async Task DeleteAsync_OAuth() var shareName = GetNewShareName(); ShareServiceClient service = GetServiceClient_OAuth(); ShareClient share = InstrumentClient(service.GetShareClient(shareName)); - await share.CreateIfNotExistsAsync(quotaInGB: 1); + ShareCreateOptions options = new ShareCreateOptions + { + QuotaInGB = 1 + }; + await share.CreateIfNotExistsAsync(options); // Act Response response = await share.DeleteAsync(false); From c0952c639f5bf36d27d8b05ae5402ffcd00dfde3 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Mon, 23 Sep 2024 14:43:16 -0500 Subject: [PATCH 18/25] Regenerated after merging Structured Data swagger (#46131) --- .../Generated/AppendBlobAppendBlockHeaders.cs | 2 + .../src/Generated/AppendBlobRestClient.cs | 22 +++++++-- .../src/Generated/BlobDownloadHeaders.cs | 4 ++ .../src/Generated/BlobRestClient.cs | 16 +++++-- .../src/Generated/BlockBlobRestClient.cs | 44 
++++++++++++++---- .../Generated/BlockBlobStageBlockHeaders.cs | 2 + .../src/Generated/BlockBlobUploadHeaders.cs | 2 + .../src/Generated/PageBlobRestClient.cs | 22 +++++++-- .../Generated/PageBlobUploadPagesHeaders.cs | 2 + .../Azure.Storage.Blobs/src/autorest.md | 2 +- .../src/Generated/FileSystemRestClient.cs | 2 +- .../src/Generated/PathAppendDataHeaders.cs | 2 + .../src/Generated/PathRestClient.cs | 46 ++++++++++++++----- .../src/Generated/PathUpdateHeaders.cs | 2 + .../src/Generated/ServiceRestClient.cs | 2 +- .../src/autorest.md | 2 +- .../src/Generated/FileDownloadHeaders.cs | 4 ++ .../src/Generated/FileRestClient.cs | 38 +++++++++++---- .../src/Generated/FileUploadRangeHeaders.cs | 2 + .../src/autorest.md | 2 +- 20 files changed, 169 insertions(+), 51 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs index 9303ec3a3d653..48139cc16a682 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobAppendBlockHeaders.cs @@ -35,5 +35,7 @@ public AppendBlobAppendBlockHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs index 62c45c554783a..a3d0eca1ec405 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/AppendBlobRestClient.cs @@ -219,7 +219,7 @@ public ResponseWithHeaders Create(long contentLength, i } } - internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, string leaseId, long? maxSize, long? appendPosition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, string leaseId, long? maxSize, long? appendPosition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string structuredBodyType, long? 
structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -285,6 +285,14 @@ internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, i request.Headers.Add("x-ms-if-tags", ifTags); } request.Headers.Add("x-ms-version", _version); + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -314,16 +322,18 @@ internal HttpMessage CreateAppendBlockRequest(long contentLength, Stream body, i /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> AppendBlockAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> AppendBlockAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new AppendBlobAppendBlockHeaders(message.Response); switch (message.Response.Status) @@ -353,16 +363,18 @@ public async Task> AppendBlock /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. 
/// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders AppendBlock(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders AppendBlock(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, string leaseId = null, long? maxSize = null, long? appendPosition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateAppendBlockRequest(contentLength, body, timeout, transactionalContentMD5, transactionalContentCrc64, leaseId, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new AppendBlobAppendBlockHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs index ad17079901a72..1897117cb01d8 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobDownloadHeaders.cs @@ -96,6 +96,10 @@ public BlobDownloadHeaders(Response response) public BlobImmutabilityPolicyMode? ImmutabilityPolicyMode => _response.Headers.TryGetValue("x-ms-immutability-policy-mode", out string value) ? value.ToBlobImmutabilityPolicyMode() : null; /// Indicates if a legal hold is present on the blob. public bool? LegalHold => _response.Headers.TryGetValue("x-ms-legal-hold", out bool? value) ? value : null; + /// Indicates the response body contains a structured message and specifies the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; + /// The length of the blob/file content inside the message body when the response body is returned as a structured message. Will always be smaller than Content-Length. + public long? StructuredContentLength => _response.Headers.TryGetValue("x-ms-structured-content-length", out long? value) ? value : null; /// If the request is to read a specified range and the x-ms-range-get-content-crc64 is set to true, then the request returns a crc64 for the range, as long as the range size is less than or equal to 4 MB. If both x-ms-range-get-content-crc64 & x-ms-range-get-content-md5 is specified in the same request, it will fail with 400(Bad Request). public byte[] ContentCrc64 => _response.Headers.TryGetValue("x-ms-content-crc64", out byte[] value) ? value : null; public string ErrorCode => _response.Headers.TryGetValue("x-ms-error-code", out string value) ? value : null; diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs index d627d63506ca4..2702d622e4bd8 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlobRestClient.cs @@ -40,7 +40,7 @@ public BlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline _version = version ?? throw new ArgumentNullException(nameof(version)); } - internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, int? timeout, string range, string leaseId, bool? rangeGetContentMD5, bool? rangeGetContentCRC64, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, int? timeout, string range, string leaseId, bool? rangeGetContentMD5, bool? 
rangeGetContentCRC64, string structuredBodyType, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -77,6 +77,10 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in { request.Headers.Add("x-ms-range-get-content-crc64", rangeGetContentCRC64.Value); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } if (encryptionKey != null) { request.Headers.Add("x-ms-encryption-key", encryptionKey); @@ -122,6 +126,7 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in /// If specified, the operation only succeeds if the resource's lease is active and matches this ID. /// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. /// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size. + /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. 
@@ -131,9 +136,9 @@ internal HttpMessage CreateDownloadRequest(string snapshot, string versionId, in /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. /// The cancellation token to use. - public async Task> DownloadAsync(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> DownloadAsync(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string structuredBodyType = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, structuredBodyType, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlobDownloadHeaders(message.Response); switch (message.Response.Status) @@ -159,6 +164,7 @@ public async Task> DownloadAsyn /// If specified, the operation only succeeds if the resource's lease is active and matches this ID. /// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. /// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 MB in size. + /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". 
Must be provided if the x-ms-encryption-key header is provided. @@ -168,9 +174,9 @@ public async Task> DownloadAsyn /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. /// The cancellation token to use. - public ResponseWithHeaders Download(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Download(string snapshot = null, string versionId = null, int? timeout = null, string range = null, string leaseId = null, bool? rangeGetContentMD5 = null, bool? rangeGetContentCRC64 = null, string structuredBodyType = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? 
ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateDownloadRequest(snapshot, versionId, timeout, range, leaseId, rangeGetContentMD5, rangeGetContentCRC64, structuredBodyType, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); _pipeline.Send(message, cancellationToken); var headers = new BlobDownloadHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs index 3f4241dfce9b6..78ef424f66b13 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobRestClient.cs @@ -40,7 +40,7 @@ public BlockBlobRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pip _version = version ?? throw new ArgumentNullException(nameof(version)); } - internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, string blobContentType, string blobContentEncoding, string blobContentLanguage, byte[] blobContentMD5, string blobCacheControl, IDictionary metadata, string leaseId, string blobContentDisposition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, AccessTier? tier, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string blobTagsString, DateTimeOffset? 
immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, bool? legalHold, byte[] transactionalContentCrc64) + internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? timeout, byte[] transactionalContentMD5, string blobContentType, string blobContentEncoding, string blobContentLanguage, byte[] blobContentMD5, string blobCacheControl, IDictionary metadata, string leaseId, string blobContentDisposition, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, AccessTier? tier, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string blobTagsString, DateTimeOffset? immutabilityPolicyExpiry, BlobImmutabilityPolicyMode? immutabilityPolicyMode, bool? legalHold, byte[] transactionalContentCrc64, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -146,6 +146,14 @@ internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? t { request.Headers.Add("x-ms-content-crc64", transactionalContentCrc64, "D"); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); if (transactionalContentMD5 != null) { @@ -185,16 +193,18 @@ internal HttpMessage CreateUploadRequest(long contentLength, Stream body, int? t /// Specifies the immutability policy mode to set on the blob. /// Specified if a legal hold should be set on the blob. /// Specify the transactional crc64 for the body, to be validated by the service. + /// Required if the request body is a structured message. Specifies the message schema version and properties. 
+ /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> UploadAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, CancellationToken cancellationToken = default) + public async Task> UploadAsync(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? 
immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64); + using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlockBlobUploadHeaders(message.Response); switch (message.Response.Status) @@ -234,16 +244,18 @@ public async Task> UploadAsync(long /// Specifies the immutability policy mode to set on the blob. /// Specified if a legal hold should be set on the blob. /// Specify the transactional crc64 for the body, to be validated by the service. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. 
Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders Upload(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? legalHold = null, byte[] transactionalContentCrc64 = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Upload(long contentLength, Stream body, int? timeout = null, byte[] transactionalContentMD5 = null, string blobContentType = null, string blobContentEncoding = null, string blobContentLanguage = null, byte[] blobContentMD5 = null, string blobCacheControl = null, IDictionary metadata = null, string leaseId = null, string blobContentDisposition = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, AccessTier? tier = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string blobTagsString = null, DateTimeOffset? immutabilityPolicyExpiry = null, BlobImmutabilityPolicyMode? immutabilityPolicyMode = null, bool? 
legalHold = null, byte[] transactionalContentCrc64 = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64); + using var message = CreateUploadRequest(contentLength, body, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseId, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, transactionalContentCrc64, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new BlockBlobUploadHeaders(message.Response); switch (message.Response.Status) @@ -494,7 +506,7 @@ public ResponseWithHeaders PutBlobFromUrl(long c } } - internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope) + internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? 
timeout, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -533,6 +545,14 @@ internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, request.Headers.Add("x-ms-encryption-scope", encryptionScope); } request.Headers.Add("x-ms-version", _version); + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -556,9 +576,11 @@ internal HttpMessage CreateStageBlockRequest(string blockId, long contentLength, /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// or is null. 
- public async Task> StageBlockAsync(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, CancellationToken cancellationToken = default) + public async Task> StageBlockAsync(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (blockId == null) { @@ -569,7 +591,7 @@ public async Task> StageBlockAsy throw new ArgumentNullException(nameof(body)); } - using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope); + using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new BlockBlobStageBlockHeaders(message.Response); switch (message.Response.Status) @@ -593,9 +615,11 @@ public async Task> StageBlockAsy /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". 
Must be provided if the x-ms-encryption-key header is provided. /// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// or is null. - public ResponseWithHeaders StageBlock(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders StageBlock(string blockId, long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (blockId == null) { @@ -606,7 +630,7 @@ public ResponseWithHeaders StageBlock(string blockId throw new ArgumentNullException(nameof(body)); } - using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope); + using var message = CreateStageBlockRequest(blockId, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new BlockBlobStageBlockHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs index 7888b27dd7383..b13a3b7d1609a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobStageBlockHeaders.cs @@ -29,5 +29,7 @@ public BlockBlobStageBlockHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs index 1cfbd3924fa55..ca024b1fb5d84 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/BlockBlobUploadHeaders.cs @@ -31,5 +31,7 @@ public BlockBlobUploadHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs index 0aea4f28d32ff..68a9e85b00d1b 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobRestClient.cs @@ -235,7 +235,7 @@ public ResponseWithHeaders Create(long contentLength, lon } } - internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string range, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, long? ifSequenceNumberLessThanOrEqualTo, long? ifSequenceNumberLessThan, long? ifSequenceNumberEqualTo, DateTimeOffset? 
ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags) + internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, byte[] transactionalContentMD5, byte[] transactionalContentCrc64, int? timeout, string range, string leaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, string encryptionScope, long? ifSequenceNumberLessThanOrEqualTo, long? ifSequenceNumberLessThan, long? ifSequenceNumberEqualTo, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string ifMatch, string ifNoneMatch, string ifTags, string structuredBodyType, long? structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -310,6 +310,14 @@ internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, b request.Headers.Add("x-ms-if-tags", ifTags); } request.Headers.Add("x-ms-version", _version); + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); request.Headers.Add("Content-Length", contentLength); if (transactionalContentMD5 != null) @@ -341,16 +349,18 @@ internal HttpMessage CreateUploadPagesRequest(long contentLength, Stream body, b /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. 
/// The cancellation token to use. /// is null. - public async Task> UploadPagesAsync(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public async Task> UploadPagesAsync(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PageBlobUploadPagesHeaders(message.Response); switch (message.Response.Status) @@ -382,16 +392,18 @@ public async Task> UploadPagesAs /// Specify an ETag value to operate only on blobs with a matching value. /// Specify an ETag value to operate only on blobs without a matching value. /// Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public ResponseWithHeaders UploadPages(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? 
timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders UploadPages(long contentLength, Stream body, byte[] transactionalContentMD5 = null, byte[] transactionalContentCrc64 = null, int? timeout = null, string range = null, string leaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, string encryptionScope = null, long? ifSequenceNumberLessThanOrEqualTo = null, long? ifSequenceNumberLessThan = null, long? ifSequenceNumberEqualTo = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string ifMatch = null, string ifNoneMatch = null, string ifTags = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags); + using var message = CreateUploadPagesRequest(contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, range, leaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new PageBlobUploadPagesHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs index 77d37d90027aa..c04659bc43322 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Generated/PageBlobUploadPagesHeaders.cs @@ -33,5 +33,7 @@ public PageBlobUploadPagesHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// Returns the name of the encryption scope used to encrypt the blob contents and application metadata. Note that the absence of this header implies use of the default account encryption scope. public string EncryptionScope => _response.Headers.TryGetValue("x-ms-encryption-scope", out string value) ? 
value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 6407445e7bf16..7160bd89aba05 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. ``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/2d3b08fe43bc4a573acd166d3d2ba0c631b016fb/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a936baeb45003f1d31ce855084b2e54365af78af/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs index 719932d5cd500..4144d908b7549 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/FileSystemRestClient.cs @@ -33,7 +33,7 @@ internal partial class FileSystemRestClient /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. /// The value must be "filesystem" for all filesystem operations. The default value is "filesystem". - /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". 
+ /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , , or is null. public FileSystemRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string resource, string version) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs index 6ec456a438564..502dd557f4822 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathAppendDataHeaders.cs @@ -29,5 +29,7 @@ public PathAppendDataHeaders(Response response) public string EncryptionKeySha256 => _response.Headers.TryGetValue("x-ms-encryption-key-sha256", out string value) ? value : null; /// If the lease was auto-renewed with this request. public bool? LeaseRenewed => _response.Headers.TryGetValue("x-ms-lease-renewed", out bool? value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs index 6b1e970bd2fc8..d328c3079de6b 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathRestClient.cs @@ -30,7 +30,7 @@ internal partial class PathRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. 
The default value is "2023-05-03". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or -1 for infinite lease. /// , , or is null. public PathRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version, int? xMsLeaseDuration = null) @@ -293,7 +293,7 @@ public ResponseWithHeaders Create(int? timeout = null, PathRe } } - internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout, int? maxRecords, string continuation, bool? forceFlag, long? position, bool? retainUncommittedData, bool? close, long? contentLength, byte[] contentMD5, string leaseId, string cacheControl, string contentType, string contentDisposition, string contentEncoding, string contentLanguage, string properties, string owner, string group, string permissions, string acl, string ifMatch, string ifNoneMatch, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince) + internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout, int? maxRecords, string continuation, bool? forceFlag, long? position, bool? retainUncommittedData, bool? close, long? contentLength, byte[] contentMD5, string leaseId, string cacheControl, string contentType, string contentDisposition, string contentEncoding, string contentLanguage, string properties, string owner, string group, string permissions, string acl, string ifMatch, string ifNoneMatch, DateTimeOffset? ifModifiedSince, DateTimeOffset? ifUnmodifiedSince, string structuredBodyType, long? 
structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -396,6 +396,14 @@ internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessC { request.Headers.Add("If-Unmodified-Since", ifUnmodifiedSince.Value, "R"); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/json"); if (contentLength != null) { @@ -434,17 +442,19 @@ internal HttpMessage CreateUpdateRequest(PathUpdateAction action, PathSetAccessC /// Specify an ETag value to operate only on blobs without a matching value. /// Specify this header value to operate only on a blob if it has been modified since the specified date/time. /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. Concurrent writes to the same file using multiple clients are not supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). 
- public async Task> UpdateAsync(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, CancellationToken cancellationToken = default) + public async Task> UpdateAsync(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince); + using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PathUpdateHeaders(message.Response); switch (message.Response.Status) @@ -491,17 +501,19 @@ public async Task Specify an ETag value to operate only on blobs without a matching value. /// Specify this header value to operate only on a blob if it has been modified since the specified date/time. /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. 
Concurrent writes to the same file using multiple clients are not supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - public ResponseWithHeaders Update(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Update(PathUpdateAction action, PathSetAccessControlRecursiveMode mode, Stream body, int? timeout = null, int? maxRecords = null, string continuation = null, bool? forceFlag = null, long? position = null, bool? retainUncommittedData = null, bool? close = null, long? contentLength = null, byte[] contentMD5 = null, string leaseId = null, string cacheControl = null, string contentType = null, string contentDisposition = null, string contentEncoding = null, string contentLanguage = null, string properties = null, string owner = null, string group = null, string permissions = null, string acl = null, string ifMatch = null, string ifNoneMatch = null, DateTimeOffset? ifModifiedSince = null, DateTimeOffset? ifUnmodifiedSince = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince); + using var message = CreateUpdateRequest(action, mode, body, timeout, maxRecords, continuation, forceFlag, position, retainUncommittedData, close, contentLength, contentMD5, leaseId, cacheControl, contentType, contentDisposition, contentEncoding, contentLanguage, properties, owner, group, permissions, acl, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new PathUpdateHeaders(message.Response); switch (message.Response.Status) @@ -1315,7 +1327,7 @@ public ResponseWithHeaders FlushData(int? timeout = null, } } - internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? timeout, long? contentLength, byte[] transactionalContentHash, byte[] transactionalContentCrc64, string leaseId, DataLakeLeaseAction? leaseAction, long? leaseDuration, string proposedLeaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, bool? flush) + internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? timeout, long? contentLength, byte[] transactionalContentHash, byte[] transactionalContentCrc64, string leaseId, DataLakeLeaseAction? leaseAction, long? leaseDuration, string proposedLeaseId, string encryptionKey, string encryptionKeySha256, EncryptionAlgorithmTypeInternal? encryptionAlgorithm, bool? flush, string structuredBodyType, long? 
structuredContentLength) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -1369,6 +1381,14 @@ internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? t { request.Headers.Add("x-ms-encryption-algorithm", encryptionAlgorithm.Value.ToSerialString()); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/json"); if (contentLength != null) { @@ -1398,16 +1418,18 @@ internal HttpMessage CreateAppendDataRequest(Stream body, long? position, int? t /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// If file should be flushed after the append. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// The cancellation token to use. /// is null. - public async Task> AppendDataAsync(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, CancellationToken cancellationToken = default) + public async Task> AppendDataAsync(Stream body, long? position = null, int? 
timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, string structuredBodyType = null, long? structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush); + using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush, structuredBodyType, structuredContentLength); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new PathAppendDataHeaders(message.Response); switch (message.Response.Status) @@ -1434,16 +1456,18 @@ public async Task> AppendDataAsync(St /// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. /// If file should be flushed after the append. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. 
/// The cancellation token to use. /// is null. - public ResponseWithHeaders AppendData(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders AppendData(Stream body, long? position = null, int? timeout = null, long? contentLength = null, byte[] transactionalContentHash = null, byte[] transactionalContentCrc64 = null, string leaseId = null, DataLakeLeaseAction? leaseAction = null, long? leaseDuration = null, string proposedLeaseId = null, string encryptionKey = null, string encryptionKeySha256 = null, EncryptionAlgorithmTypeInternal? encryptionAlgorithm = null, bool? flush = null, string structuredBodyType = null, long? 
structuredContentLength = null, CancellationToken cancellationToken = default) { if (body == null) { throw new ArgumentNullException(nameof(body)); } - using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush); + using var message = CreateAppendDataRequest(body, position, timeout, contentLength, transactionalContentHash, transactionalContentCrc64, leaseId, leaseAction, leaseDuration, proposedLeaseId, encryptionKey, encryptionKeySha256, encryptionAlgorithm, flush, structuredBodyType, structuredContentLength); _pipeline.Send(message, cancellationToken); var headers = new PathAppendDataHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs index 35668cb1c3a1d..026c78e72481a 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/PathUpdateHeaders.cs @@ -43,5 +43,7 @@ public PathUpdateHeaders(Response response) public string XMsContinuation => _response.Headers.TryGetValue("x-ms-continuation", out string value) ? value : null; /// The version of the REST protocol used to process the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs index 118595b4d87d1..b00fa12238f4e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/ServiceRestClient.cs @@ -28,7 +28,7 @@ internal partial class ServiceRestClient /// The handler for diagnostic messaging in the client. /// The HTTP pipeline for sending and receiving REST requests and responses. /// The URL of the service account, container, or blob that is the target of the desired operation. - /// Specifies the version of the operation to use for this request. The default value is "2023-05-03". + /// Specifies the version of the operation to use for this request. The default value is "2025-01-05". /// , , or is null. public ServiceRestClient(ClientDiagnostics clientDiagnostics, HttpPipeline pipeline, string url, string version) { diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md index 4121ebab9932e..ec9675a014f70 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/5da3c08b92d05858b728b013b69502dc93485373/specification/storage/data-plane/Azure.Storage.Files.DataLake/stable/2023-05-03/DataLakeStorage.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a936baeb45003f1d31ce855084b2e54365af78af/specification/storage/data-plane/Azure.Storage.Files.DataLake/stable/2025-01-05/DataLakeStorage.json generation1-convenience-client: true modelerfour: seal-single-value-enum-by-default: true diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs index 61384dee810d4..c4d7056a5cfa3 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileDownloadHeaders.cs @@ -79,5 +79,9 @@ public FileDownloadHeaders(Response response) public ShareLeaseState? LeaseState => _response.Headers.TryGetValue("x-ms-lease-state", out string value) ? value.ToShareLeaseState() : null; /// The current lease status of the file. public ShareLeaseStatus? LeaseStatus => _response.Headers.TryGetValue("x-ms-lease-status", out string value) ? value.ToShareLeaseStatus() : null; + /// Indicates the response body contains a structured message and specifies the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? value : null; + /// The length of the blob/file content inside the message body when the response body is returned as a structured message. Will always be smaller than Content-Length. + public long? StructuredContentLength => _response.Headers.TryGetValue("x-ms-structured-content-length", out long? value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs index 32484654eb2be..07f88af545aec 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileRestClient.cs @@ -204,7 +204,7 @@ public ResponseWithHeaders Create(long fileContentLength, str } } - internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? rangeGetContentMD5, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? rangeGetContentMD5, string structuredBodyType, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -230,6 +230,10 @@ internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? ran { request.Headers.Add("x-ms-range-get-content-md5", rangeGetContentMD5.Value); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } if (shareFileRequestConditions?.LeaseId != null) { request.Headers.Add("x-ms-lease-id", shareFileRequestConditions.LeaseId); @@ -246,11 +250,12 @@ internal HttpMessage CreateDownloadRequest(int? timeout, string range, bool? ran /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// Return file data only from the specified byte range. /// When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. 
+ /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Parameter group. /// The cancellation token to use. - public async Task> DownloadAsync(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> DownloadAsync(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, string structuredBodyType = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, shareFileRequestConditions); + using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, structuredBodyType, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileDownloadHeaders(message.Response); switch (message.Response.Status) @@ -270,11 +275,12 @@ public async Task> DownloadAsyn /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// Return file data only from the specified byte range. /// When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB in size. + /// Specifies the response content should be returned as a structured message and specifies the message schema version and properties. /// Parameter group. /// The cancellation token to use. - public ResponseWithHeaders Download(int? timeout = null, string range = null, bool? 
rangeGetContentMD5 = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders Download(int? timeout = null, string range = null, bool? rangeGetContentMD5 = null, string structuredBodyType = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { - using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, shareFileRequestConditions); + using var message = CreateDownloadRequest(timeout, range, rangeGetContentMD5, structuredBodyType, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileDownloadHeaders(message.Response); switch (message.Response.Status) @@ -945,7 +951,7 @@ public ResponseWithHeaders BreakLease(int? timeout = null } } - internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout, byte[] contentMD5, FileLastWrittenMode? fileLastWrittenMode, Stream optionalbody, ShareFileRequestConditions shareFileRequestConditions) + internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout, byte[] contentMD5, FileLastWrittenMode? fileLastWrittenMode, string structuredBodyType, long? 
structuredContentLength, Stream optionalbody, ShareFileRequestConditions shareFileRequestConditions) { var message = _pipeline.CreateMessage(); var request = message.Request; @@ -977,6 +983,14 @@ internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteT { request.Headers.Add("x-ms-file-request-intent", _fileRequestIntent.Value.ToString()); } + if (structuredBodyType != null) + { + request.Headers.Add("x-ms-structured-body", structuredBodyType); + } + if (structuredContentLength != null) + { + request.Headers.Add("x-ms-structured-content-length", structuredContentLength.Value); + } request.Headers.Add("Accept", "application/xml"); if (optionalbody != null) { @@ -998,18 +1012,20 @@ internal HttpMessage CreateUploadRangeRequest(string range, ShareFileRangeWriteT /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is specified, the File service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). /// If the file last write time should be preserved or overwritten. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// Initial data. /// Parameter group. /// The cancellation token to use. /// is null. - public async Task> UploadRangeAsync(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? 
timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public async Task> UploadRangeAsync(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, string structuredBodyType = null, long? structuredContentLength = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (range == null) { throw new ArgumentNullException(nameof(range)); } - using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, optionalbody, shareFileRequestConditions); + using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, structuredBodyType, structuredContentLength, optionalbody, shareFileRequestConditions); await _pipeline.SendAsync(message, cancellationToken).ConfigureAwait(false); var headers = new FileUploadRangeHeaders(message.Response); switch (message.Response.Status) @@ -1028,18 +1044,20 @@ public async Task> UploadRangeAsync( /// The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting Timeouts for File Service Operations.</a>. /// An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 header is specified, the File service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). 
/// If the file last write time should be preserved or overwritten. + /// Required if the request body is a structured message. Specifies the message schema version and properties. + /// Required if the request body is a structured message. Specifies the length of the blob/file content inside the message body. Will always be smaller than Content-Length. /// Initial data. /// Parameter group. /// The cancellation token to use. /// is null. - public ResponseWithHeaders UploadRange(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) + public ResponseWithHeaders UploadRange(string range, ShareFileRangeWriteType fileRangeWrite, long contentLength, int? timeout = null, byte[] contentMD5 = null, FileLastWrittenMode? fileLastWrittenMode = null, string structuredBodyType = null, long? 
structuredContentLength = null, Stream optionalbody = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) { if (range == null) { throw new ArgumentNullException(nameof(range)); } - using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, optionalbody, shareFileRequestConditions); + using var message = CreateUploadRangeRequest(range, fileRangeWrite, contentLength, timeout, contentMD5, fileLastWrittenMode, structuredBodyType, structuredContentLength, optionalbody, shareFileRequestConditions); _pipeline.Send(message, cancellationToken); var headers = new FileUploadRangeHeaders(message.Response); switch (message.Response.Status) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs index db079c2692663..322bfcd1b6d83 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/FileUploadRangeHeaders.cs @@ -27,5 +27,7 @@ public FileUploadRangeHeaders(Response response) public bool? IsServerEncrypted => _response.Headers.TryGetValue("x-ms-request-server-encrypted", out bool? value) ? value : null; /// Last write time for the file. public DateTimeOffset? FileLastWriteTime => _response.Headers.TryGetValue("x-ms-file-last-write-time", out DateTimeOffset? value) ? value : null; + /// Indicates the structured message body was accepted and mirrors back the message schema version and properties. + public string StructuredBodyType => _response.Headers.TryGetValue("x-ms-structured-body", out string value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 22f96a9fbc323..2bcc0e37ee65a 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. ``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/133677b644bcae8e8ada9c3af24d6dee63665e66/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a936baeb45003f1d31ce855084b2e54365af78af/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true From 99c02de3fdf9942e1b0e65ed99c98a653ae732f4 Mon Sep 17 00:00:00 2001 From: Jocelyn <41338290+jaschrep-msft@users.noreply.github.com> Date: Wed, 25 Sep 2024 17:00:25 -0400 Subject: [PATCH 19/25] Structured message cherrypick stg96 (#45626) --- ...e.Storage.Blobs.Batch.Samples.Tests.csproj | 1 + .../Azure.Storage.Blobs.Batch.Tests.csproj | 3 +- ...rage.Blobs.ChangeFeed.Samples.Tests.csproj | 3 +- ...zure.Storage.Blobs.ChangeFeed.Tests.csproj | 3 +- .../api/Azure.Storage.Blobs.net6.0.cs | 7 +- .../api/Azure.Storage.Blobs.netstandard2.0.cs | 7 +- .../api/Azure.Storage.Blobs.netstandard2.1.cs | 7 +- sdk/storage/Azure.Storage.Blobs/assets.json | 2 +- .../Azure.Storage.Blobs.Samples.Tests.csproj | 1 + .../src/AppendBlobClient.cs | 45 +- .../src/Azure.Storage.Blobs.csproj | 7 + .../Azure.Storage.Blobs/src/BlobBaseClient.cs | 110 +++- .../src/BlobClientOptions.cs | 2 + .../src/BlobClientSideDecryptor.cs | 2 +- .../src/BlockBlobClient.cs | 92 ++- .../src/Models/BlobDownloadDetails.cs | 8 + .../src/Models/BlobDownloadInfo.cs | 10 + .../src/Models/BlobDownloadStreamingResult.cs | 8 + 
.../Azure.Storage.Blobs/src/PageBlobClient.cs | 49 +- .../src/PartitionedDownloader.cs | 95 +-- .../Azure.Storage.Blobs/src/autorest.md | 4 +- .../tests/Azure.Storage.Blobs.Tests.csproj | 3 + .../BlobBaseClientTransferValidationTests.cs | 114 ++-- .../tests/ClientSideEncryptionTests.cs | 2 +- .../tests/PartitionedDownloaderTests.cs | 2 +- .../Azure.Storage.Common.Samples.Tests.csproj | 1 + .../src/Shared/ChecksumExtensions.cs | 22 + .../src/Shared/Constants.cs | 9 + .../src/Shared/ContentRange.cs | 18 +- .../src/Shared/ContentRangeExtensions.cs | 14 + .../src/Shared/Errors.Clients.cs | 10 + .../Azure.Storage.Common/src/Shared/Errors.cs | 19 + .../src/Shared/LazyLoadingReadOnlyStream.cs | 40 +- .../src/Shared/PooledMemoryStream.cs | 2 +- .../src/Shared/StorageCrc64Composer.cs | 48 +- .../StorageRequestValidationPipelinePolicy.cs | 29 + .../src/Shared/StorageVersionExtensions.cs | 2 +- .../src/Shared/StreamExtensions.cs | 22 +- .../src/Shared/StructuredMessage.cs | 244 ++++++++ ...tructuredMessageDecodingRetriableStream.cs | 264 +++++++++ .../Shared/StructuredMessageDecodingStream.cs | 542 +++++++++++++++++ .../Shared/StructuredMessageEncodingStream.cs | 545 ++++++++++++++++++ ...redMessagePrecalculatedCrcWrapperStream.cs | 451 +++++++++++++++ .../TransferValidationOptionsExtensions.cs | 7 - .../tests/Azure.Storage.Common.Tests.csproj | 9 + .../tests/Shared/FaultyStream.cs | 13 +- .../Shared/ObserveStructuredMessagePolicy.cs | 85 +++ .../tests/Shared/RequestExtensions.cs | 27 + .../Shared/TamperStreamContentsPolicy.cs | 11 +- .../Shared/TransferValidationTestBase.cs | 325 ++++++++--- ...uredMessageDecodingRetriableStreamTests.cs | 246 ++++++++ .../StructuredMessageDecodingStreamTests.cs | 323 +++++++++++ .../StructuredMessageEncodingStreamTests.cs | 271 +++++++++ .../tests/StructuredMessageHelper.cs | 68 +++ .../StructuredMessageStreamRoundtripTests.cs | 127 ++++ .../tests/StructuredMessageTests.cs | 114 ++++ ...ge.DataMovement.Blobs.Samples.Tests.csproj | 1 + 
.../Azure.Storage.DataMovement.Blobs.csproj | 1 + .../src/DataMovementBlobsExtensions.cs | 4 +- ...re.Storage.DataMovement.Blobs.Tests.csproj | 5 + ...taMovement.Blobs.Files.Shares.Tests.csproj | 1 + ...Movement.Files.Shares.Samples.Tests.csproj | 3 +- .../src/DataMovementSharesExtensions.cs | 4 +- ...age.DataMovement.Files.Shares.Tests.csproj | 1 + .../tests/Shared/DisposingShare.cs | 2 +- .../src/Azure.Storage.DataMovement.csproj | 2 +- .../Azure.Storage.DataMovement.Tests.csproj | 1 + .../Azure.Storage.Files.DataLake.net6.0.cs | 2 +- ...e.Storage.Files.DataLake.netstandard2.0.cs | 2 +- .../Azure.Storage.Files.DataLake/assets.json | 2 +- ...torage.Files.DataLake.Samples.Tests.csproj | 1 + .../src/Azure.Storage.Files.DataLake.csproj | 5 + .../src/DataLakeFileClient.cs | 43 +- .../src/autorest.md | 4 +- .../Azure.Storage.Files.DataLake.Tests.csproj | 3 + ...taLakeFileClientTransferValidationTests.cs | 5 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 3 +- ...ure.Storage.Files.Shares.netstandard2.0.cs | 3 +- .../Azure.Storage.Files.Shares/assets.json | 2 +- ....Storage.Files.Shares.Samples.Tests.csproj | 1 + .../src/Azure.Storage.Files.Shares.csproj | 8 +- .../src/Models/ShareFileDownloadInfo.cs | 6 + .../src/ShareErrors.cs | 15 - .../src/ShareFileClient.cs | 165 ++++-- .../src/autorest.md | 4 +- .../Azure.Storage.Files.Shares.Tests.csproj | 1 + .../ShareFileClientTransferValidationTests.cs | 42 +- .../api/Azure.Storage.Queues.net6.0.cs | 4 +- .../Azure.Storage.Queues.netstandard2.0.cs | 4 +- .../Azure.Storage.Queues.netstandard2.1.cs | 4 +- .../Azure.Storage.Queues.Samples.Tests.csproj | 1 + .../tests/Azure.Storage.Queues.Tests.csproj | 1 + 92 files changed, 4446 insertions(+), 405 deletions(-) create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs create mode 
100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs create mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj index 3dea34a02b7ea..6009a5336b8b9 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj @@ -17,6 +17,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj index 2b77907e9aaac..286ab317256bf 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj +++ 
b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj @@ -23,6 +23,7 @@ + PreserveNewest @@ -42,4 +43,4 @@ - \ No newline at end of file + diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj index 7711cae537db6..6f8fcaf6528b3 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.Blobs.ChangeFeed client library samples @@ -14,6 +14,7 @@ + diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj index 9682ab15ecd60..8cf13cd60744f 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj @@ -17,6 +17,7 @@ + @@ -28,4 +29,4 @@ PreserveNewest - \ No newline at end of file + diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index 25640917de5bb..822d5b41d1404 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public 
BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -522,6 +522,7 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } @@ -567,6 +568,7 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -588,6 +590,7 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1850,7 +1853,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = 
Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index 25640917de5bb..822d5b41d1404 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -522,6 +522,7 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } @@ -567,6 +568,7 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -588,6 +590,7 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1850,7 +1853,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base 
(default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index 25640917de5bb..822d5b41d1404 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -522,6 +522,7 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } @@ -567,6 +568,7 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -588,6 +590,7 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } + public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1850,7 +1853,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base 
(default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/assets.json b/sdk/storage/Azure.Storage.Blobs/assets.json index 0facb33e2a026..1994292f7b658 100644 --- a/sdk/storage/Azure.Storage.Blobs/assets.json +++ b/sdk/storage/Azure.Storage.Blobs/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Blobs", - "Tag": "net/storage/Azure.Storage.Blobs_5c382dfb14" + "Tag": "net/storage/Azure.Storage.Blobs_c5174c4663" } diff --git a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj index 77fd767c3486c..568dd6cba9516 100644 --- a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj @@ -16,6 +16,7 @@ + diff --git a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs index e70d5e02c82d7..9a110cf8eb13a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs @@ -1242,14 +1242,39 @@ internal async Task> AppendBlockInternal( BlobErrors.VerifyHttpsCustomerProvidedKey(Uri, ClientConfiguration.CustomerProvidedKey); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = 
(content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -1267,6 +1292,8 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), @@ -1289,6 +1316,8 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), diff --git a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj index 8b09c620d1654..e29acc40ca38b 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj +++ b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj @@ -52,6 +52,8 @@ + + @@ -91,6 +93,11 @@ + + + + + diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs index aa91edb9f6c41..6b95b04c703db 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs @@ -1031,6 +1031,7 @@ private async Task> DownloadInternal( ContentHash = blobDownloadDetails.ContentHash, ContentLength = blobDownloadDetails.ContentLength, ContentType = blobDownloadDetails.ContentType, + ExpectTrailingDetails = blobDownloadStreamingResult.ExpectTrailingDetails, }, response.GetRawResponse()); } #endregion @@ -1547,30 +1548,52 @@ internal virtual async ValueTask> Download // Wrap the response Content in a 
RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. - Stream stream = RetriableStream.Create( - response.Value.Content, - startOffset => - StartDownloadAsync( - range, - conditionsWithEtag, - validationOptions, - startOffset, - async, - cancellationToken) - .EnsureCompleted() - .Value.Content, - async startOffset => - (await StartDownloadAsync( - range, - conditionsWithEtag, - validationOptions, - startOffset, - async, - cancellationToken) - .ConfigureAwait(false)) - .Value.Content, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); + ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) + => StartDownloadAsync( + range, + conditionsWithEtag, + validationOptions, + offset, + async, + cancellationToken); + async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( + long offset, bool async, CancellationToken cancellationToken) + { + Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); + return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.Details.ContentLength); + } + Stream stream; + if (response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( + response.Value.Content, response.Value.Details.ContentLength); + stream = new StructuredMessageDecodingRetriableStream( + decodingStream, + decodedData, + StructuredMessage.Flags.StorageCrc64, + startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) + .EnsureCompleted(), + async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false), + decodedData => + { + response.Value.Details.ContentCrc = new 
byte[StructuredMessage.Crc64Length]; + decodedData.Crc.WriteCrc64(response.Value.Details.ContentCrc); + }, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } + else + { + stream = RetriableStream.Create( + response.Value.Content, + startOffset => Factory(startOffset, async: false, cancellationToken) + .EnsureCompleted().Value.Content, + async startOffset => (await Factory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false)).Value.Content, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } stream = stream.WithNoDispose().WithProgress(progressHandler); @@ -1578,7 +1601,11 @@ internal virtual async ValueTask> Download * Buffer response stream and ensure it matches the transactional checksum if any. * Storage will not return a checksum for payload >4MB, so this buffer is capped similarly. * Checksum validation is opt-in, so this buffer is part of that opt-in. */ - if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) + if (validationOptions != default && + validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && + validationOptions.AutoValidateChecksum && + // structured message decoding does the validation for us + !response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)response.Value.Details.ContentLength); @@ -1649,8 +1676,8 @@ await ContentHasher.AssertResponseHashMatchInternal( /// notifications that the operation should be cancelled. /// /// - /// A describing the - /// downloaded blob. contains + /// A describing the + /// downloaded blob. contains /// the blob's data. 
/// /// @@ -1689,13 +1716,29 @@ private async ValueTask> StartDownloadAsyn operationName: nameof(BlobBaseClient.Download), parameterName: nameof(conditions)); + bool? rangeGetContentMD5 = null; + bool? rangeGetContentCRC64 = null; + string structuredBodyType = null; + switch (validationOptions?.ChecksumAlgorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + rangeGetContentMD5 = true; + break; + case StorageChecksumAlgorithm.StorageCrc64: + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + break; + default: + break; + } + if (async) { response = await BlobRestClient.DownloadAsync( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, - rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? true : null, + rangeGetContentMD5: rangeGetContentMD5, + rangeGetContentCRC64: rangeGetContentCRC64, + structuredBodyType: structuredBodyType, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, @@ -1712,8 +1755,9 @@ private async ValueTask> StartDownloadAsyn response = BlobRestClient.Download( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, - rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? 
true : null, + rangeGetContentMD5: rangeGetContentMD5, + rangeGetContentCRC64: rangeGetContentCRC64, + structuredBodyType: structuredBodyType, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, @@ -1729,9 +1773,11 @@ private async ValueTask> StartDownloadAsyn long length = response.IsUnavailable() ? 0 : response.Headers.ContentLength ?? 0; ClientConfiguration.Pipeline.LogTrace($"Response: {response.GetRawResponse().Status}, ContentLength: {length}"); - return Response.FromValue( + Response result = Response.FromValue( response.ToBlobDownloadStreamingResult(), response.GetRawResponse()); + result.Value.ExpectTrailingDetails = structuredBodyType != null; + return result; } #endregion diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs index b16cefc83a535..f312e621bffc4 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs @@ -318,6 +318,8 @@ private void AddHeadersAndQueryParameters() Diagnostics.LoggedHeaderNames.Add("x-ms-encryption-key-sha256"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-error-code"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-status-code"); + Diagnostics.LoggedHeaderNames.Add("x-ms-structured-body"); + Diagnostics.LoggedHeaderNames.Add("x-ms-structured-content-length"); Diagnostics.LoggedQueryParameters.Add("comp"); Diagnostics.LoggedQueryParameters.Add("maxresults"); diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs index 9006282fab5b7..59b036d4b20bd 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs +++ 
b/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs @@ -186,7 +186,7 @@ private static bool CanIgnorePadding(ContentRange? contentRange) // did we request the last block? // end is inclusive/0-index, so end = n and size = n+1 means we requested the last block - if (contentRange.Value.Size - contentRange.Value.End == 1) + if (contentRange.Value.TotalResourceLength - contentRange.Value.End == 1) { return false; } diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs index f5348303e57f0..00e6bf0780e2f 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs @@ -875,14 +875,35 @@ internal virtual async Task> UploadInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content?.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? 
structuredContentLength = default; + string structuredBodyType = null; + if (content != null && + validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64); + contentLength = content.Length - content.Position; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -921,6 +942,8 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -953,6 +976,8 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -1305,14 +1330,39 @@ internal virtual async Task> StageBlockInternal( Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await 
ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -1320,7 +1370,7 @@ internal virtual async Task> StageBlockInternal( { response = await BlockBlobRestClient.StageBlockAsync( blockId: base64BlockId, - contentLength: (content?.Length - content?.Position) ?? 
0, + contentLength: contentLength, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1329,6 +1379,8 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -1336,7 +1388,7 @@ internal virtual async Task> StageBlockInternal( { response = BlockBlobRestClient.StageBlock( blockId: base64BlockId, - contentLength: (content?.Length - content?.Position) ?? 0, + contentLength: contentLength, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1345,6 +1397,8 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -2791,7 +2845,7 @@ internal async Task OpenWriteInternal( immutabilityPolicy: default, legalHold: default, progressHandler: default, - transferValidationOverride: default, + transferValidationOverride: new() { ChecksumAlgorithm = StorageChecksumAlgorithm.None }, operationName: default, async: async, cancellationToken: cancellationToken) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs index bc119822cdc12..0490ec239798e 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs @@ -34,6 +34,14 @@ public class BlobDownloadDetails public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays + /// + /// When requested using , this value contains the CRC for the download blob range. + /// This value may only become populated once the network stream is fully consumed. If this instance is accessed through + /// , the network stream has already been consumed. Otherwise, consume the content stream before + /// checking this value. + /// + public byte[] ContentCrc { get; internal set; } + /// /// Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob. 
/// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs index e034573b54b3a..b42801e36ab55 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs @@ -4,6 +4,8 @@ using System; using System.ComponentModel; using System.IO; +using System.Threading.Tasks; +using Azure.Core; using Azure.Storage.Shared; namespace Azure.Storage.Blobs.Models @@ -49,6 +51,14 @@ public class BlobDownloadInfo : IDisposable, IDownloadedContent /// public BlobDownloadDetails Details { get; internal set; } + /// + /// Indicates some contents of are mixed into the response stream. + /// They will not be set until has been fully consumed. These details + /// will be extracted from the content stream by the library before the calling code can + /// encounter them. + /// + public bool ExpectTrailingDetails { get; internal set; } + /// /// Constructor. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs index 4fbada6e67aad..9b7d4d4e00dad 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs @@ -24,6 +24,14 @@ internal BlobDownloadStreamingResult() { } /// public Stream Content { get; internal set; } + /// + /// Indicates some contents of are mixed into the response stream. + /// They will not be set until has been fully consumed. These details + /// will be extracted from the content stream by the library before the calling code can + /// encounter them. + /// + public bool ExpectTrailingDetails { get; internal set; } + /// /// Disposes the by calling Dispose on the underlying stream. 
/// diff --git a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs index fa575e41b8ebe..7038897531fbb 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs @@ -1363,15 +1363,42 @@ internal async Task> UploadPagesInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content?.WithNoDispose().WithProgress(progressHandler); - HttpRange range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + HttpRange range; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && + ClientSideEncryption == null) // don't allow feature combination + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content?.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? 
new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content?.WithNoDispose().WithProgress(progressHandler); + range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); + } ResponseWithHeaders response; @@ -1388,6 +1415,8 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, @@ -1412,6 +1441,8 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, diff --git a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs index 2c52d0c256e34..1b14bcf98ec04 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs @@ -22,6 +22,8 @@ internal class PartitionedDownloader private const string _operationName = nameof(BlobBaseClient) + "." + nameof(BlobBaseClient.DownloadTo); private const string _innerOperationName = nameof(BlobBaseClient) + "." + nameof(BlobBaseClient.DownloadStreaming); + private const int Crc64Len = Constants.StorageCrc64SizeInBytes; + /// /// The client used to download the blob. /// @@ -48,6 +50,7 @@ internal class PartitionedDownloader /// private readonly StorageChecksumAlgorithm _validationAlgorithm; private readonly int _checksumSize; + // TODO disabling master crc temporarily. segment CRCs still handled. 
private bool UseMasterCrc => _validationAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; private StorageCrc64HashAlgorithm _masterCrcCalculator = null; @@ -200,20 +203,31 @@ public async Task DownloadToInternal( } // Destination wrapped in master crc step if needed (must wait until after encryption wrap check) - Memory composedCrc = default; + byte[] composedCrcBuf = default; if (UseMasterCrc) { _masterCrcCalculator = StorageCrc64HashAlgorithm.Create(); destination = ChecksumCalculatingStream.GetWriteStream(destination, _masterCrcCalculator.Append); - disposables.Add(_arrayPool.RentAsMemoryDisposable( - Constants.StorageCrc64SizeInBytes, out composedCrc)); - composedCrc.Span.Clear(); + disposables.Add(_arrayPool.RentDisposable(Crc64Len, out composedCrcBuf)); + composedCrcBuf.Clear(); } // If the first segment was the entire blob, we'll copy that to // the output stream and finish now - long initialLength = initialResponse.Value.Details.ContentLength; - long totalLength = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange); + long initialLength; + long totalLength; + // Get blob content length downloaded from content range when available to handle transit encoding + if (string.IsNullOrWhiteSpace(initialResponse.Value.Details.ContentRange)) + { + initialLength = initialResponse.Value.Details.ContentLength; + totalLength = 0; + } + else + { + ContentRange recievedRange = ContentRange.Parse(initialResponse.Value.Details.ContentRange); + initialLength = recievedRange.GetRangeLength(); + totalLength = recievedRange.TotalResourceLength.Value; + } if (initialLength == totalLength) { await HandleOneShotDownload(initialResponse, destination, async, cancellationToken) @@ -239,15 +253,16 @@ await HandleOneShotDownload(initialResponse, destination, async, cancellationTok } else { - using (_arrayPool.RentAsMemoryDisposable(_checksumSize, out Memory partitionChecksum)) + using (_arrayPool.RentDisposable(_checksumSize, out byte[] partitionChecksum)) 
{ - await CopyToInternal(initialResponse, destination, partitionChecksum, async, cancellationToken).ConfigureAwait(false); + await CopyToInternal(initialResponse, destination, new(partitionChecksum, 0, _checksumSize), async, cancellationToken).ConfigureAwait(false); if (UseMasterCrc) { StorageCrc64Composer.Compose( - (composedCrc.ToArray(), 0L), - (partitionChecksum.ToArray(), initialResponse.Value.Details.ContentLength) - ).CopyTo(composedCrc); + (composedCrcBuf, 0L), + (partitionChecksum, initialResponse.Value.Details.ContentRange.GetContentRangeLengthOrDefault() + ?? initialResponse.Value.Details.ContentLength) + ).AsSpan(0, Crc64Len).CopyTo(composedCrcBuf); } } } @@ -286,15 +301,16 @@ await HandleOneShotDownload(initialResponse, destination, async, cancellationTok else { Response result = await responseValueTask.ConfigureAwait(false); - using (_arrayPool.RentAsMemoryDisposable(_checksumSize, out Memory partitionChecksum)) + using (_arrayPool.RentDisposable(_checksumSize, out byte[] partitionChecksum)) { - await CopyToInternal(result, destination, partitionChecksum, async, cancellationToken).ConfigureAwait(false); + await CopyToInternal(result, destination, new(partitionChecksum, 0, _checksumSize), async, cancellationToken).ConfigureAwait(false); if (UseMasterCrc) { StorageCrc64Composer.Compose( - (composedCrc.ToArray(), 0L), - (partitionChecksum.ToArray(), result.Value.Details.ContentLength) - ).CopyTo(composedCrc); + (composedCrcBuf, 0L), + (partitionChecksum, result.Value.Details.ContentRange.GetContentRangeLengthOrDefault() + ?? result.Value.Details.ContentLength) + ).AsSpan(0, Crc64Len).CopyTo(composedCrcBuf); } } } @@ -310,7 +326,7 @@ await HandleOneShotDownload(initialResponse, destination, async, cancellationTok } #pragma warning restore AZC0110 // DO NOT use await keyword in possibly synchronous scope. 
- await FinalizeDownloadInternal(destination, composedCrc, async, cancellationToken) + await FinalizeDownloadInternal(destination, composedCrcBuf?.AsMemory(0, Crc64Len) ?? default, async, cancellationToken) .ConfigureAwait(false); return initialResponse.GetRawResponse(); @@ -328,7 +344,7 @@ async Task ConsumeQueuedTask() // CopyToAsync causes ConsumeQueuedTask to wait until the // download is complete - using (_arrayPool.RentAsMemoryDisposable(_checksumSize, out Memory partitionChecksum)) + using (_arrayPool.RentDisposable(_checksumSize, out byte[] partitionChecksum)) { await CopyToInternal( response, @@ -337,13 +353,14 @@ await CopyToInternal( async, cancellationToken) .ConfigureAwait(false); - if (UseMasterCrc) - { - StorageCrc64Composer.Compose( - (composedCrc.ToArray(), 0L), - (partitionChecksum.ToArray(), response.Value.Details.ContentLength) - ).CopyTo(composedCrc); - } + if (UseMasterCrc) + { + StorageCrc64Composer.Compose( + (composedCrcBuf, 0L), + (partitionChecksum, response.Value.Details.ContentRange.GetContentRangeLengthOrDefault() + ?? 
response.Value.Details.ContentLength) + ).AsSpan(0, Crc64Len).CopyTo(composedCrcBuf); + } } } } @@ -379,7 +396,7 @@ await FinalizeDownloadInternal(destination, partitionChecksum, async, cancellati private async Task FinalizeDownloadInternal( Stream destination, - Memory composedCrc, + ReadOnlyMemory composedCrc, bool async, CancellationToken cancellationToken) { @@ -395,20 +412,6 @@ private async Task FinalizeDownloadInternal( } } - private static long ParseRangeTotalLength(string range) - { - if (range == null) - { - return 0; - } - int lengthSeparator = range.IndexOf("/", StringComparison.InvariantCultureIgnoreCase); - if (lengthSeparator == -1) - { - throw BlobErrors.ParsingFullHttpRangeFailed(range); - } - return long.Parse(range.Substring(lengthSeparator + 1), CultureInfo.InvariantCulture); - } - private async Task CopyToInternal( Response response, Stream destination, @@ -417,7 +420,9 @@ private async Task CopyToInternal( CancellationToken cancellationToken) { CancellationHelper.ThrowIfCancellationRequested(cancellationToken); - using IHasher hasher = ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); + // if structured message, this crc is validated in the decoding process. don't decode it here. + bool structuredMessage = response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader); + using IHasher hasher = structuredMessage ? null : ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); using Stream rawSource = response.Value.Content; using Stream source = hasher != null ? 
ChecksumCalculatingStream.GetReadStream(rawSource, hasher.AppendHash) @@ -429,7 +434,13 @@ await source.CopyToInternal( cancellationToken) .ConfigureAwait(false); - if (hasher != null) + // with structured message, the message integrity will already be validated, + // but we can still get the checksum out of the response object + if (structuredMessage) + { + response.Value.Details.ContentCrc?.CopyTo(checksumBuffer.Span); + } + else if (hasher != null) { hasher.GetFinalHash(checksumBuffer.Span); (ReadOnlyMemory checksum, StorageChecksumAlgorithm _) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 7160bd89aba05..a96db9856ca58 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -34,7 +34,7 @@ directive: if (property.includes('/{containerName}/{blob}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } + } else if (property.includes('/{containerName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); @@ -158,7 +158,7 @@ directive: var newName = property.replace('/{containerName}/{blob}', ''); $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{containerName}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj index 62c7b6d17e63e..1c3856c83b64e 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj @@ -6,6 +6,9 @@ Microsoft 
Azure.Storage.Blobs client library tests false + + BlobSDK + diff --git a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs index 73d11612f1d8c..3ec448e6d1ed0 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +using System; +using System.Buffers; using System.IO; using System.Threading.Tasks; using Azure.Core.TestFramework; @@ -37,7 +39,10 @@ protected override async Task> GetDispo StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingContainer = await ClientBuilder.GetTestContainerAsync(service: service, containerName: containerName); + var disposingContainer = await ClientBuilder.GetTestContainerAsync( + service: service, + containerName: containerName, + publicAccessType: PublicAccessType.None); disposingContainer.Container.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingContainer.Container.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; @@ -91,57 +96,96 @@ public override void TestAutoResolve() } #region Added Tests - [TestCaseSource("GetValidationAlgorithms")] - public async Task ExpectedDownloadStreamingStreamTypeReturned(StorageChecksumAlgorithm algorithm) + [Test] + public virtual async Task OlderServiceVersionThrowsOnStructuredMessage() { - await using var test = await GetDisposingContainerAsync(); + // use service version before structured message was introduced + await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( + service: ClientBuilder.GetServiceClient_SharedKey( + 
InstrumentClientOptions(new BlobClientOptions(BlobClientOptions.ServiceVersion.V2024_11_04))), + publicAccessType: PublicAccessType.None); // Arrange - var data = GetRandomBuffer(Constants.KB); - BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); - using (var stream = new MemoryStream(data)) + const int dataLength = Constants.KB; + var data = GetRandomBuffer(dataLength); + + var resourceName = GetNewResourceName(); + var blob = InstrumentClient(disposingContainer.Container.GetBlobClient(GetNewResourceName())); + await blob.UploadAsync(BinaryData.FromBytes(data)); + + var validationOptions = new DownloadTransferValidationOptions { - await blob.UploadAsync(stream); - } - // don't make options instance at all for no hash request - DownloadTransferValidationOptions transferValidation = algorithm == StorageChecksumAlgorithm.None - ? default - : new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + }; + AsyncTestDelegate operation = async () => await (await blob.DownloadStreamingAsync( + new BlobDownloadOptions + { + Range = new HttpRange(length: Constants.StructuredMessage.MaxDownloadCrcWithHeader + 1), + TransferValidation = validationOptions, + })).Value.Content.CopyToAsync(Stream.Null); + Assert.That(operation, Throws.TypeOf()); + } + + [Test] + public async Task StructuredMessagePopulatesCrcDownloadStreaming() + { + await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( + publicAccessType: PublicAccessType.None); + + const int dataLength = Constants.KB; + byte[] data = GetRandomBuffer(dataLength); + byte[] dataCrc = new byte[8]; + StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); + + var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); + await blob.UploadAsync(BinaryData.FromBytes(data)); - // Act - Response response = await blob.DownloadStreamingAsync(new 
BlobDownloadOptions + Response response = await blob.DownloadStreamingAsync(new() { - TransferValidation = transferValidation, - Range = new HttpRange(length: data.Length) + TransferValidation = new DownloadTransferValidationOptions + { + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + } }); - // Assert - // validated stream is buffered - Assert.AreEqual(typeof(MemoryStream), response.Value.Content.GetType()); + // crc is not present until response stream is consumed + Assert.That(response.Value.Details.ContentCrc, Is.Null); + + byte[] downloadedData; + using (MemoryStream ms = new()) + { + await response.Value.Content.CopyToAsync(ms); + downloadedData = ms.ToArray(); + } + + Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); + Assert.That(downloadedData, Is.EqualTo(data)); } [Test] - public async Task ExpectedDownloadStreamingStreamTypeReturned_None() + public async Task StructuredMessagePopulatesCrcDownloadContent() { - await using var test = await GetDisposingContainerAsync(); + await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( + publicAccessType: PublicAccessType.None); - // Arrange - var data = GetRandomBuffer(Constants.KB); - BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); - using (var stream = new MemoryStream(data)) - { - await blob.UploadAsync(stream); - } + const int dataLength = Constants.KB; + byte[] data = GetRandomBuffer(dataLength); + byte[] dataCrc = new byte[8]; + StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); + + var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); + await blob.UploadAsync(BinaryData.FromBytes(data)); - // Act - Response response = await blob.DownloadStreamingAsync(new BlobDownloadOptions + Response response = await blob.DownloadContentAsync(new BlobDownloadOptions() { - Range = new HttpRange(length: data.Length) + TransferValidation = new 
DownloadTransferValidationOptions + { + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + } }); - // Assert - // unvalidated stream type is private; just check we didn't get back a buffered stream - Assert.AreNotEqual(typeof(MemoryStream), response.Value.Content.GetType()); + Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); + Assert.That(response.Value.Content.ToArray(), Is.EqualTo(data)); } #endregion } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs index 5d391440ea1b6..e85ff3aa5473f 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs @@ -1343,7 +1343,7 @@ public void CanParseLargeContentRange() { long compareValue = (long)Int32.MaxValue + 1; //Increase max int32 by one ContentRange contentRange = ContentRange.Parse($"bytes 0 {compareValue} {compareValue}"); - Assert.AreEqual((long)Int32.MaxValue + 1, contentRange.Size); + Assert.AreEqual((long)Int32.MaxValue + 1, contentRange.TotalResourceLength); Assert.AreEqual(0, contentRange.Start); Assert.AreEqual((long)Int32.MaxValue + 1, contentRange.End); } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs index d8d4756a510c1..af408264c5bfa 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs @@ -305,7 +305,7 @@ public Response GetStream(HttpRange range, BlobRequ ContentHash = new byte[] { 1, 2, 3 }, LastModified = DateTimeOffset.Now, Metadata = new Dictionary() { { "meta", "data" } }, - ContentRange = $"bytes {range.Offset}-{range.Offset + contentLength}/{_length}", + ContentRange = $"bytes {range.Offset}-{Math.Max(1, range.Offset + contentLength - 1)}/{_length}", ETag = s_etag, 
ContentEncoding = "test", CacheControl = "test", diff --git a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj index 7d454aeaa0af2..aeca4497a8770 100644 --- a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj @@ -19,6 +19,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs new file mode 100644 index 0000000000000..48304640eee43 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers.Binary; + +namespace Azure.Storage; + +internal static class ChecksumExtensions +{ + public static void WriteCrc64(this ulong crc, Span dest) + => BinaryPrimitives.WriteUInt64LittleEndian(dest, crc); + + public static bool TryWriteCrc64(this ulong crc, Span dest) + => BinaryPrimitives.TryWriteUInt64LittleEndian(dest, crc); + + public static ulong ReadCrc64(this ReadOnlySpan crc) + => BinaryPrimitives.ReadUInt64LittleEndian(crc); + + public static bool TryReadCrc64(this ReadOnlySpan crc, out ulong value) + => BinaryPrimitives.TryReadUInt64LittleEndian(crc, out value); +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs index 3e00882188fba..35d5c1f1fde8c 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs @@ -657,6 +657,15 @@ internal static class AccountResources internal static readonly int[] PathStylePorts = { 10000, 10001, 10002, 10003, 10004, 10100, 10101, 10102, 10103, 10104, 11000, 11001, 
11002, 11003, 11004, 11100, 11101, 11102, 11103, 11104 }; } + internal static class StructuredMessage + { + public const string StructuredMessageHeader = "x-ms-structured-body"; + public const string StructuredContentLength = "x-ms-structured-content-length"; + public const string CrcStructuredMessage = "XSM/1.0; properties=crc64"; + public const int DefaultSegmentContentLength = 4 * MB; + public const int MaxDownloadCrcWithHeader = 4 * MB; + } + internal static class ClientSideEncryption { public const string HttpMessagePropertyKeyV1 = "Azure.Storage.StorageTelemetryPolicy.ClientSideEncryption.V1"; diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs index f656382efad2b..cb3b0a7bee189 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs @@ -82,20 +82,20 @@ public RangeUnit(string value) public long? End { get; } /// - /// Size of this range, measured in this instance's . + /// Size of the entire resource this range is from, measured in this instance's . /// - public long? Size { get; } + public long? TotalResourceLength { get; } /// /// Unit this range is measured in. Generally "bytes". /// public RangeUnit Unit { get; } - public ContentRange(RangeUnit unit, long? start, long? end, long? size) + public ContentRange(RangeUnit unit, long? start, long? end, long? totalResourceLength) { Start = start; End = end; - Size = size; + TotalResourceLength = totalResourceLength; Unit = unit; } @@ -113,7 +113,7 @@ public static ContentRange Parse(string headerValue) string unit = default; long? start = default; long? end = default; - long? size = default; + long? 
resourceSize = default; try { @@ -136,10 +136,10 @@ public static ContentRange Parse(string headerValue) var rawSize = tokens[blobSizeIndex]; if (rawSize != WildcardMarker) { - size = long.Parse(rawSize, CultureInfo.InvariantCulture); + resourceSize = long.Parse(rawSize, CultureInfo.InvariantCulture); } - return new ContentRange(unit, start, end, size); + return new ContentRange(unit, start, end, resourceSize); } catch (IndexOutOfRangeException) { @@ -165,7 +165,7 @@ public static HttpRange ToHttpRange(ContentRange contentRange) /// /// Indicates whether this instance and a specified are equal /// - public bool Equals(ContentRange other) => (other.Start == Start) && (other.End == End) && (other.Unit == Unit) && (other.Size == Size); + public bool Equals(ContentRange other) => (other.Start == Start) && (other.End == End) && (other.Unit == Unit) && (other.TotalResourceLength == TotalResourceLength); /// /// Determines if two values are the same. @@ -185,6 +185,6 @@ public static HttpRange ToHttpRange(ContentRange contentRange) /// [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => HashCodeBuilder.Combine(Start, End, Size, Unit.GetHashCode()); + public override int GetHashCode() => HashCodeBuilder.Combine(Start, End, TotalResourceLength, Unit.GetHashCode()); } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs new file mode 100644 index 0000000000000..160a69b19a9c8 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Azure.Storage.Cryptography; + +internal static class ContentRangeExtensions +{ + public static long? GetContentRangeLengthOrDefault(this string contentRange) + => string.IsNullOrWhiteSpace(contentRange) + ? 
default : ContentRange.Parse(contentRange).GetRangeLength(); + + public static long GetRangeLength(this ContentRange contentRange) + => contentRange.End.Value - contentRange.Start.Value + 1; +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs index 2a5fe38668104..867607e551e6a 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs @@ -3,6 +3,7 @@ using System; using System.Globalization; +using System.IO; using System.Linq; using System.Security.Authentication; using System.Xml.Serialization; @@ -105,9 +106,18 @@ public static ArgumentException VersionNotSupported(string paramName) public static RequestFailedException ClientRequestIdMismatch(Response response, string echo, string original) => new RequestFailedException(response.Status, $"Response x-ms-client-request-id '{echo}' does not match the original expected request id, '{original}'.", null); + public static InvalidDataException StructuredMessageNotAcknowledgedGET(Response response) + => new InvalidDataException($"Response does not acknowledge structured message was requested. Unknown data structure in response body."); + + public static InvalidDataException StructuredMessageNotAcknowledgedPUT(Response response) + => new InvalidDataException($"Response does not acknowledge structured message was sent. 
Unexpected data may have been persisted to storage."); + public static ArgumentException TransactionalHashingNotSupportedWithClientSideEncryption() => new ArgumentException("Client-side encryption and transactional hashing are not supported at the same time."); + public static InvalidDataException ExpectedStructuredMessage() + => new InvalidDataException($"Expected {Constants.StructuredMessage.StructuredMessageHeader} in response, but found none."); + public static void VerifyHttpsTokenAuth(Uri uri) { if (uri.Scheme != Constants.Https) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs index 6b89a59011d51..e3372665928c1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs @@ -72,6 +72,9 @@ public static ArgumentException CannotDeferTransactionalHashVerification() public static ArgumentException CannotInitializeWriteStreamWithData() => new ArgumentException("Initialized buffer for StorageWriteStream must be empty."); + public static InvalidDataException InvalidStructuredMessage(string optionalMessage = default) + => new InvalidDataException(("Invalid structured message data. " + optionalMessage ?? "").Trim()); + internal static void VerifyStreamPosition(Stream stream, string streamName) { if (stream != null && stream.CanSeek && stream.Length > 0 && stream.Position >= stream.Length) @@ -80,6 +83,22 @@ internal static void VerifyStreamPosition(Stream stream, string streamName) } } + internal static void AssertBufferMinimumSize(ReadOnlySpan buffer, int minSize, string paramName) + { + if (buffer.Length < minSize) + { + throw new ArgumentException($"Expected buffer Length of at least {minSize} bytes. 
Got {buffer.Length}.", paramName); + } + } + + internal static void AssertBufferExactSize(ReadOnlySpan buffer, int size, string paramName) + { + if (buffer.Length != size) + { + throw new ArgumentException($"Expected buffer Length of exactly {size} bytes. Got {buffer.Length}.", paramName); + } + } + public static void ThrowIfParamNull(object obj, string paramName) { if (obj == null) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs index c3e9c641c3fea..fe2db427bef02 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs @@ -249,41 +249,9 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat response = await _downloadInternalFunc(range, _validationOptions, async, cancellationToken).ConfigureAwait(false); using Stream networkStream = response.Value.Content; - - // The number of bytes we just downloaded. - long downloadSize = GetResponseRange(response.GetRawResponse()).Length.Value; - - // The number of bytes we copied in the last loop. - int copiedBytes; - - // Bytes we have copied so far. - int totalCopiedBytes = 0; - - // Bytes remaining to copy. It is save to truncate the long because we asked for a max of int _buffer size bytes. - int remainingBytes = (int)downloadSize; - - do - { - if (async) - { - copiedBytes = await networkStream.ReadAsync( - buffer: _buffer, - offset: totalCopiedBytes, - count: remainingBytes, - cancellationToken: cancellationToken).ConfigureAwait(false); - } - else - { - copiedBytes = networkStream.Read( - buffer: _buffer, - offset: totalCopiedBytes, - count: remainingBytes); - } - - totalCopiedBytes += copiedBytes; - remainingBytes -= copiedBytes; - } - while (copiedBytes != 0); + // use stream copy to ensure consumption of any trailing metadata (e.g. 
structured message) + // allow buffer limits to catch the error of data size mismatch + int totalCopiedBytes = (int) await networkStream.CopyToInternal(new MemoryStream(_buffer), async, cancellationToken).ConfigureAwait((false)); _bufferPosition = 0; _bufferLength = totalCopiedBytes; @@ -291,7 +259,7 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat // if we deferred transactional hash validation on download, validate now // currently we always defer but that may change - if (_validationOptions != default && _validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && !_validationOptions.AutoValidateChecksum) + if (_validationOptions != default && _validationOptions.ChecksumAlgorithm == StorageChecksumAlgorithm.MD5 && !_validationOptions.AutoValidateChecksum) // TODO better condition { ContentHasher.AssertResponseHashMatch(_buffer, _bufferPosition, _bufferLength, _validationOptions.ChecksumAlgorithm, response.GetRawResponse()); } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs index 3e218d18a90af..6070329d10d3d 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs @@ -251,7 +251,7 @@ public override int Read(byte[] buffer, int offset, int count) Length - Position, bufferCount - (Position - offsetOfBuffer), count - read); - Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, read, toCopy); + Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, offset + read, toCopy); read += toCopy; Position += toCopy; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs index ab6b76d78a87e..307ff23b21144 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs +++ 
b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs @@ -12,22 +12,52 @@ namespace Azure.Storage /// internal static class StorageCrc64Composer { - public static Memory Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) + public static byte[] Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) + => Compose(partitions.AsEnumerable()); + + public static byte[] Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) { - return Compose(partitions.AsEnumerable()); + ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); + return BitConverter.GetBytes(result); } - public static Memory Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) + public static byte[] Compose(params (ReadOnlyMemory Crc64, long OriginalDataLength)[] partitions) + => Compose(partitions.AsEnumerable()); + + public static byte[] Compose(IEnumerable<(ReadOnlyMemory Crc64, long OriginalDataLength)> partitions) { - ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); - return new Memory(BitConverter.GetBytes(result)); +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64.Span), tup.OriginalDataLength))); +#else + ulong result = Compose(partitions.Select(tup => (System.BitConverter.ToUInt64(tup.Crc64.ToArray(), 0), tup.OriginalDataLength))); +#endif + return BitConverter.GetBytes(result); } + public static byte[] Compose( + ReadOnlySpan leftCrc64, long leftOriginalDataLength, + ReadOnlySpan rightCrc64, long rightOriginalDataLength) + { +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + ulong result = Compose( + (BitConverter.ToUInt64(leftCrc64), leftOriginalDataLength), + (BitConverter.ToUInt64(rightCrc64), rightOriginalDataLength)); +#else + ulong result = Compose( + 
(BitConverter.ToUInt64(leftCrc64.ToArray(), 0), leftOriginalDataLength), + (BitConverter.ToUInt64(rightCrc64.ToArray(), 0), rightOriginalDataLength)); +#endif + return BitConverter.GetBytes(result); + } + + public static ulong Compose(params (ulong Crc64, long OriginalDataLength)[] partitions) + => Compose(partitions.AsEnumerable()); + public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> partitions) { ulong composedCrc = 0; long composedDataLength = 0; - foreach (var tup in partitions) + foreach ((ulong crc64, long originalDataLength) in partitions) { composedCrc = StorageCrc64Calculator.Concatenate( uInitialCrcAB: 0, @@ -35,9 +65,9 @@ public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> uFinalCrcA: composedCrc, uSizeA: (ulong) composedDataLength, uInitialCrcB: 0, - uFinalCrcB: tup.Crc64, - uSizeB: (ulong)tup.OriginalDataLength); - composedDataLength += tup.OriginalDataLength; + uFinalCrcB: crc64, + uSizeB: (ulong)originalDataLength); + composedDataLength += originalDataLength; } return composedCrc; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs index 0cef4f4d8d4ed..9f4ddb5249e82 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs @@ -33,6 +33,35 @@ public override void OnReceivedResponse(HttpMessage message) { throw Errors.ClientRequestIdMismatch(message.Response, echo.First(), original); } + + if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && + message.Request.Headers.Contains(Constants.StructuredMessage.StructuredContentLength)) + { + AssertStructuredMessageAcknowledgedPUT(message); + } + else if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + 
AssertStructuredMessageAcknowledgedGET(message); + } + } + + private static void AssertStructuredMessageAcknowledgedPUT(HttpMessage message) + { + if (!message.Response.IsError && + !message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + throw Errors.StructuredMessageNotAcknowledgedPUT(message.Response); + } + } + + private static void AssertStructuredMessageAcknowledgedGET(HttpMessage message) + { + if (!message.Response.IsError && + !(message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && + message.Response.Headers.Contains(Constants.StructuredMessage.StructuredContentLength))) + { + throw Errors.StructuredMessageNotAcknowledgedGET(message.Response); + } } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs index 2a7bd90fb82a1..44c0973ea9be1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs @@ -46,7 +46,7 @@ internal static class StorageVersionExtensions /// public const ServiceVersion LatestVersion = #if BlobSDK || QueueSDK || FileSDK || DataLakeSDK || ChangeFeedSDK || DataMovementSDK || BlobDataMovementSDK || ShareDataMovementSDK - ServiceVersion.V2024_11_04; + ServiceVersion.V2025_01_05; #else ERROR_STORAGE_SERVICE_NOT_DEFINED; #endif diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs index 31f121d414ea4..c8803ecf421e7 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+using System; +using System.Buffers; using System.IO; using System.Threading; using System.Threading.Tasks; @@ -48,7 +50,7 @@ public static async Task WriteInternal( } } - public static Task CopyToInternal( + public static Task CopyToInternal( this Stream src, Stream dest, bool async, @@ -79,21 +81,33 @@ public static Task CopyToInternal( /// Cancellation token for the operation. /// /// - public static async Task CopyToInternal( + public static async Task CopyToInternal( this Stream src, Stream dest, int bufferSize, bool async, CancellationToken cancellationToken) { + using IDisposable _ = ArrayPool.Shared.RentDisposable(bufferSize, out byte[] buffer); + long totalRead = 0; + int read; if (async) { - await src.CopyToAsync(dest, bufferSize, cancellationToken).ConfigureAwait(false); + while (0 < (read = await src.ReadAsync(buffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false))) + { + totalRead += read; + await dest.WriteAsync(buffer, 0, read, cancellationToken).ConfigureAwait(false); + } } else { - src.CopyTo(dest, bufferSize); + while (0 < (read = src.Read(buffer, 0, buffer.Length))) + { + totalRead += read; + dest.Write(buffer, 0, read); + } } + return totalRead; } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs new file mode 100644 index 0000000000000..a0a46837797b9 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs @@ -0,0 +1,244 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.Buffers;
using System.Buffers.Binary;
using System.IO;
using Azure.Storage.Common;

namespace Azure.Storage.Shared;

/// <summary>
/// Constants and span-level read/write helpers for the structured message wire format.
/// Layout (v1.0): stream header, then per-segment (header, content, footer), then stream footer.
/// All multi-byte integers are little-endian. Footers are present only when
/// <see cref="Flags.StorageCrc64"/> is set.
/// </summary>
internal static class StructuredMessage
{
    public const int Crc64Length = 8;

    [Flags]
    public enum Flags
    {
        None = 0,

        /// <summary>Segment and stream footers each carry a CRC64 of their content.</summary>
        StorageCrc64 = 1,
    }

    public static class V1_0
    {
        public const byte MessageVersionByte = 1;

        public const int StreamHeaderLength = 13;
        public const int StreamHeaderVersionOffset = 0;
        public const int StreamHeaderMessageLengthOffset = 1;
        public const int StreamHeaderFlagsOffset = 9;
        public const int StreamHeaderSegmentCountOffset = 11;

        public const int SegmentHeaderLength = 10;
        public const int SegmentHeaderNumOffset = 0;
        public const int SegmentHeaderContentLengthOffset = 2;

        #region Stream Header
        /// <summary>
        /// Parses a v1.0 stream header from <paramref name="buffer"/>.
        /// </summary>
        /// <exception cref="InvalidDataException">The version byte is not 1.</exception>
        public static void ReadStreamHeader(
            ReadOnlySpan<byte> buffer,
            out long messageLength,
            out Flags flags,
            out int totalSegments)
        {
            // Use the named constant rather than a magic 13 so the size check
            // cannot drift from StreamHeaderLength.
            Errors.AssertBufferExactSize(buffer, StreamHeaderLength, nameof(buffer));
            if (buffer[StreamHeaderVersionOffset] != MessageVersionByte)
            {
                throw new InvalidDataException("Unrecognized version of structured message.");
            }
            messageLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(StreamHeaderMessageLengthOffset, 8));
            flags = (Flags)BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderFlagsOffset, 2));
            totalSegments = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderSegmentCountOffset, 2));
        }

        /// <summary>
        /// Writes a v1.0 stream header into <paramref name="buffer"/>.
        /// </summary>
        /// <returns>Bytes written: <see cref="StreamHeaderLength"/>.</returns>
        public static int WriteStreamHeader(
            Span<byte> buffer,
            long messageLength,
            Flags flags,
            int totalSegments)
        {
            Errors.AssertBufferMinimumSize(buffer, StreamHeaderLength, nameof(buffer));

            // Use the shared public offset constants; the previous local const
            // copies could silently drift from the Read* side.
            buffer[StreamHeaderVersionOffset] = MessageVersionByte;
            BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(StreamHeaderMessageLengthOffset, 8), (ulong)messageLength);
            BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(StreamHeaderFlagsOffset, 2), (ushort)flags);
            BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(StreamHeaderSegmentCountOffset, 2), (ushort)totalSegments);

            return StreamHeaderLength;
        }

        /// <summary>
        /// Gets stream header in a buffer rented from the provided ArrayPool.
        /// </summary>
        /// <returns>
        /// Disposable to return the buffer to the pool.
        /// </returns>
        public static IDisposable GetStreamHeaderBytes(
            ArrayPool<byte> pool,
            out Memory<byte> bytes,
            long messageLength,
            Flags flags,
            int totalSegments)
        {
            Argument.AssertNotNull(pool, nameof(pool));
            IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes);
            WriteStreamHeader(bytes.Span, messageLength, flags, totalSegments);
            return disposable;
        }
        #endregion

        #region StreamFooter
        public static int GetStreamFooterSize(Flags flags)
            => flags.HasFlag(Flags.StorageCrc64) ? Crc64Length : 0;

        /// <summary>
        /// Parses a stream footer. <paramref name="crc64"/> is default when the
        /// flags do not include <see cref="Flags.StorageCrc64"/>.
        /// </summary>
        public static void ReadStreamFooter(
            ReadOnlySpan<byte> buffer,
            Flags flags,
            out ulong crc64)
        {
            int expectedBufferSize = GetStreamFooterSize(flags);
            Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer));

            crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default;
        }

        /// <summary>
        /// Writes a stream footer (the optional CRC64) into <paramref name="buffer"/>.
        /// </summary>
        /// <returns>Bytes written: 0 or <see cref="Crc64Length"/>.</returns>
        public static int WriteStreamFooter(Span<byte> buffer, ReadOnlySpan<byte> crc64 = default)
        {
            int requiredSpace = 0;
            if (!crc64.IsEmpty)
            {
                Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64));
                requiredSpace += Crc64Length;
            }

            Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer));
            int offset = 0;
            if (!crc64.IsEmpty)
            {
                crc64.CopyTo(buffer.Slice(offset, Crc64Length));
                offset += Crc64Length;
            }

            return offset;
        }

        /// <summary>
        /// Gets stream footer in a buffer rented from the provided ArrayPool.
        /// </summary>
        /// <returns>
        /// Disposable to return the buffer to the pool.
        /// </returns>
        public static IDisposable GetStreamFooterBytes(
            ArrayPool<byte> pool,
            out Memory<byte> bytes,
            ReadOnlySpan<byte> crc64 = default)
        {
            Argument.AssertNotNull(pool, nameof(pool));
            // Rent exactly the footer size. Renting StreamHeaderLength (the old
            // behavior) handed back a 13-byte memory for an 8-or-0-byte footer,
            // leaving trailing garbage in `bytes` for callers that write it whole.
            int footerSize = crc64.IsEmpty ? 0 : Crc64Length;
            IDisposable disposable = pool.RentAsMemoryDisposable(footerSize, out bytes);
            WriteStreamFooter(bytes.Span, crc64);
            return disposable;
        }
        #endregion

        #region SegmentHeader
        /// <summary>
        /// Parses a segment header from <paramref name="buffer"/>.
        /// </summary>
        public static void ReadSegmentHeader(
            ReadOnlySpan<byte> buffer,
            out int segmentNum,
            out long contentLength)
        {
            Errors.AssertBufferExactSize(buffer, SegmentHeaderLength, nameof(buffer));
            segmentNum = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(SegmentHeaderNumOffset, 2));
            contentLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(SegmentHeaderContentLengthOffset, 8));
        }

        /// <summary>
        /// Writes a segment header into <paramref name="buffer"/>.
        /// </summary>
        /// <returns>Bytes written: <see cref="SegmentHeaderLength"/>.</returns>
        public static int WriteSegmentHeader(Span<byte> buffer, int segmentNum, long segmentLength)
        {
            Errors.AssertBufferMinimumSize(buffer, SegmentHeaderLength, nameof(buffer));

            BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(SegmentHeaderNumOffset, 2), (ushort)segmentNum);
            BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(SegmentHeaderContentLengthOffset, 8), (ulong)segmentLength);

            return SegmentHeaderLength;
        }

        /// <summary>
        /// Gets segment header in a buffer rented from the provided ArrayPool.
        /// </summary>
        /// <returns>
        /// Disposable to return the buffer to the pool.
        /// </returns>
        public static IDisposable GetSegmentHeaderBytes(
            ArrayPool<byte> pool,
            out Memory<byte> bytes,
            int segmentNum,
            long segmentLength)
        {
            Argument.AssertNotNull(pool, nameof(pool));
            IDisposable disposable = pool.RentAsMemoryDisposable(SegmentHeaderLength, out bytes);
            WriteSegmentHeader(bytes.Span, segmentNum, segmentLength);
            return disposable;
        }
        #endregion

        #region SegmentFooter
        public static int GetSegmentFooterSize(Flags flags)
            => flags.HasFlag(Flags.StorageCrc64) ? Crc64Length : 0;

        /// <summary>
        /// Parses a segment footer. <paramref name="crc64"/> is default when the
        /// flags do not include <see cref="Flags.StorageCrc64"/>.
        /// </summary>
        public static void ReadSegmentFooter(
            ReadOnlySpan<byte> buffer,
            Flags flags,
            out ulong crc64)
        {
            int expectedBufferSize = GetSegmentFooterSize(flags);
            Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer));

            crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default;
        }

        /// <summary>
        /// Writes a segment footer (the optional CRC64) into <paramref name="buffer"/>.
        /// </summary>
        /// <returns>Bytes written: 0 or <see cref="Crc64Length"/>.</returns>
        public static int WriteSegmentFooter(Span<byte> buffer, ReadOnlySpan<byte> crc64 = default)
        {
            int requiredSpace = 0;
            if (!crc64.IsEmpty)
            {
                Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64));
                requiredSpace += Crc64Length;
            }

            Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer));
            int offset = 0;
            if (!crc64.IsEmpty)
            {
                crc64.CopyTo(buffer.Slice(offset, Crc64Length));
                offset += Crc64Length;
            }

            return offset;
        }

        /// <summary>
        /// Gets segment footer in a buffer rented from the provided ArrayPool.
        /// </summary>
        /// <returns>
        /// Disposable to return the buffer to the pool.
        /// </returns>
        public static IDisposable GetSegmentFooterBytes(
            ArrayPool<byte> pool,
            out Memory<byte> bytes,
            ReadOnlySpan<byte> crc64 = default)
        {
            Argument.AssertNotNull(pool, nameof(pool));
            // Rent exactly the footer size (was StreamHeaderLength; see
            // GetStreamFooterBytes for why that over-allocation is harmful).
            int footerSize = crc64.IsEmpty ? 0 : Crc64Length;
            IDisposable disposable = pool.RentAsMemoryDisposable(footerSize, out bytes);
            WriteSegmentFooter(bytes.Span, crc64);
            return disposable;
        }
        #endregion
    }
}

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
using System;
using System.Buffers;
using System.Buffers.Binary;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Azure.Core;
using Azure.Core.Pipeline;

namespace Azure.Storage.Shared;

/// <summary>
/// Wraps structured-message decoding in retriable-stream semantics. When the
/// underlying transport faults mid-read, a replacement decoding stream is
/// created at the already-decoded offset and fast-forwarded, so callers see one
/// continuous decoded stream. When the message carries CRC64, the decoded
/// content CRC is accumulated across replacements and validated at end of read.
/// </summary>
internal class StructuredMessageDecodingRetriableStream : Stream
{
    public class DecodedData
    {
        public ulong Crc { get; set; }
    }

    private readonly Stream _innerRetriable;

    // Count of decoded bytes handed to the caller; used to fast-forward
    // replacement streams to the correct position.
    private long _decodedBytesRead;

    private readonly StructuredMessage.Flags _expectedFlags;

    // One entry per decoding stream used (initial + each retry replacement).
    private readonly List<StructuredMessageDecodingStream.RawDecodedData> _decodedDatas;
    private readonly Action<DecodedData> _onComplete;

    // Non-null only when the message declares StorageCrc64.
    private StorageCrc64HashAlgorithm _totalContentCrc;

    private readonly Func<long, (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> _decodingStreamFactory;
    private readonly Func<long, ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>> _decodingAsyncStreamFactory;

    public StructuredMessageDecodingRetriableStream(
        Stream initialDecodingStream,
        StructuredMessageDecodingStream.RawDecodedData initialDecodedData,
        StructuredMessage.Flags expectedFlags,
        Func<long, (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> decodingStreamFactory,
        Func<long, ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>> decodingAsyncStreamFactory,
        Action<DecodedData> onComplete,
        ResponseClassifier responseClassifier,
        int maxRetries)
    {
        _decodingStreamFactory = decodingStreamFactory;
        _decodingAsyncStreamFactory = decodingAsyncStreamFactory;
        _innerRetriable = RetriableStream.Create(initialDecodingStream, StreamFactory, StreamFactoryAsync, responseClassifier, maxRetries);
        _decodedDatas = new() { initialDecodedData };
        _expectedFlags = expectedFlags;
        _onComplete = onComplete;

        if (expectedFlags.HasFlag(StructuredMessage.Flags.StorageCrc64))
        {
            _totalContentCrc = StorageCrc64HashAlgorithm.Create();
        }
    }

    // Factory handed to RetriableStream: restart decoding at the last
    // fully-CRC'd segment boundary, then fast-forward to the exact byte.
    private Stream StreamFactory(long _)
    {
        long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum();
        (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = _decodingStreamFactory(offset);
        _decodedDatas.Add(decodedData);
        FastForwardInternal(decodingStream, _decodedBytesRead - offset, async: false).EnsureCompleted();
        return decodingStream;
    }

    private async ValueTask<Stream> StreamFactoryAsync(long _)
    {
        long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum();
        (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = await _decodingAsyncStreamFactory(offset).ConfigureAwait(false);
        _decodedDatas.Add(decodedData);
        await FastForwardInternal(decodingStream, _decodedBytesRead - offset, async: true).ConfigureAwait(false);
        return decodingStream;
    }

    /// <summary>
    /// Reads and discards <paramref name="bytes"/> bytes from <paramref name="stream"/>.
    /// </summary>
    /// <exception cref="EndOfStreamException">
    /// The stream ended before the requested count was consumed.
    /// </exception>
    private static async ValueTask FastForwardInternal(Stream stream, long bytes, bool async)
    {
        using (ArrayPool<byte>.Shared.RentDisposable(4 * Constants.KB, out byte[] buffer))
        {
            while (bytes > 0)
            {
                int read = async
                    ? await stream.ReadAsync(buffer, 0, (int)Math.Min(bytes, buffer.Length)).ConfigureAwait(false)
                    : stream.Read(buffer, 0, (int)Math.Min(bytes, buffer.Length));
                if (read == 0)
                {
                    // Previously a zero-byte read left `bytes` unchanged and
                    // spun forever; a short replacement stream must fail loudly.
                    throw new EndOfStreamException(
                        "Unexpected end of stream while fast forwarding replacement stream.");
                }
                bytes -= read;
            }
        }
    }

    protected override void Dispose(bool disposing)
    {
        if (disposing)
        {
            _decodedDatas.Clear();
            _innerRetriable.Dispose();
        }
        // Previously omitted; keeps Stream's own dispose bookkeeping intact.
        base.Dispose(disposing);
    }

    // Invoked once a read reports end-of-stream; validates the total CRC (if
    // any) and surfaces the final decoded metadata to the caller's callback.
    private void OnCompleted()
    {
        DecodedData final = new();
        if (_totalContentCrc != null)
        {
            final.Crc = ValidateCrc();
        }
        _onComplete?.Invoke(final);
    }

    private ulong ValidateCrc()
    {
        using IDisposable _ = ArrayPool<byte>.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf);
        Span<byte> calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length);
        _totalContentCrc.GetCurrentHash(calculatedBytes);
        ulong calculated = BinaryPrimitives.ReadUInt64LittleEndian(calculatedBytes);

        // Single stream: trust its reported total. Multiple streams (retries):
        // compose the per-segment CRCs into a total.
        ulong reported = _decodedDatas.Count == 1
            ? _decodedDatas.First().TotalCrc.Value
            : StorageCrc64Composer.Compose(_decodedDatas.SelectMany(d => d.SegmentCrcs));

        if (calculated != reported)
        {
            Span<byte> reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length);
            BinaryPrimitives.WriteUInt64LittleEndian(reportedBytes, reported);
            throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes);
        }

        return calculated;
    }

    #region Read
    public override int Read(byte[] buffer, int offset, int count)
    {
        int read = _innerRetriable.Read(buffer, offset, count);
        _decodedBytesRead += read;
        if (read == 0)
        {
            OnCompleted();
        }
        else
        {
            _totalContentCrc?.Append(new ReadOnlySpan<byte>(buffer, offset, read));
        }
        return read;
    }

    public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
    {
        int read = await _innerRetriable.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false);
        _decodedBytesRead += read;
        if (read == 0)
        {
            OnCompleted();
        }
        else
        {
            _totalContentCrc?.Append(new ReadOnlySpan<byte>(buffer, offset, read));
        }
        return read;
    }

#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER
    public override int Read(Span<byte> buffer)
    {
        int read = _innerRetriable.Read(buffer);
        _decodedBytesRead += read;
        if (read == 0)
        {
            OnCompleted();
        }
        else
        {
            _totalContentCrc?.Append(buffer.Slice(0, read));
        }
        return read;
    }

    public override async ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default)
    {
        int read = await _innerRetriable.ReadAsync(buffer, cancellationToken).ConfigureAwait(false);
        _decodedBytesRead += read;
        if (read == 0)
        {
            OnCompleted();
        }
        else
        {
            _totalContentCrc?.Append(buffer.Span.Slice(0, read));
        }
        return read;
    }
#endif

    public override int ReadByte()
    {
        int val = _innerRetriable.ReadByte();
        if (val == -1)
        {
            // End of stream: do NOT count a byte (old code incremented
            // _decodedBytesRead unconditionally, skewing fast-forward offsets).
            OnCompleted();
            return val;
        }
        _decodedBytesRead += 1;
        if (_totalContentCrc != null)
        {
            // Old code skipped CRC accumulation here entirely, so any
            // ReadByte() call guaranteed a checksum mismatch at completion.
            Span<byte> single = stackalloc byte[1];
            single[0] = (byte)val;
            _totalContentCrc.Append(single);
        }
        return val;
    }

    public override int EndRead(IAsyncResult asyncResult)
    {
        int read = _innerRetriable.EndRead(asyncResult);
        _decodedBytesRead += read;
        if (read == 0)
        {
            OnCompleted();
        }
        return read;
    }
    #endregion

    #region Passthru
    public override bool CanRead => _innerRetriable.CanRead;

    public override bool CanSeek => _innerRetriable.CanSeek;

    public override bool CanWrite => _innerRetriable.CanWrite;

    public override bool CanTimeout => _innerRetriable.CanTimeout;

    public override long Length => _innerRetriable.Length;

    public override long Position { get => _innerRetriable.Position; set => _innerRetriable.Position = value; }

    public override void Flush() => _innerRetriable.Flush();

    public override Task FlushAsync(CancellationToken cancellationToken) => _innerRetriable.FlushAsync(cancellationToken);

    public override long Seek(long offset, SeekOrigin origin) => _innerRetriable.Seek(offset, origin);

    public override void SetLength(long value) => _innerRetriable.SetLength(value);

    public override void Write(byte[] buffer, int offset, int count) => _innerRetriable.Write(buffer, offset, count);

    public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _innerRetriable.WriteAsync(buffer, offset, count, cancellationToken);

    public override void WriteByte(byte value) => _innerRetriable.WriteByte(value);

    public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginWrite(buffer, offset, count, callback, state);

    public override void EndWrite(IAsyncResult asyncResult) => _innerRetriable.EndWrite(asyncResult);

    public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginRead(buffer, offset, count, callback, state);

    public override int ReadTimeout { get => _innerRetriable.ReadTimeout; set => _innerRetriable.ReadTimeout = value; }

    public override int WriteTimeout { get => _innerRetriable.WriteTimeout; set => _innerRetriable.WriteTimeout = value; }

#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER
    public override void Write(ReadOnlySpan<byte> buffer) => _innerRetriable.Write(buffer);

    public override ValueTask WriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) => _innerRetriable.WriteAsync(buffer, cancellationToken);
#endif
    #endregion
}

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.Buffers;
using System.Buffers.Binary;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Azure.Storage.Common;

namespace Azure.Storage.Shared;

/// <summary>
/// Decodes a structured message stream as the data is read.
///
/// Wraps the inner stream in a <see cref="BufferedStream"/>, which avoids using its internal
/// buffer if individual Read() calls are larger than it. This ensures one of the three scenarios:
/// (1) read buffer >= stream buffer: inline metadata is safely extracted in only one read to the
///     true inner stream;
/// (2) read buffer &lt; next inline metadata: the stream buffer has been activated, and we can read
///     multiple small times from the inner stream without multi-reading the real stream, even when
///     partway through an existing stream buffer;
/// (3) else: same as #1, but the already-allocated stream buffer slightly improves resource churn
///     when reading the inner stream.
/// </summary>
internal class StructuredMessageDecodingStream : Stream
{
    internal class RawDecodedData
    {
        public long? InnerStreamLength { get; set; }
        public int? TotalSegments { get; set; }
        public StructuredMessage.Flags? Flags { get; set; }
        public List<(ulong SegmentCrc, long SegmentLen)> SegmentCrcs { get; } = new();
        public ulong? TotalCrc { get; set; }
        public bool DecodeCompleted { get; set; }
    }

    // Which part of the wire format the decoder is currently positioned in.
    private enum SMRegion
    {
        StreamHeader,
        StreamFooter,
        SegmentHeader,
        SegmentFooter,
        SegmentContent,
    }

    private readonly Stream _innerBufferedStream;

    // Scratch space for metadata that straddles a caller read boundary.
    private byte[] _metadataBuffer = ArrayPool<byte>.Shared.Rent(Constants.KB);
    private int _metadataBufferOffset = 0;
    private int _metadataBufferLength = 0;

    private int _streamHeaderLength;
    private int _streamFooterLength;
    private int _segmentHeaderLength;
    private int _segmentFooterLength;

    private long? _expectedInnerStreamLength;

    private bool _disposed;

    private readonly RawDecodedData _decodedData;
    private StorageCrc64HashAlgorithm _totalContentCrc;
    private StorageCrc64HashAlgorithm _segmentCrc;

    private readonly bool _validateChecksums;

    public override bool CanRead => true;

    public override bool CanWrite => false;

    public override bool CanSeek => false;

    public override bool CanTimeout => _innerBufferedStream.CanTimeout;

    public override int ReadTimeout => _innerBufferedStream.ReadTimeout;

    public override int WriteTimeout => _innerBufferedStream.WriteTimeout;

    public override long Length => throw new NotSupportedException();

    public override long Position
    {
        get => throw new NotSupportedException();
        set => throw new NotSupportedException();
    }

    /// <summary>
    /// Wraps <paramref name="innerStream"/> in a decoding stream.
    /// </summary>
    /// <returns>The decoding stream and the metadata object it populates as it reads.</returns>
    public static (Stream DecodedStream, RawDecodedData DecodedData) WrapStream(
        Stream innerStream,
        long? expectedStreamLength = default)
    {
        RawDecodedData data = new();
        return (new StructuredMessageDecodingStream(innerStream, data, expectedStreamLength), data);
    }

    private StructuredMessageDecodingStream(
        Stream innerStream,
        RawDecodedData decodedData,
        long? expectedStreamLength)
    {
        Argument.AssertNotNull(innerStream, nameof(innerStream));
        Argument.AssertNotNull(decodedData, nameof(decodedData));

        _expectedInnerStreamLength = expectedStreamLength;
        _innerBufferedStream = new BufferedStream(innerStream);
        _decodedData = decodedData;

        // Assumes stream will be structured message 1.0. Will validate this when consuming stream.
        _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength;
        _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength;

        _validateChecksums = true;
    }

    #region Write
    public override void Flush() => throw new NotSupportedException();

    public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();

    public override void SetLength(long value) => throw new NotSupportedException();
    #endregion

    #region Read
    // Read loop shape (all four overloads): decode in place; a pass can consume
    // only metadata and yield 0 decoded bytes, so loop until either content is
    // produced or the inner stream is exhausted.
    public override int Read(byte[] buf, int offset, int count)
    {
        int decodedRead;
        int read;
        do
        {
            read = _innerBufferedStream.Read(buf, offset, count);
            _innerStreamConsumed += read;
            decodedRead = Decode(new Span<byte>(buf, offset, read));
        } while (decodedRead <= 0 && read > 0);

        if (read <= 0)
        {
            AssertDecodeFinished();
        }

        return decodedRead;
    }

    public override async Task<int> ReadAsync(byte[] buf, int offset, int count, CancellationToken cancellationToken)
    {
        int decodedRead;
        int read;
        do
        {
            read = await _innerBufferedStream.ReadAsync(buf, offset, count, cancellationToken).ConfigureAwait(false);
            _innerStreamConsumed += read;
            decodedRead = Decode(new Span<byte>(buf, offset, read));
        } while (decodedRead <= 0 && read > 0);

        if (read <= 0)
        {
            AssertDecodeFinished();
        }

        return decodedRead;
    }

#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER
    public override int Read(Span<byte> buf)
    {
        int decodedRead;
        int read;
        do
        {
            read = _innerBufferedStream.Read(buf);
            _innerStreamConsumed += read;
            decodedRead = Decode(buf.Slice(0, read));
        } while (decodedRead <= 0 && read > 0);

        if (read <= 0)
        {
            AssertDecodeFinished();
        }

        return decodedRead;
    }

    public override async ValueTask<int> ReadAsync(Memory<byte> buf, CancellationToken cancellationToken = default)
    {
        int decodedRead;
        int read;
        do
        {
            // Fix: propagate the caller's token (was dropped, making this
            // overload uncancellable).
            read = await _innerBufferedStream.ReadAsync(buf, cancellationToken).ConfigureAwait(false);
            _innerStreamConsumed += read;
            decodedRead = Decode(buf.Slice(0, read).Span);
        } while (decodedRead <= 0 && read > 0);

        if (read <= 0)
        {
            AssertDecodeFinished();
        }

        return decodedRead;
    }
#endif

    private void AssertDecodeFinished()
    {
        if (_streamFooterLength > 0 && !_decodedData.DecodeCompleted)
        {
            throw Errors.InvalidStructuredMessage("Premature end of stream.");
        }
        _decodedData.DecodeCompleted = true;
    }

    private long _innerStreamConsumed = 0;
    private long _decodedContentConsumed = 0;
    private SMRegion _currentRegion = SMRegion.StreamHeader;
    private int _currentSegmentNum = 0;
    private long _currentSegmentContentLength;
    private long _currentSegmentContentRemaining;
    private long CurrentRegionLength => _currentRegion switch
    {
        SMRegion.StreamHeader => _streamHeaderLength,
        SMRegion.StreamFooter => _streamFooterLength,
        SMRegion.SegmentHeader => _segmentHeaderLength,
        SMRegion.SegmentFooter => _segmentFooterLength,
        SMRegion.SegmentContent => _currentSegmentContentLength,
        _ => 0,
    };

    /// <summary>
    /// Decodes given bytes in place. Decoding based on internal stream position info.
    /// Decoded data size will be less than or equal to encoded data length.
    /// </summary>
    /// <returns>
    /// Length of the decoded data in <paramref name="buffer"/>.
    /// </returns>
    private int Decode(Span<byte> buffer)
    {
        if (buffer.IsEmpty)
        {
            return 0;
        }
        // Metadata spans ("gaps") to squeeze out of the buffer after parsing.
        List<(int Offset, int Count)> gaps = new();

        int bufferConsumed = ProcessMetadataBuffer(buffer);

        if (bufferConsumed > 0)
        {
            gaps.Add((0, bufferConsumed));
        }

        while (bufferConsumed < buffer.Length)
        {
            if (_currentRegion == SMRegion.SegmentContent)
            {
                // Content bytes stay in place; just CRC them and advance.
                int read = (int)Math.Min(buffer.Length - bufferConsumed, _currentSegmentContentRemaining);
                _totalContentCrc?.Append(buffer.Slice(bufferConsumed, read));
                _segmentCrc?.Append(buffer.Slice(bufferConsumed, read));
                bufferConsumed += read;
                _decodedContentConsumed += read;
                _currentSegmentContentRemaining -= read;
                if (_currentSegmentContentRemaining == 0)
                {
                    _currentRegion = SMRegion.SegmentFooter;
                }
            }
            else if (buffer.Length - bufferConsumed < CurrentRegionLength)
            {
                // Metadata is split across this read; stash the partial bytes.
                SavePartialMetadata(buffer.Slice(bufferConsumed));
                gaps.Add((bufferConsumed, buffer.Length - bufferConsumed));
                bufferConsumed = buffer.Length;
            }
            else
            {
                int processed = _currentRegion switch
                {
                    SMRegion.StreamHeader => ProcessStreamHeader(buffer.Slice(bufferConsumed)),
                    SMRegion.StreamFooter => ProcessStreamFooter(buffer.Slice(bufferConsumed)),
                    SMRegion.SegmentHeader => ProcessSegmentHeader(buffer.Slice(bufferConsumed)),
                    SMRegion.SegmentFooter => ProcessSegmentFooter(buffer.Slice(bufferConsumed)),
                    _ => 0,
                };
                // TODO surface error if processed is 0
                gaps.Add((bufferConsumed, processed));
                bufferConsumed += processed;
            }
        }

        if (gaps.Count == 0)
        {
            return buffer.Length;
        }

        // Compact content left over the gaps. `gaps` is already sorted by
        // offset due to how it was assembled.
        int gap = 0;
        for (int i = gaps.First().Offset; i < buffer.Length; i++)
        {
            if (gaps.Count > 0 && gaps.First().Offset == i)
            {
                int count = gaps.First().Count;
                gap += count;
                i += count - 1;
                gaps.RemoveAt(0);
            }
            else
            {
                buffer[i - gap] = buffer[i];
            }
        }
        return buffer.Length - gap;
    }

    /// <summary>
    /// Processes metadata in the internal buffer, if any. Appends any necessary data
    /// from the append buffer to complete metadata.
    /// </summary>
    /// <returns>
    /// Bytes consumed from <paramref name="append"/>.
    /// </returns>
    private int ProcessMetadataBuffer(ReadOnlySpan<byte> append)
    {
        if (_metadataBufferLength == 0)
        {
            return 0;
        }
        if (_currentRegion == SMRegion.SegmentContent)
        {
            return 0;
        }
        int appended = 0;
        if (_metadataBufferLength < CurrentRegionLength && append.Length > 0)
        {
            appended = Math.Min((int)CurrentRegionLength - _metadataBufferLength, append.Length);
            SavePartialMetadata(append.Slice(0, appended));
        }
        if (_metadataBufferLength == CurrentRegionLength)
        {
            Span<byte> metadata = new(_metadataBuffer, _metadataBufferOffset, (int)CurrentRegionLength);
            switch (_currentRegion)
            {
                case SMRegion.StreamHeader:
                    ProcessStreamHeader(metadata);
                    break;
                case SMRegion.StreamFooter:
                    ProcessStreamFooter(metadata);
                    break;
                case SMRegion.SegmentHeader:
                    ProcessSegmentHeader(metadata);
                    break;
                case SMRegion.SegmentFooter:
                    ProcessSegmentFooter(metadata);
                    break;
            }
            _metadataBufferOffset = 0;
            _metadataBufferLength = 0;
        }
        return appended;
    }

    private void SavePartialMetadata(ReadOnlySpan<byte> span)
    {
        // safely array resize w/ ArrayPool
        if (_metadataBufferLength + span.Length > _metadataBuffer.Length)
        {
            ResizeMetadataBuffer(2 * (_metadataBufferLength + span.Length));
        }

        // Realign any existing content to offset 0 first (shared helper;
        // previously duplicated inline here).
        AlignMetadataBuffer();

        span.CopyTo(new Span<byte>(_metadataBuffer, _metadataBufferOffset + _metadataBufferLength, span.Length));
        _metadataBufferLength += span.Length;
    }

    private int ProcessStreamHeader(ReadOnlySpan<byte> span)
    {
        StructuredMessage.V1_0.ReadStreamHeader(
            span.Slice(0, _streamHeaderLength),
            out long streamLength,
            out StructuredMessage.Flags flags,
            out int totalSegments);

        _decodedData.InnerStreamLength = streamLength;
        _decodedData.Flags = flags;
        _decodedData.TotalSegments = totalSegments;

        if (_expectedInnerStreamLength.HasValue && _expectedInnerStreamLength.Value != streamLength)
        {
            throw Errors.InvalidStructuredMessage("Unexpected message size.");
        }

        if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64))
        {
            _segmentFooterLength = StructuredMessage.Crc64Length;
            _streamFooterLength = StructuredMessage.Crc64Length;
            if (_validateChecksums)
            {
                _segmentCrc = StorageCrc64HashAlgorithm.Create();
                _totalContentCrc = StorageCrc64HashAlgorithm.Create();
            }
        }
        _currentRegion = SMRegion.SegmentHeader;
        return _streamHeaderLength;
    }

    private int ProcessStreamFooter(ReadOnlySpan<byte> span)
    {
        int footerLen = StructuredMessage.V1_0.GetStreamFooterSize(_decodedData.Flags.Value);
        StructuredMessage.V1_0.ReadStreamFooter(
            span.Slice(0, footerLen),
            _decodedData.Flags.Value,
            out ulong reportedCrc);
        if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64))
        {
            if (_validateChecksums)
            {
                ValidateCrc64(_totalContentCrc, reportedCrc);
            }
            _decodedData.TotalCrc = reportedCrc;
        }

        if (_innerStreamConsumed != _decodedData.InnerStreamLength)
        {
            throw Errors.InvalidStructuredMessage("Unexpected message size.");
        }
        if (_currentSegmentNum != _decodedData.TotalSegments)
        {
            throw Errors.InvalidStructuredMessage("Missing expected message segments.");
        }

        _decodedData.DecodeCompleted = true;
        return footerLen;
    }

    private int ProcessSegmentHeader(ReadOnlySpan<byte> span)
    {
        StructuredMessage.V1_0.ReadSegmentHeader(
            span.Slice(0, _segmentHeaderLength),
            out int newSegNum,
            out _currentSegmentContentLength);
        _currentSegmentContentRemaining = _currentSegmentContentLength;
        // Segments must arrive strictly in order, 1-indexed.
        if (newSegNum != _currentSegmentNum + 1)
        {
            throw Errors.InvalidStructuredMessage("Unexpected segment number in structured message.");
        }
        _currentSegmentNum = newSegNum;
        _currentRegion = SMRegion.SegmentContent;
        return _segmentHeaderLength;
    }

    private int ProcessSegmentFooter(ReadOnlySpan<byte> span)
    {
        int footerLen = StructuredMessage.V1_0.GetSegmentFooterSize(_decodedData.Flags.Value);
        StructuredMessage.V1_0.ReadSegmentFooter(
            span.Slice(0, footerLen),
            _decodedData.Flags.Value,
            out ulong reportedCrc);
        if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64))
        {
            if (_validateChecksums)
            {
                ValidateCrc64(_segmentCrc, reportedCrc);
                // Fresh CRC accumulator for the next segment.
                _segmentCrc = StorageCrc64HashAlgorithm.Create();
            }
            _decodedData.SegmentCrcs.Add((reportedCrc, _currentSegmentContentLength));
        }
        _currentRegion = _currentSegmentNum == _decodedData.TotalSegments ? SMRegion.StreamFooter : SMRegion.SegmentHeader;
        return footerLen;
    }

    private static void ValidateCrc64(StorageCrc64HashAlgorithm calculation, ulong reported)
    {
        using IDisposable _ = ArrayPool<byte>.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf);
        Span<byte> calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length);
        Span<byte> reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length);
        calculation.GetCurrentHash(calculatedBytes);
        reported.WriteCrc64(reportedBytes);
        if (!calculatedBytes.SequenceEqual(reportedBytes))
        {
            throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes);
        }
    }
    #endregion

    public override long Seek(long offset, SeekOrigin origin)
        => throw new NotSupportedException();

    protected override void Dispose(bool disposing)
    {
        base.Dispose(disposing);

        if (_disposed)
        {
            return;
        }

        if (disposing)
        {
            _innerBufferedStream.Dispose();
            _disposed = true;
        }
    }

    private void ResizeMetadataBuffer(int newSize)
    {
        byte[] newBuf = ArrayPool<byte>.Shared.Rent(newSize);
        Array.Copy(_metadataBuffer, _metadataBufferOffset, newBuf, 0, _metadataBufferLength);
        ArrayPool<byte>.Shared.Return(_metadataBuffer);
        _metadataBuffer = newBuf;
        // Fix: the copy compacted the data to index 0 of the new array; leaving
        // the old offset in place would make later reads index past the data.
        _metadataBufferOffset = 0;
    }

    private void AlignMetadataBuffer()
    {
        if (_metadataBufferOffset != 0 && _metadataBufferLength != 0)
        {
            // Don't use Array.Copy() to move elements within the same array.
            for (int i = 0; i < _metadataBufferLength; i++)
            {
                _metadataBuffer[i] = _metadataBuffer[_metadataBufferOffset + i];
            }
            _metadataBufferOffset = 0;
        }
    }
}

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

using System;
using System.Buffers;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Azure.Core.Pipeline;
using Azure.Storage.Common;

namespace Azure.Storage.Shared;

internal class StructuredMessageEncodingStream : Stream
{
    private readonly Stream _innerStream;

    private readonly int _streamHeaderLength;
    private readonly int _streamFooterLength;
    private readonly int _segmentHeaderLength;
    private readonly int _segmentFooterLength;
    private readonly int _segmentContentLength;

    private readonly StructuredMessage.Flags _flags;
    private bool _disposed;

    private bool UseCrcSegment => _flags.HasFlag(StructuredMessage.Flags.StorageCrc64);
    private readonly StorageCrc64HashAlgorithm _totalCrc;
    private StorageCrc64HashAlgorithm _segmentCrc;
    private readonly byte[] _segmentCrcs;
    private int _latestSegmentCrcd = 0;

    #region Segments
    /// <summary>
    /// Gets the 1-indexed segment number the underlying stream is currently positioned in.
    /// 1-indexed to match segment labelling as specified by SM spec.
+ /// + private int CurrentInnerSegment => (int)Math.Floor(_innerStream.Position / (float)_segmentContentLength) + 1; + + /// + /// Gets the 1-indexed segment number the encoded data stream is currently positioned in. + /// 1-indexed to match segment labelling as specified by SM spec. + /// + private int CurrentEncodingSegment + { + get + { + // edge case: always on final segment when at end of inner stream + if (_innerStream.Position == _innerStream.Length) + { + return TotalSegments; + } + // when writing footer, inner stream is positioned at next segment, + // but this stream is still writing the previous one + if (_currentRegion == SMRegion.SegmentFooter) + { + return CurrentInnerSegment - 1; + } + return CurrentInnerSegment; + } + } + + /// + /// Segment length including header and footer. + /// + private int SegmentTotalLength => _segmentHeaderLength + _segmentContentLength + _segmentFooterLength; + + private int TotalSegments => GetTotalSegments(_innerStream, _segmentContentLength); + private static int GetTotalSegments(Stream innerStream, long segmentContentLength) + { + return (int)Math.Ceiling(innerStream.Length / (float)segmentContentLength); + } + #endregion + + public override bool CanRead => true; + + public override bool CanWrite => false; + + public override bool CanSeek => _innerStream.CanSeek; + + public override bool CanTimeout => _innerStream.CanTimeout; + + public override int ReadTimeout => _innerStream.ReadTimeout; + + public override int WriteTimeout => _innerStream.WriteTimeout; + + public override long Length => + _streamHeaderLength + _streamFooterLength + + (_segmentHeaderLength + _segmentFooterLength) * TotalSegments + + _innerStream.Length; + + #region Position + private enum SMRegion + { + StreamHeader, + StreamFooter, + SegmentHeader, + SegmentFooter, + SegmentContent, + } + + private SMRegion _currentRegion = SMRegion.StreamHeader; + private int _currentRegionPosition = 0; + + private long _maxSeekPosition = 0; + + public override 
long Position + { + get + { + return _currentRegion switch + { + SMRegion.StreamHeader => _currentRegionPosition, + SMRegion.StreamFooter => _streamHeaderLength + + TotalSegments * (_segmentHeaderLength + _segmentFooterLength) + + _innerStream.Length + + _currentRegionPosition, + SMRegion.SegmentHeader => _innerStream.Position + + _streamHeaderLength + + (CurrentEncodingSegment - 1) * (_segmentHeaderLength + _segmentFooterLength) + + _currentRegionPosition, + SMRegion.SegmentFooter => _innerStream.Position + + _streamHeaderLength + + // Inner stream has moved to next segment but we're still writing the previous segment footer + CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - + _segmentFooterLength + _currentRegionPosition, + SMRegion.SegmentContent => _innerStream.Position + + _streamHeaderLength + + CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - + _segmentFooterLength, + _ => throw new InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), + }; + } + set + { + Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); + if (value < _streamHeaderLength) + { + _currentRegion = SMRegion.StreamHeader; + _currentRegionPosition = (int)value; + _innerStream.Position = 0; + return; + } + if (value >= Length - _streamFooterLength) + { + _currentRegion = SMRegion.StreamFooter; + _currentRegionPosition = (int)(value - (Length - _streamFooterLength)); + _innerStream.Position = _innerStream.Length; + return; + } + int newSegmentNum = 1 + (int)Math.Floor((value - _streamHeaderLength) / (double)(_segmentHeaderLength + _segmentFooterLength + _segmentContentLength)); + int segmentPosition = (int)(value - _streamHeaderLength - + ((newSegmentNum - 1) * (_segmentHeaderLength + _segmentFooterLength + _segmentContentLength))); + + if (segmentPosition < _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = (int)((value - _streamHeaderLength) % 
SegmentTotalLength); + _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength; + return; + } + if (segmentPosition < _segmentHeaderLength + _segmentContentLength) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - + _segmentHeaderLength; + _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength + _currentRegionPosition; + return; + } + + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - + _segmentHeaderLength - _segmentContentLength; + _innerStream.Position = newSegmentNum * _segmentContentLength; + } + } + #endregion + + public StructuredMessageEncodingStream( + Stream innerStream, + int segmentContentLength, + StructuredMessage.Flags flags) + { + Argument.AssertNotNull(innerStream, nameof(innerStream)); + if (innerStream.GetLengthOrDefault() == default) + { + throw new ArgumentException("Stream must have known length.", nameof(innerStream)); + } + if (innerStream.Position != 0) + { + throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); + } + // stream logic likely breaks down with segment length of 1; enforce >=2 rather than just positive number + // real world scenarios will probably use a minimum of tens of KB + Argument.AssertInRange(segmentContentLength, 2, int.MaxValue, nameof(segmentContentLength)); + + _flags = flags; + _segmentContentLength = segmentContentLength; + + _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; + _streamFooterLength = UseCrcSegment ? StructuredMessage.Crc64Length : 0; + _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; + _segmentFooterLength = UseCrcSegment ? 
StructuredMessage.Crc64Length : 0; + + if (UseCrcSegment) + { + _totalCrc = StorageCrc64HashAlgorithm.Create(); + _segmentCrc = StorageCrc64HashAlgorithm.Create(); + _segmentCrcs = ArrayPool.Shared.Rent( + GetTotalSegments(innerStream, segmentContentLength) * StructuredMessage.Crc64Length); + innerStream = ChecksumCalculatingStream.GetReadStream(innerStream, span => + { + _totalCrc.Append(span); + _segmentCrc.Append(span); + }); + } + + _innerStream = innerStream; + } + + #region Write + public override void Flush() => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotSupportedException(); + #endregion + + #region Read + public override int Read(byte[] buffer, int offset, int count) + => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); + + private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < count && Position < Length) + { + int subreadOffset = offset + totalRead; + int subreadCount = count - totalRead; + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); + 
break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. + if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamInternal( + buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override int Read(Span buffer) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } +#endif + + #region Read Headers/Footers + private int ReadFromStreamHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( + ArrayPool.Shared, out Memory headerBytes, Length, _flags, TotalSegments); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _streamHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromStreamFooter(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read <= 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( + ArrayPool.Shared, + out Memory footerBytes, + crc64: UseCrcSegment + ? 
_totalCrc.GetCurrentHash() // TODO array pooling + : default); + footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + return read; + } + + private int ReadFromSegmentHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( + ArrayPool.Shared, + out Memory headerBytes, + CurrentInnerSegment, + Math.Min(_segmentContentLength, _innerStream.Length - _innerStream.Position)); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromSegmentFooter(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read < 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( + ArrayPool.Shared, + out Memory headerBytes, + crc64: UseCrcSegment + ? new Span( + _segmentCrcs, + (CurrentEncodingSegment-1) * _totalCrc.HashLengthInBytes, + _totalCrc.HashLengthInBytes) + : default); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentFooterLength) + { + _currentRegion = _innerStream.Position == _innerStream.Length + ? 
SMRegion.StreamFooter : SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + #endregion + + #region ReadUnderlyingStream + private int MaxInnerStreamRead => _segmentContentLength - _currentRegionPosition; + + private void CleanupContentSegment() + { + if (_currentRegionPosition == _segmentContentLength || _innerStream.Position >= _innerStream.Length) + { + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = 0; + if (UseCrcSegment && CurrentEncodingSegment - 1 == _latestSegmentCrcd) + { + _segmentCrc.GetCurrentHash(new Span( + _segmentCrcs, + _latestSegmentCrcd * _segmentCrc.HashLengthInBytes, + _segmentCrc.HashLengthInBytes)); + _latestSegmentCrcd++; + _segmentCrc = StorageCrc64HashAlgorithm.Create(); + } + } + } + + private async ValueTask ReadFromInnerStreamInternal( + byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int read = async + ? await _innerStream.ReadAsync(buffer, offset, Math.Min(count, MaxInnerStreamRead)).ConfigureAwait(false) + : _innerStream.Read(buffer, offset, Math.Min(count, MaxInnerStreamRead)); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + private int ReadFromInnerStream(Span buffer) + { + if (MaxInnerStreamRead < buffer.Length) + { + buffer = buffer.Slice(0, MaxInnerStreamRead); + } + int read = _innerStream.Read(buffer); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + + private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) + { + if (MaxInnerStreamRead < buffer.Length) + { + buffer = buffer.Slice(0, MaxInnerStreamRead); + } + int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } +#endif + #endregion + + // don't allow stream to seek too far forward. 
track how far the stream has been naturally read. + private void UpdateLatestPosition() + { + if (_maxSeekPosition < Position) + { + _maxSeekPosition = Position; + } + } + #endregion + + public override long Seek(long offset, SeekOrigin origin) + { + switch (origin) + { + case SeekOrigin.Begin: + Position = offset; + break; + case SeekOrigin.Current: + Position += offset; + break; + case SeekOrigin.End: + Position = Length + offset; + break; + } + return Position; + } + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + if (_disposed) + { + return; + } + + if (disposing) + { + _innerStream.Dispose(); + _disposed = true; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs new file mode 100644 index 0000000000000..3569ef4339735 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs @@ -0,0 +1,451 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core.Pipeline; +using Azure.Storage.Common; + +namespace Azure.Storage.Shared; + +internal class StructuredMessagePrecalculatedCrcWrapperStream : Stream +{ + private readonly Stream _innerStream; + + private readonly int _streamHeaderLength; + private readonly int _streamFooterLength; + private readonly int _segmentHeaderLength; + private readonly int _segmentFooterLength; + + private bool _disposed; + + private readonly byte[] _crc; + + public override bool CanRead => true; + + public override bool CanWrite => false; + + public override bool CanSeek => _innerStream.CanSeek; + + public override bool CanTimeout => _innerStream.CanTimeout; + + public override int ReadTimeout => _innerStream.ReadTimeout; + + public override int WriteTimeout => _innerStream.WriteTimeout; + + public override long Length => + _streamHeaderLength + _streamFooterLength + + _segmentHeaderLength + _segmentFooterLength + + _innerStream.Length; + + #region Position + private enum SMRegion + { + StreamHeader, + StreamFooter, + SegmentHeader, + SegmentFooter, + SegmentContent, + } + + private SMRegion _currentRegion = SMRegion.StreamHeader; + private int _currentRegionPosition = 0; + + private long _maxSeekPosition = 0; + + public override long Position + { + get + { + return _currentRegion switch + { + SMRegion.StreamHeader => _currentRegionPosition, + SMRegion.SegmentHeader => _innerStream.Position + + _streamHeaderLength + + _currentRegionPosition, + SMRegion.SegmentContent => _streamHeaderLength + + _segmentHeaderLength + + _innerStream.Position, + SMRegion.SegmentFooter => _streamHeaderLength + + _segmentHeaderLength + + _innerStream.Length + + _currentRegionPosition, + SMRegion.StreamFooter => _streamHeaderLength + + _segmentHeaderLength + + _innerStream.Length + + _segmentFooterLength + + _currentRegionPosition, + _ => throw new 
InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), + }; + } + set + { + Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); + if (value < _streamHeaderLength) + { + _currentRegion = SMRegion.StreamHeader; + _currentRegionPosition = (int)value; + _innerStream.Position = 0; + return; + } + if (value < _streamHeaderLength + _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = (int)(value - _streamHeaderLength); + _innerStream.Position = 0; + return; + } + if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength); + _innerStream.Position = value - _streamHeaderLength - _segmentHeaderLength; + return; + } + if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length + _segmentFooterLength) + { + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length); + _innerStream.Position = _innerStream.Length; + return; + } + + _currentRegion = SMRegion.StreamFooter; + _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length - _segmentFooterLength); + _innerStream.Position = _innerStream.Length; + } + } + #endregion + + public StructuredMessagePrecalculatedCrcWrapperStream( + Stream innerStream, + ReadOnlySpan precalculatedCrc) + { + Argument.AssertNotNull(innerStream, nameof(innerStream)); + if (innerStream.GetLengthOrDefault() == default) + { + throw new ArgumentException("Stream must have known length.", nameof(innerStream)); + } + if (innerStream.Position != 0) + { + throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); + } + + _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; + _streamFooterLength = StructuredMessage.Crc64Length; + 
_segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; + _segmentFooterLength = StructuredMessage.Crc64Length; + + _crc = ArrayPool.Shared.Rent(StructuredMessage.Crc64Length); + precalculatedCrc.CopyTo(_crc); + + _innerStream = innerStream; + } + + #region Write + public override void Flush() => throw new NotSupportedException(); + + public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); + + public override void SetLength(long value) => throw new NotSupportedException(); + #endregion + + #region Read + public override int Read(byte[] buffer, int offset, int count) + => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); + + private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < count && Position < Length) + { + int subreadOffset = offset + totalRead; + int subreadCount = count - totalRead; + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamInternal( + buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + public override int Read(Span buffer) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + int totalRead = 0; + bool readInner = false; + while (totalRead < buffer.Length && Position < Length) + { + switch (_currentRegion) + { + case SMRegion.StreamHeader: + totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.StreamFooter: + totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentHeader: + totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentFooter: + totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); + break; + case SMRegion.SegmentContent: + // don't double read from stream. Allow caller to multi-read when desired. 
+ if (readInner) + { + UpdateLatestPosition(); + return totalRead; + } + totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); + readInner = true; + break; + default: + break; + } + } + UpdateLatestPosition(); + return totalRead; + } +#endif + + #region Read Headers/Footers + private int ReadFromStreamHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( + ArrayPool.Shared, + out Memory headerBytes, + Length, + StructuredMessage.Flags.StorageCrc64, + totalSegments: 1); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _streamHeaderLength) + { + _currentRegion = SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromStreamFooter(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read <= 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( + ArrayPool.Shared, + out Memory footerBytes, + new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); + footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + return read; + } + + private int ReadFromSegmentHeader(Span buffer) + { + int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); + using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( + ArrayPool.Shared, + out Memory headerBytes, + segmentNum: 1, + _innerStream.Length); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentHeaderLength) + { + _currentRegion = SMRegion.SegmentContent; + _currentRegionPosition = 0; + } + + return read; + } + + private int ReadFromSegmentFooter(Span 
buffer) + { + int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); + if (read < 0) + { + return 0; + } + + using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( + ArrayPool.Shared, + out Memory headerBytes, + new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); + headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); + _currentRegionPosition += read; + + if (_currentRegionPosition == _segmentFooterLength) + { + _currentRegion = _innerStream.Position == _innerStream.Length + ? SMRegion.StreamFooter : SMRegion.SegmentHeader; + _currentRegionPosition = 0; + } + + return read; + } + #endregion + + #region ReadUnderlyingStream + private void CleanupContentSegment() + { + if (_innerStream.Position >= _innerStream.Length) + { + _currentRegion = SMRegion.SegmentFooter; + _currentRegionPosition = 0; + } + } + + private async ValueTask ReadFromInnerStreamInternal( + byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) + { + int read = async + ? await _innerStream.ReadAsync(buffer, offset, count).ConfigureAwait(false) + : _innerStream.Read(buffer, offset, count); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + private int ReadFromInnerStream(Span buffer) + { + int read = _innerStream.Read(buffer); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } + + private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) + { + int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _currentRegionPosition += read; + CleanupContentSegment(); + return read; + } +#endif + #endregion + + // don't allow stream to seek too far forward. track how far the stream has been naturally read. 
+ private void UpdateLatestPosition() + { + if (_maxSeekPosition < Position) + { + _maxSeekPosition = Position; + } + } + #endregion + + public override long Seek(long offset, SeekOrigin origin) + { + switch (origin) + { + case SeekOrigin.Begin: + Position = offset; + break; + case SeekOrigin.Current: + Position += offset; + break; + case SeekOrigin.End: + Position = Length + offset; + break; + } + return Position; + } + + protected override void Dispose(bool disposing) + { + base.Dispose(disposing); + + if (_disposed) + { + return; + } + + if (disposing) + { + ArrayPool.Shared.Return(_crc); + _innerStream.Dispose(); + _disposed = true; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs index af21588b4ae09..763d385240383 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs @@ -9,14 +9,7 @@ public static StorageChecksumAlgorithm ResolveAuto(this StorageChecksumAlgorithm { if (checksumAlgorithm == StorageChecksumAlgorithm.Auto) { -#if BlobSDK || DataLakeSDK || CommonSDK return StorageChecksumAlgorithm.StorageCrc64; -#elif FileSDK // file shares don't support crc64 - return StorageChecksumAlgorithm.MD5; -#else - throw new System.NotSupportedException( - $"{typeof(TransferValidationOptionsExtensions).FullName}.{nameof(ResolveAuto)} is not supported."); -#endif } return checksumAlgorithm; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj index 5db86ebee984b..2863b85f6feb2 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj @@ -13,9 +13,12 @@ + + + @@ -28,6 +31,7 @@ + @@ -46,6 
+50,11 @@ + + + + + diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs index 7411eb1499312..f4e4b92ed73c4 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs @@ -15,6 +15,7 @@ internal class FaultyStream : Stream private readonly Exception _exceptionToRaise; private int _remainingExceptions; private Action _onFault; + private long _position = 0; public FaultyStream( Stream innerStream, @@ -40,7 +41,7 @@ public FaultyStream( public override long Position { - get => _innerStream.Position; + get => CanSeek ? _innerStream.Position : _position; set => _innerStream.Position = value; } @@ -53,7 +54,9 @@ public override int Read(byte[] buffer, int offset, int count) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - return _innerStream.Read(buffer, offset, count); + int read = _innerStream.Read(buffer, offset, count); + _position += read; + return read; } else { @@ -61,11 +64,13 @@ public override int Read(byte[] buffer, int offset, int count) } } - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - return _innerStream.ReadAsync(buffer, offset, count, cancellationToken); + int read = await _innerStream.ReadAsync(buffer, offset, count, cancellationToken); + _position += read; + return read; } else { diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs new file mode 100644 index 0000000000000..828c41179bba3 --- /dev/null +++ 
b/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.IO; +using Azure.Core; +using Azure.Core.Pipeline; +using Azure.Storage.Shared; + +namespace Azure.Storage.Test.Shared +{ + internal class ObserveStructuredMessagePolicy : HttpPipelineSynchronousPolicy + { + private readonly HashSet _requestScopes = new(); + + private readonly HashSet _responseScopes = new(); + + public ObserveStructuredMessagePolicy() + { + } + + public override void OnSendingRequest(HttpMessage message) + { + if (_requestScopes.Count > 0) + { + byte[] encodedContent; + byte[] underlyingContent; + StructuredMessageDecodingStream.RawDecodedData decodedData; + using (MemoryStream ms = new()) + { + message.Request.Content.WriteTo(ms, default); + encodedContent = ms.ToArray(); + using (MemoryStream ms2 = new()) + { + (Stream s, decodedData) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedContent)); + s.CopyTo(ms2); + underlyingContent = ms2.ToArray(); + } + } + } + } + + public override void OnReceivedResponse(HttpMessage message) + { + } + + public IDisposable CheckRequestScope() => CheckMessageScope.CheckRequestScope(this); + + public IDisposable CheckResponseScope() => CheckMessageScope.CheckResponseScope(this); + + private class CheckMessageScope : IDisposable + { + private bool _isRequestScope; + private ObserveStructuredMessagePolicy _policy; + + public static CheckMessageScope CheckRequestScope(ObserveStructuredMessagePolicy policy) + { + CheckMessageScope result = new() + { + _isRequestScope = true, + _policy = policy + }; + result._policy._requestScopes.Add(result); + return result; + } + + public static CheckMessageScope CheckResponseScope(ObserveStructuredMessagePolicy policy) + { + CheckMessageScope result = new() + { + _isRequestScope = false, + _policy = 
policy + }; + result._policy._responseScopes.Add(result); + return result; + } + + public void Dispose() + { + (_isRequestScope ? _policy._requestScopes : _policy._responseScopes).Remove(this); + } + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs new file mode 100644 index 0000000000000..ad395e862f827 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System.Linq; +using System.Text; +using Azure.Core; +using NUnit.Framework; + +namespace Azure.Storage; + +public static partial class RequestExtensions +{ + public static string AssertHeaderPresent(this Request request, string headerName) + { + if (request.Headers.TryGetValue(headerName, out string value)) + { + return headerName == Constants.StructuredMessage.StructuredMessageHeader ? null : value; + } + StringBuilder sb = new StringBuilder() + .AppendLine($"`{headerName}` expected on request but was not found.") + .AppendLine($"{request.Method} {request.Uri}") + .AppendLine(string.Join("\n", request.Headers.Select(h => $"{h.Name}: {h.Value}s"))) + ; + Assert.Fail(sb.ToString()); + return null; + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs index f4198e9dfd532..7e6c78117f53b 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs @@ -14,7 +14,7 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy /// /// Default tampering that changes the first byte of the stream. 
/// - private static readonly Func _defaultStreamTransform = stream => + private static Func GetTamperByteStreamTransform(long position) => stream => { if (stream is not MemoryStream) { @@ -23,10 +23,10 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy stream = buffer; } - stream.Position = 0; + stream.Position = position; var firstByte = stream.ReadByte(); - stream.Position = 0; + stream.Position = position; stream.WriteByte((byte)((firstByte + 1) % byte.MaxValue)); stream.Position = 0; @@ -37,9 +37,12 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy public TamperStreamContentsPolicy(Func streamTransform = default) { - _streamTransform = streamTransform ?? _defaultStreamTransform; + _streamTransform = streamTransform ?? GetTamperByteStreamTransform(0); } + public static TamperStreamContentsPolicy TamperByteAt(long position) + => new(GetTamperByteStreamTransform(position)); + public bool TransformRequestBody { get; set; } public bool TransformResponseBody { get; set; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs index c18492d2fb4dd..248acf8811960 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs @@ -5,10 +5,13 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Security.Cryptography; using System.Threading.Tasks; using Azure.Core; +using Azure.Core.Diagnostics; +using Azure.Core.Pipeline; using Azure.Core.TestFramework; -using FastSerialization; +using Azure.Storage.Shared; using NUnit.Framework; namespace Azure.Storage.Test.Shared @@ -190,21 +193,15 @@ protected string GetNewResourceName() /// The actual checksum value expected to be on the request, if known. Defaults to no specific value expected or checked. 
/// /// An assertion to put into a pipeline policy. - internal static Action GetRequestChecksumAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) + internal static Action GetRequestChecksumHeaderAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) { // action to assert a request header is as expected - void AssertChecksum(RequestHeaders headers, string headerName) + void AssertChecksum(Request req, string headerName) { - if (headers.TryGetValue(headerName, out string checksum)) + string checksum = req.AssertHeaderPresent(headerName); + if (expectedChecksum != default) { - if (expectedChecksum != default) - { - Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); - } - } - else - { - Assert.Fail($"{headerName} expected on request but was not found."); + Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); } }; @@ -219,14 +216,39 @@ void AssertChecksum(RequestHeaders headers, string headerName) switch (algorithm.ResolveAuto()) { case StorageChecksumAlgorithm.MD5: - AssertChecksum(request.Headers, "Content-MD5"); + AssertChecksum(request, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(request.Headers, "x-ms-content-crc64"); + AssertChecksum(request, Constants.StructuredMessage.StructuredMessageHeader); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); + } + }; + } + + internal static Action GetRequestStructuredMessageAssertion( + StructuredMessage.Flags flags, + Func isStructuredMessageExpected = default, + long? 
structuredContentSegmentLength = default) + { + return request => + { + // filter some requests out with predicate + if (isStructuredMessageExpected != default && !isStructuredMessageExpected(request)) + { + return; } + + Assert.That(request.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); + Assert.That(structuredBody, Does.Contain("XSM/1.0")); + if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + Assert.That(structuredBody, Does.Contain("crc64")); + } + + Assert.That(request.Headers.TryGetValue("Content-Length", out string contentLength)); + Assert.That(request.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); }; } @@ -278,32 +300,66 @@ void AssertChecksum(ResponseHeaders headers, string headerName) AssertChecksum(response.Headers, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(response.Headers, "x-ms-content-crc64"); + AssertChecksum(response.Headers, Constants.StructuredMessage.StructuredMessageHeader); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); } }; } + internal static Action GetResponseStructuredMessageAssertion( + StructuredMessage.Flags flags, + Func isStructuredMessageExpected = default) + { + return response => + { + // filter some requests out with predicate + if (isStructuredMessageExpected != default && !isStructuredMessageExpected(response)) + { + return; + } + + Assert.That(response.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); + Assert.That(structuredBody, Does.Contain("XSM/1.0")); + if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) + { + Assert.That(structuredBody, Does.Contain("crc64")); + } + + Assert.That(response.Headers.TryGetValue("Content-Length", out string contentLength)); + 
Assert.That(response.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); + }; + } + /// /// Asserts the service returned an error that expected checksum did not match checksum on upload. /// /// Async action to upload data to service. /// Checksum algorithm used. - internal static void AssertWriteChecksumMismatch(AsyncTestDelegate writeAction, StorageChecksumAlgorithm algorithm) + internal static void AssertWriteChecksumMismatch( + AsyncTestDelegate writeAction, + StorageChecksumAlgorithm algorithm, + bool expectStructuredMessage = false) { var exception = ThrowsOrInconclusiveAsync(writeAction); - switch (algorithm.ResolveAuto()) + if (expectStructuredMessage) { - case StorageChecksumAlgorithm.MD5: - Assert.AreEqual("Md5Mismatch", exception.ErrorCode); - break; - case StorageChecksumAlgorithm.StorageCrc64: - Assert.AreEqual("Crc64Mismatch", exception.ErrorCode); - break; - default: - throw new ArgumentException("Test arguments contain bad algorithm specifier."); + Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); + } + else + { + switch (algorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + Assert.That(exception.ErrorCode, Is.EqualTo("Md5Mismatch")); + break; + case StorageChecksumAlgorithm.StorageCrc64: + Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); + break; + default: + throw new ArgumentException("Test arguments contain bad algorithm specifier."); + } } } #endregion @@ -348,6 +404,7 @@ public virtual async Task UploadPartitionSuccessfulHashComputation(StorageChecks await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); // Arrange + bool expectStructuredMessage = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; const int dataLength = Constants.KB; var data = GetRandomBuffer(dataLength); var validationOptions = new UploadTransferValidationOptions @@ -356,7 +413,10 @@ public virtual async Task 
UploadPartitionSuccessfulHashComputation(StorageChecks }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); + var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) + : GetRequestChecksumHeaderAssertion(algorithm); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -406,7 +466,11 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg }; // make pipeline assertion for checking precalculated checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm, expectedChecksum: precalculatedChecksum)); + // precalculated partition upload will never use structured message. always check header + var assertion = GetRequestChecksumHeaderAssertion( + algorithm, + expectedChecksum: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? 
default : precalculatedChecksum); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -423,12 +487,12 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); // Assert - AssertWriteChecksumMismatch(operation, algorithm); + AssertWriteChecksumMismatch(operation, algorithm, algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); } } [TestCaseSource(nameof(GetValidationAlgorithms))] - public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlgorithm algorithm) + public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlgorithm algorithm) { await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); @@ -441,7 +505,7 @@ public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlg }; // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer - var streamTamperPolicy = new TamperStreamContentsPolicy(); + var streamTamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(streamTamperPolicy, HttpPipelinePosition.PerCall); @@ -456,9 +520,10 @@ public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlg // Act streamTamperPolicy.TransformRequestBody = true; AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); - + using var listener = AzureEventSourceListener.CreateConsoleLogger(); // Assert - AssertWriteChecksumMismatch(operation, algorithm); + AssertWriteChecksumMismatch(operation, algorithm, + expectStructuredMessage: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); } } @@ 
-473,7 +538,10 @@ public virtual async Task UploadPartitionUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); + var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) + : GetRequestChecksumHeaderAssertion(clientAlgorithm); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -512,7 +580,10 @@ public virtual async Task UploadPartitionOverwritesDefaultClientValidationOption }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); + var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) + : GetRequestChecksumHeaderAssertion(overrideAlgorithm); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -555,10 +626,14 @@ public virtual async Task UploadPartitionDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-content-crc64")) + if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } + if (request.Headers.Contains("x-ms-structured-body")) + { + Assert.Fail($"Structured body used when none expected."); + } }); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -601,9 +676,11 @@ public virtual async Task OpenWriteSuccessfulHashComputation( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(algorithm)); var clientOptions = ClientBuilder.GetOptions(); + //ObserveStructuredMessagePolicy observe = new(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); + //clientOptions.AddPolicy(observe, HttpPipelinePosition.BeforeTransport); var client = await GetResourceClientAsync( disposingContainer.Container, @@ -616,6 +693,7 @@ public virtual async Task OpenWriteSuccessfulHashComputation( using var writeStream = await OpenWriteAsync(client, validationOptions, streamBufferSize); // Assert + //using var obsv = observe.CheckRequestScope(); using (checksumPipelineAssertion.CheckRequestScope()) { 
foreach (var _ in Enumerable.Range(0, streamWrites)) @@ -644,7 +722,7 @@ public virtual async Task OpenWriteMismatchedHashThrows(StorageChecksumAlgorithm // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer var clientOptions = ClientBuilder.GetOptions(); - var tamperPolicy = new TamperStreamContentsPolicy(); + var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); var client = await GetResourceClientAsync( @@ -682,7 +760,7 @@ public virtual async Task OpenWriteUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(clientAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -726,7 +804,7 @@ public virtual async Task OpenWriteOverwritesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(overrideAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -774,7 +852,7 @@ public virtual async Task OpenWriteDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-content-crc64")) + if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found 
when none expected."); } @@ -886,7 +964,7 @@ public virtual async Task ParallelUploadSplitSuccessfulHashComputation(StorageCh // make pipeline assertion for checking checksum was present on upload var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + checkRequest: GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -923,8 +1001,10 @@ public virtual async Task ParallelUploadOneShotSuccessfulHashComputation(Storage }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 + ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) + : GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -981,7 +1061,7 @@ public virtual async Task ParallelUploadPrecalculatedComposableHashAccepted(Stor PrecalculatedChecksum = hash }; - var client = await GetResourceClientAsync(disposingContainer.Container, dataLength); + var client = await GetResourceClientAsync(disposingContainer.Container, dataLength, createResource: true); // Act await DoesNotThrowOrInconclusiveAsync( @@ -1011,8 +1091,10 @@ public virtual async Task ParallelUploadUsesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( - clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split + ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) + : GetRequestChecksumHeaderAssertion(clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1063,8 +1145,10 @@ public virtual async Task ParallelUploadOverwritesDefaultClientValidationOptions }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( - overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split + ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) + : GetRequestChecksumHeaderAssertion(overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1119,7 +1203,7 @@ public virtual async Task ParallelUploadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-content-crc64")) + if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1184,15 +1268,17 @@ public virtual async Task ParallelDownloadSuccessfulHashVerification( }; // Act - var dest = new MemoryStream(); + byte[] dest; + using (MemoryStream ms = new()) using (checksumPipelineAssertion.CheckRequestScope()) { - await ParallelDownloadAsync(client, 
dest, validationOptions, transferOptions); + await ParallelDownloadAsync(client, ms, validationOptions, transferOptions); + dest = ms.ToArray(); } // Assert // Assertion was in the pipeline and the SDK not throwing means the checksum was validated - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + Assert.IsTrue(dest.SequenceEqual(data)); } [Test] @@ -1357,7 +1443,7 @@ public virtual async Task ParallelDownloadDisablesDefaultClientValidationOptions { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains("x-ms-content-crc64")) + if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1565,7 +1651,7 @@ public virtual async Task OpenReadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains("x-ms-content-crc64")) + if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1615,7 +1701,7 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; // Act - var dest = new MemoryStream(); + using var dest = new MemoryStream(); var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert @@ -1626,13 +1712,71 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains("x-ms-content-crc64")); + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); break; default: Assert.Fail("Test can't validate given algorithm type."); break; } - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + var result = dest.ToArray(); + 
Assert.IsTrue(result.SequenceEqual(data)); + } + + [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader, false, false)] + [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader-1, false, false)] + [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, true, false)] + [TestCase(StorageChecksumAlgorithm.MD5, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, false, true)] + public virtual async Task DownloadApporpriatelyUsesStructuredMessage( + StorageChecksumAlgorithm algorithm, + int? downloadLen, + bool expectStructuredMessage, + bool expectThrow) + { + await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); + + // Arrange + const int dataLength = Constants.KB; + var data = GetRandomBuffer(dataLength); + + var resourceName = GetNewResourceName(); + var client = await GetResourceClientAsync( + disposingContainer.Container, + resourceLength: dataLength, + createResource: true, + resourceName: resourceName); + await SetupDataAsync(client, new MemoryStream(data)); + + // make pipeline assertion for checking checksum was present on download + HttpPipelinePolicy checksumPipelineAssertion = new AssertMessageContentsPolicy(checkResponse: expectStructuredMessage + ? 
GetResponseStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64) + : GetResponseChecksumAssertion(algorithm)); + TClientOptions clientOptions = ClientBuilder.GetOptions(); + clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); + + client = await GetResourceClientAsync( + disposingContainer.Container, + resourceLength: dataLength, + resourceName: resourceName, + createResource: false, + downloadAlgorithm: algorithm, + options: clientOptions); + + var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; + + // Act + var dest = new MemoryStream(); + AsyncTestDelegate operation = async () => await DownloadPartitionAsync( + client, dest, validationOptions, downloadLen.HasValue ? new HttpRange(length: downloadLen.Value) : default); + // Assert (policies checked use of content validation) + if (expectThrow) + { + Assert.That(operation, Throws.TypeOf()); + } + else + { + Assert.That(operation, Throws.Nothing); + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + } } [Test, Combinatorial] @@ -1658,7 +1802,9 @@ public virtual async Task DownloadHashMismatchThrows( // alter response contents in pipeline, forcing a checksum mismatch on verification step var clientOptions = ClientBuilder.GetOptions(); - clientOptions.AddPolicy(new TamperStreamContentsPolicy() { TransformResponseBody = true }, HttpPipelinePosition.PerCall); + var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(50); + tamperPolicy.TransformResponseBody = true; + clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); client = await GetResourceClientAsync( disposingContainer.Container, createResource: false, @@ -1670,7 +1816,7 @@ public virtual async Task DownloadHashMismatchThrows( AsyncTestDelegate operation = async () => await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert - if (validate) + if (validate || algorithm.ResolveAuto() == 
StorageChecksumAlgorithm.StorageCrc64) { // SDK responsible for finding bad checksum. Throw. ThrowsOrInconclusiveAsync(operation); @@ -1728,7 +1874,7 @@ public virtual async Task DownloadUsesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains("x-ms-content-crc64")); + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1788,7 +1934,7 @@ public virtual async Task DownloadOverwritesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains("x-ms-content-crc64")); + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1827,7 +1973,7 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains("x-ms-content-crc64")) + if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) { Assert.Fail($"Hash found when none expected."); } @@ -1850,7 +1996,54 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( // Assert // no policies this time; just check response headers Assert.False(response.Headers.Contains("Content-MD5")); - Assert.False(response.Headers.Contains("x-ms-content-crc64")); + Assert.False(response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)); + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); + } + + [Test] + public virtual async Task DownloadRecoversFromInterruptWithValidation( + [ValueSource(nameof(GetValidationAlgorithms))] StorageChecksumAlgorithm algorithm) + { + using var _ = 
AzureEventSourceListener.CreateConsoleLogger(); + int dataLen = algorithm.ResolveAuto() switch { + StorageChecksumAlgorithm.StorageCrc64 => 5 * Constants.MB, // >4MB for multisegment + _ => Constants.KB, + }; + + await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); + + // Arrange + var data = GetRandomBuffer(dataLen); + + TClientOptions options = ClientBuilder.GetOptions(); + options.AddPolicy(new FaultyDownloadPipelinePolicy(dataLen - 512, new IOException(), () => { }), HttpPipelinePosition.BeforeTransport); + var client = await GetResourceClientAsync( + disposingContainer.Container, + resourceLength: dataLen, + createResource: true, + options: options); + await SetupDataAsync(client, new MemoryStream(data)); + + var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; + + // Act + var dest = new MemoryStream(); + var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); + + // Assert + // no policies this time; just check response headers + switch (algorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + Assert.True(response.Headers.Contains("Content-MD5")); + break; + case StorageChecksumAlgorithm.StorageCrc64: + Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + break; + default: + Assert.Fail("Test can't validate given algorithm type."); + break; + } Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } #endregion @@ -1891,7 +2084,7 @@ public async Task RoundtripWIthDefaults() // make pipeline assertion for checking checksum was present on upload AND download var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), + checkRequest: GetRequestChecksumHeaderAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), checkResponse: 
GetResponseChecksumAssertion(expectedAlgorithm)); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs new file mode 100644 index 0000000000000..a0f9158040b11 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs @@ -0,0 +1,246 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers.Binary; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core; +using Azure.Storage.Shared; +using Azure.Storage.Test.Shared; +using Microsoft.Diagnostics.Tracing.Parsers.AspNet; +using Moq; +using NUnit.Framework; + +namespace Azure.Storage.Tests; + +[TestFixture(true)] +[TestFixture(false)] +public class StructuredMessageDecodingRetriableStreamTests +{ + public bool Async { get; } + + public StructuredMessageDecodingRetriableStreamTests(bool async) + { + Async = async; + } + + private Mock AllExceptionsRetry() + { + Mock mock = new(MockBehavior.Strict); + mock.Setup(rc => rc.IsRetriableException(It.IsAny())).Returns(true); + return mock; + } + + [Test] + public async ValueTask UninterruptedStream() + { + byte[] data = new Random().NextBytesInline(4 * Constants.KB).ToArray(); + byte[] dest = new byte[data.Length]; + + // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream + using (Stream src = new MemoryStream(data)) + using (Stream retriableSrc = new StructuredMessageDecodingRetriableStream(src, new(), default, default, default, default, default, 1)) + using (Stream dst = new MemoryStream(dest)) + { + await retriableSrc.CopyToInternal(dst, Async, default); + } + + Assert.AreEqual(data, dest); + } + + [Test] + public async Task Interrupt_DataIntact([Values(true, 
false)] bool multipleInterrupts) + { + const int segments = 4; + const int segmentLen = Constants.KB; + const int readLen = 128; + const int interruptPos = segmentLen + (3 * readLen) + 10; + + Random r = new(); + byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); + byte[] dest = new byte[data.Length]; + + // Mock a decoded data for the mocked StructuredMessageDecodingStream + StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() + { + TotalSegments = segments, + InnerStreamLength = data.Length, + Flags = StructuredMessage.Flags.StorageCrc64 + }; + // for test purposes, initialize a DecodedData, since we are not actively decoding in this test + initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); + + (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) + { + Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); + if (faulty) + { + stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); + } + // Mock a decoded data for the mocked StructuredMessageDecodingStream + StructuredMessageDecodingStream.RawDecodedData decodedData = new() + { + TotalSegments = segments, + InnerStreamLength = data.Length, + Flags = StructuredMessage.Flags.StorageCrc64, + }; + // for test purposes, initialize a DecodedData, since we are not actively decoding in this test + initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); + return (stream, decodedData); + } + + // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream + using (Stream src = new MemoryStream(data)) + using (Stream faultySrc = new FaultyStream(src, interruptPos, 1, new Exception(), () => { })) + using (Stream retriableSrc = new StructuredMessageDecodingRetriableStream( + 
faultySrc, + initialDecodedData, + default, + offset => Factory(offset, multipleInterrupts), + offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), + null, + AllExceptionsRetry().Object, + int.MaxValue)) + using (Stream dst = new MemoryStream(dest)) + { + await retriableSrc.CopyToInternal(dst, readLen, Async, default); + } + + Assert.AreEqual(data, dest); + } + + [Test] + public async Task Interrupt_AppropriateRewind() + { + const int segments = 2; + const int segmentLen = Constants.KB; + const int dataLen = segments * segmentLen; + const int readLen = segmentLen / 4; + const int interruptOffset = 10; + const int interruptPos = segmentLen + (2 * readLen) + interruptOffset; + Random r = new(); + + // Mock a decoded data for the mocked StructuredMessageDecodingStream + StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() + { + TotalSegments = segments, + InnerStreamLength = segments * segmentLen, + Flags = StructuredMessage.Flags.StorageCrc64, + }; + // By the time of interrupt, there will be one segment reported + initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); + + Mock mock = new(MockBehavior.Strict); + mock.SetupGet(s => s.CanRead).Returns(true); + mock.SetupGet(s => s.CanSeek).Returns(false); + if (Async) + { + mock.SetupSequence(s => s.ReadAsync(It.IsAny(), It.IsAny(), It.IsAny(), default)) + .Returns(Task.FromResult(readLen)) // start first segment + .Returns(Task.FromResult(readLen)) + .Returns(Task.FromResult(readLen)) + .Returns(Task.FromResult(readLen)) // finish first segment + .Returns(Task.FromResult(readLen)) // start second segment + .Returns(Task.FromResult(readLen)) + // faulty stream interrupt + .Returns(Task.FromResult(readLen * 2)) // restart second segment. 
fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once + .Returns(Task.FromResult(readLen)) + .Returns(Task.FromResult(readLen)) // end second segment + .Returns(Task.FromResult(0)) // signal end of stream + .Returns(Task.FromResult(0)) // second signal needed for stream wrapping reasons + ; + } + else + { + mock.SetupSequence(s => s.Read(It.IsAny(), It.IsAny(), It.IsAny())) + .Returns(readLen) // start first segment + .Returns(readLen) + .Returns(readLen) + .Returns(readLen) // finish first segment + .Returns(readLen) // start second segment + .Returns(readLen) + // faulty stream interrupt + .Returns(readLen * 2) // restart second segment. fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once + .Returns(readLen) + .Returns(readLen) // end second segment + .Returns(0) // signal end of stream + .Returns(0) // second signal needed for stream wrapping reasons + ; + } + Stream faultySrc = new FaultyStream(mock.Object, interruptPos, 1, new Exception(), default); + Stream retriableSrc = new StructuredMessageDecodingRetriableStream( + faultySrc, + initialDecodedData, + default, + offset => (mock.Object, new()), + offset => new(Task.FromResult((mock.Object, new StructuredMessageDecodingStream.RawDecodedData()))), + null, + AllExceptionsRetry().Object, + 1); + + int totalRead = 0; + int read = 0; + byte[] buf = new byte[readLen]; + if (Async) + { + while ((read = await retriableSrc.ReadAsync(buf, 0, buf.Length)) > 0) + { + totalRead += read; + } + } + else + { + while ((read = retriableSrc.Read(buf, 0, buf.Length)) > 0) + { + totalRead += read; + } + } + await retriableSrc.CopyToInternal(Stream.Null, readLen, Async, default); + + // Asserts we read exactly the data length, excluding the fastforward of the inner stream + Assert.That(totalRead, Is.EqualTo(dataLen)); + } + + [Test] + public async Task Interrupt_ProperDecode([Values(true, false)] bool multipleInterrupts) + { + // decoding stream inserts a 
buffered layer of 4 KB. use larger sizes to avoid interference from it. + const int segments = 4; + const int segmentLen = 128 * Constants.KB; + const int readLen = 8 * Constants.KB; + const int interruptPos = segmentLen + (3 * readLen) + 10; + + Random r = new(); + byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); + byte[] dest = new byte[data.Length]; + + (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) + { + Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); + stream = new StructuredMessageEncodingStream(stream, segmentLen, StructuredMessage.Flags.StorageCrc64); + if (faulty) + { + stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); + } + return StructuredMessageDecodingStream.WrapStream(stream); + } + + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = Factory(0, true); + using Stream retriableSrc = new StructuredMessageDecodingRetriableStream( + decodingStream, + decodedData, + default, + offset => Factory(offset, multipleInterrupts), + offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), + null, + AllExceptionsRetry().Object, + int.MaxValue); + using Stream dst = new MemoryStream(dest); + + await retriableSrc.CopyToInternal(dst, readLen, Async, default); + + Assert.AreEqual(data, dest); + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs new file mode 100644 index 0000000000000..2789672df4976 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs @@ -0,0 +1,323 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers.Binary; +using System.Dynamic; +using System.IO; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Azure.Storage.Blobs.Tests; +using Azure.Storage.Shared; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + [TestFixture(ReadMethod.SyncArray)] + [TestFixture(ReadMethod.AsyncArray)] +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + [TestFixture(ReadMethod.SyncSpan)] + [TestFixture(ReadMethod.AsyncMemory)] +#endif + public class StructuredMessageDecodingStreamTests + { + // Cannot just implement as passthru in the stream + // Must test each one + public enum ReadMethod + { + SyncArray, + AsyncArray, +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + SyncSpan, + AsyncMemory +#endif + } + + public ReadMethod Method { get; } + + public StructuredMessageDecodingStreamTests(ReadMethod method) + { + Method = method; + } + + private class CopyStreamException : Exception + { + public long TotalCopied { get; } + + public CopyStreamException(Exception inner, long totalCopied) + : base($"Failed read after {totalCopied}-many bytes.", inner) + { + TotalCopied = totalCopied; + } + } + private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl + { + byte[] buf = new byte[bufferSize]; + int read; + long totalRead = 0; + try + { + switch (Method) + { + case ReadMethod.SyncArray: + while ((read = source.Read(buf, 0, bufferSize)) > 0) + { + totalRead += read; + destination.Write(buf, 0, read); + } + break; + case ReadMethod.AsyncArray: + while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) + { + totalRead += read; + await destination.WriteAsync(buf, 0, read); + } + break; +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + case ReadMethod.SyncSpan: + while ((read = source.Read(new Span(buf))) > 0) + { + totalRead += read; + destination.Write(new 
Span(buf, 0, read)); + } + break; + case ReadMethod.AsyncMemory: + while ((read = await source.ReadAsync(new Memory(buf))) > 0) + { + totalRead += read; + await destination.WriteAsync(new Memory(buf, 0, read)); + } + break; +#endif + } + destination.Flush(); + } + catch (Exception ex) + { + throw new CopyStreamException(ex, totalRead); + } + return totalRead; + } + + [Test] + [Pairwise] + public async Task DecodesData( + [Values(2048, 2005)] int dataLength, + [Values(default, 512)] int? seglen, + [Values(8*Constants.KB, 512, 530, 3)] int readLen, + [Values(true, false)] bool useCrc) + { + int segmentContentLength = seglen ?? int.MaxValue; + Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + byte[] decodedData; + using (MemoryStream dest = new()) + { + await CopyStream(decodingStream, dest, readLen); + decodedData = dest.ToArray(); + } + + Assert.That(new Span(decodedData).SequenceEqual(originalData)); + } + + [Test] + public void BadStreamBadVersion() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + encodedData[0] = byte.MaxValue; + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public async Task BadSegmentCrcThrows() + { + const int segmentLength = 256; + Random r = new(); + + byte[] originalData = new byte[2048]; + r.NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, 
Flags.StorageCrc64); + + const int badBytePos = 1024; + encodedData[badBytePos] = (byte)~encodedData[badBytePos]; + + MemoryStream encodedDataStream = new(encodedData); + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(encodedDataStream); + + // manual try/catch to validate the proccess failed mid-stream rather than the end + const int copyBufferSize = 4; + bool caught = false; + try + { + await CopyStream(decodingStream, Stream.Null, copyBufferSize); + } + catch (CopyStreamException ex) + { + caught = true; + Assert.That(ex.TotalCopied, Is.LessThanOrEqualTo(badBytePos)); + } + Assert.That(caught); + } + + [Test] + public void BadStreamCrcThrows() + { + const int segmentLength = 256; + Random r = new(); + + byte[] originalData = new byte[2048]; + r.NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, Flags.StorageCrc64); + + encodedData[originalData.Length - 1] = (byte)~encodedData[originalData.Length - 1]; + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public void BadStreamWrongContentLength() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + BinaryPrimitives.WriteInt64LittleEndian(new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), 123456789L); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [TestCase(-1)] + [TestCase(1)] + public void BadStreamWrongSegmentCount(int difference) + { + const int dataSize = 1024; + const int segmentSize = 256; + const int numSegments = 4; + + 
byte[] originalData = new byte[dataSize]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentSize, Flags.StorageCrc64); + + // rewrite the segment count to be different than the actual number of segments + BinaryPrimitives.WriteInt16LittleEndian( + new Span(encodedData, V1_0.StreamHeaderSegmentCountOffset, 2), (short)(numSegments + difference)); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public void BadStreamWrongSegmentNum() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + BinaryPrimitives.WriteInt16LittleEndian( + new Span(encodedData, V1_0.StreamHeaderLength + V1_0.SegmentHeaderNumOffset, 2), 123); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + [Combinatorial] + public async Task BadStreamWrongContentLength( + [Values(-1, 1)] int difference, + [Values(true, false)] bool lengthProvided) + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + BinaryPrimitives.WriteInt64LittleEndian( + new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), + encodedData.Length + difference); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream( + new MemoryStream(encodedData), + lengthProvided ? 
(long?)encodedData.Length : default); + + // manual try/catch with tiny buffer to validate the proccess failed mid-stream rather than the end + const int copyBufferSize = 4; + bool caught = false; + try + { + await CopyStream(decodingStream, Stream.Null, copyBufferSize); + } + catch (CopyStreamException ex) + { + caught = true; + if (lengthProvided) + { + Assert.That(ex.TotalCopied, Is.EqualTo(0)); + } + else + { + Assert.That(ex.TotalCopied, Is.EqualTo(originalData.Length)); + } + } + Assert.That(caught); + } + + [Test] + public void BadStreamMissingExpectedStreamFooter() + { + byte[] originalData = new byte[1024]; + new Random().NextBytes(originalData); + byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); + + byte[] brokenData = new byte[encodedData.Length - Crc64Length]; + new Span(encodedData, 0, encodedData.Length - Crc64Length).CopyTo(brokenData); + + (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(brokenData)); + Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); + } + + [Test] + public void NoSeek() + { + (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); + + Assert.That(stream.CanSeek, Is.False); + Assert.That(() => stream.Length, Throws.TypeOf()); + Assert.That(() => stream.Position, Throws.TypeOf()); + Assert.That(() => stream.Position = 0, Throws.TypeOf()); + Assert.That(() => stream.Seek(0, SeekOrigin.Begin), Throws.TypeOf()); + } + + [Test] + public void NoWrite() + { + (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); + byte[] data = new byte[1024]; + new Random().NextBytes(data); + + Assert.That(stream.CanWrite, Is.False); + Assert.That(() => stream.Write(data, 0, data.Length), + Throws.TypeOf()); + Assert.That(async () => await stream.WriteAsync(data, 0, data.Length, CancellationToken.None), + Throws.TypeOf()); +#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + Assert.That(() => stream.Write(new Span(data)), + Throws.TypeOf()); + Assert.That(async () => await stream.WriteAsync(new Memory(data), CancellationToken.None), + Throws.TypeOf()); +#endif + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs new file mode 100644 index 0000000000000..e0f91dee7de3a --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs @@ -0,0 +1,271 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Buffers.Binary; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Azure.Storage.Blobs.Tests; +using Azure.Storage.Shared; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + [TestFixture(ReadMethod.SyncArray)] + [TestFixture(ReadMethod.AsyncArray)] +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + [TestFixture(ReadMethod.SyncSpan)] + [TestFixture(ReadMethod.AsyncMemory)] +#endif + public class StructuredMessageEncodingStreamTests + { + // Cannot just implement as passthru in the stream + // Must test each one + public enum ReadMethod + { + SyncArray, + AsyncArray, +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + SyncSpan, + AsyncMemory +#endif + } + + public ReadMethod Method { get; } + + public StructuredMessageEncodingStreamTests(ReadMethod method) + { + Method = method; + } + + private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl + { + byte[] buf = new byte[bufferSize]; + int read; + switch (Method) + { + case ReadMethod.SyncArray: + while ((read = source.Read(buf, 0, bufferSize)) > 0) + { + destination.Write(buf, 0, read); + } + break; + case 
ReadMethod.AsyncArray: + while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) + { + await destination.WriteAsync(buf, 0, read); + } + break; +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + case ReadMethod.SyncSpan: + while ((read = source.Read(new Span(buf))) > 0) + { + destination.Write(new Span(buf, 0, read)); + } + break; + case ReadMethod.AsyncMemory: + while ((read = await source.ReadAsync(new Memory(buf))) > 0) + { + await destination.WriteAsync(new Memory(buf, 0, read)); + } + break; +#endif + } + destination.Flush(); + } + + [Test] + [Pairwise] + public async Task EncodesData( + [Values(2048, 2005)] int dataLength, + [Values(default, 512)] int? seglen, + [Values(8 * Constants.KB, 512, 530, 3)] int readLen, + [Values(true, false)] bool useCrc) + { + int segmentContentLength = seglen ?? int.MaxValue; + Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); + + Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(originalData), segmentContentLength, flags); + byte[] encodedData; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest, readLen); + encodedData = dest.ToArray(); + } + + Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); + } + + [TestCase(0, 0)] // start + [TestCase(5, 0)] // partway through stream header + [TestCase(V1_0.StreamHeaderLength, 0)] // start of segment + [TestCase(V1_0.StreamHeaderLength + 3, 0)] // partway through segment header + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength, 0)] // start of segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123, 123)] // partway through segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512, 512)] // start of segment footer + 
[TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515, 512)] // partway through segment footer + [TestCase(V1_0.StreamHeaderLength + 3*V1_0.SegmentHeaderLength + 2*Crc64Length + 1500, 1500)] // partway through not first segment content + public async Task Seek(int targetRewindOffset, int expectedInnerStreamPosition) + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + MemoryStream dataStream = new(data); + StructuredMessageEncodingStream encodingStream = new(dataStream, segmentLength, Flags.StorageCrc64); + + // no support for seeking past existing read, need to consume whole stream before seeking + await CopyStream(encodingStream, Stream.Null); + + encodingStream.Position = targetRewindOffset; + Assert.That(encodingStream.Position, Is.EqualTo(targetRewindOffset)); + Assert.That(dataStream.Position, Is.EqualTo(expectedInnerStreamPosition)); + } + + [TestCase(0)] // start + [TestCase(5)] // partway through stream header + [TestCase(V1_0.StreamHeaderLength)] // start of segment + [TestCase(V1_0.StreamHeaderLength + 3)] // partway through segment header + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength)] // start of segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123)] // partway through segment content + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512)] // start of segment footer + [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515)] // partway through segment footer + [TestCase(V1_0.StreamHeaderLength + 2 * V1_0.SegmentHeaderLength + Crc64Length + 1500)] // partway through not first segment content + public async Task SupportsRewind(int targetRewindOffset) + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), 
segmentLength, Flags.StorageCrc64); + byte[] encodedData1; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest); + encodedData1 = dest.ToArray(); + } + encodingStream.Position = targetRewindOffset; + byte[] encodedData2; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest); + encodedData2 = dest.ToArray(); + } + + Assert.That(new Span(encodedData1).Slice(targetRewindOffset).SequenceEqual(encodedData2)); + } + + [Test] + public async Task SupportsFastForward() + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + // must have read stream to fastforward. so read whole stream upfront & save result to check later + Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), segmentLength, Flags.StorageCrc64); + byte[] encodedData; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest); + encodedData = dest.ToArray(); + } + + encodingStream.Position = 0; + + bool skip = false; + const int increment = 499; + while (encodingStream.Position < encodingStream.Length) + { + if (skip) + { + encodingStream.Position = Math.Min(dataLength, encodingStream.Position + increment); + skip = !skip; + continue; + } + ReadOnlyMemory expected = new(encodedData, (int)encodingStream.Position, + (int)Math.Min(increment, encodedData.Length - encodingStream.Position)); + ReadOnlyMemory actual; + using (MemoryStream dest = new(increment)) + { + await CopyStream(WindowStream.GetWindow(encodingStream, increment), dest); + actual = dest.ToArray(); + } + Assert.That(expected.Span.SequenceEqual(actual.Span)); + skip = !skip; + } + } + + [Test] + public void NotSupportsFastForwardBeyondLatestRead() + { + const int segmentLength = 512; + const int dataLength = 2055; + byte[] data = new byte[dataLength]; + new Random().NextBytes(data); + + Stream encodingStream = new StructuredMessageEncodingStream(new 
MemoryStream(data), segmentLength, Flags.StorageCrc64); + + Assert.That(() => encodingStream.Position = 123, Throws.TypeOf()); + } + + [Test] + [Pairwise] + public async Task WrapperStreamCorrectData( + [Values(2048, 2005)] int dataLength, + [Values(8 * Constants.KB, 512, 530, 3)] int readLen) + { + int segmentContentLength = dataLength; + Flags flags = Flags.StorageCrc64; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + byte[] crc = CrcInline(originalData); + byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); + + Stream encodingStream = new StructuredMessagePrecalculatedCrcWrapperStream(new MemoryStream(originalData), crc); + byte[] encodedData; + using (MemoryStream dest = new()) + { + await CopyStream(encodingStream, dest, readLen); + encodedData = dest.ToArray(); + } + + Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); + } + + private static void AssertExpectedStreamHeader(ReadOnlySpan actual, int originalDataLength, Flags flags, int expectedSegments) + { + int expectedFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; + + Assert.That(actual.Length, Is.EqualTo(V1_0.StreamHeaderLength)); + Assert.That(actual[0], Is.EqualTo(1)); + Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(1, 8)), + Is.EqualTo(V1_0.StreamHeaderLength + expectedSegments * (V1_0.SegmentHeaderLength + expectedFooterLen) + originalDataLength)); + Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(9, 2)), Is.EqualTo((short)flags)); + Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(11, 2)), Is.EqualTo((short)expectedSegments)); + } + + private static void AssertExpectedSegmentHeader(ReadOnlySpan actual, int segmentNum, long contentLength) + { + Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(0, 2)), Is.EqualTo((short) segmentNum)); + Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(2, 8)), Is.EqualTo(contentLength)); + } + + private static byte[] CrcInline(ReadOnlySpan data) + { + var crc = StorageCrc64HashAlgorithm.Create(); + crc.Append(data); + return crc.GetCurrentHash(); + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs new file mode 100644 index 0000000000000..59e80320d96a0 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Azure.Storage.Shared; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Blobs.Tests +{ + internal class StructuredMessageHelper + { + public static byte[] MakeEncodedData(ReadOnlySpan data, long segmentContentLength, Flags flags) + { + int segmentCount = (int) Math.Ceiling(data.Length / (double)segmentContentLength); + int segmentFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
8 : 0; + int streamFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 8 : 0; + + byte[] encodedData = new byte[ + V1_0.StreamHeaderLength + + segmentCount*(V1_0.SegmentHeaderLength + segmentFooterLen) + + streamFooterLen + + data.Length]; + V1_0.WriteStreamHeader( + new Span(encodedData, 0, V1_0.StreamHeaderLength), + encodedData.Length, + flags, + segmentCount); + + int i = V1_0.StreamHeaderLength; + int j = 0; + foreach (int seg in Enumerable.Range(1, segmentCount)) + { + int segContentLen = Math.Min((int)segmentContentLength, data.Length - j); + V1_0.WriteSegmentHeader( + new Span(encodedData, i, V1_0.SegmentHeaderLength), + seg, + segContentLen); + i += V1_0.SegmentHeaderLength; + + data.Slice(j, segContentLen) + .CopyTo(new Span(encodedData).Slice(i)); + i += segContentLen; + + if (flags.HasFlag(Flags.StorageCrc64)) + { + var crc = StorageCrc64HashAlgorithm.Create(); + crc.Append(data.Slice(j, segContentLen)); + crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); + i += Crc64Length; + } + j += segContentLen; + } + + if (flags.HasFlag(Flags.StorageCrc64)) + { + var crc = StorageCrc64HashAlgorithm.Create(); + crc.Append(data); + crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); + } + + return encodedData; + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs new file mode 100644 index 0000000000000..61583aa1ebe4e --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs @@ -0,0 +1,127 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Azure.Storage.Shared; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + [TestFixture(ReadMethod.SyncArray)] + [TestFixture(ReadMethod.AsyncArray)] +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + [TestFixture(ReadMethod.SyncSpan)] + [TestFixture(ReadMethod.AsyncMemory)] +#endif + public class StructuredMessageStreamRoundtripTests + { + // Cannot just implement as passthru in the stream + // Must test each one + public enum ReadMethod + { + SyncArray, + AsyncArray, +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + SyncSpan, + AsyncMemory +#endif + } + + public ReadMethod Method { get; } + + public StructuredMessageStreamRoundtripTests(ReadMethod method) + { + Method = method; + } + + private class CopyStreamException : Exception + { + public long TotalCopied { get; } + + public CopyStreamException(Exception inner, long totalCopied) + : base($"Failed read after {totalCopied}-many bytes.", inner) + { + TotalCopied = totalCopied; + } + } + private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl + { + byte[] buf = new byte[bufferSize]; + int read; + long totalRead = 0; + try + { + switch (Method) + { + case ReadMethod.SyncArray: + while ((read = source.Read(buf, 0, bufferSize)) > 0) + { + totalRead += read; + destination.Write(buf, 0, read); + } + break; + case ReadMethod.AsyncArray: + while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) + { + totalRead += read; + await destination.WriteAsync(buf, 0, read); + } + break; +#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER + case ReadMethod.SyncSpan: + while ((read = source.Read(new Span(buf))) > 0) + { + totalRead += read; + destination.Write(new Span(buf, 0, read)); + } + break; + case ReadMethod.AsyncMemory: + while ((read = await source.ReadAsync(new 
Memory(buf))) > 0) + { + totalRead += read; + await destination.WriteAsync(new Memory(buf, 0, read)); + } + break; +#endif + } + destination.Flush(); + } + catch (Exception ex) + { + throw new CopyStreamException(ex, totalRead); + } + return totalRead; + } + + [Test] + [Pairwise] + public async Task RoundTrip( + [Values(2048, 2005)] int dataLength, + [Values(default, 512)] int? seglen, + [Values(8 * Constants.KB, 512, 530, 3)] int readLen, + [Values(true, false)] bool useCrc) + { + int segmentLength = seglen ?? int.MaxValue; + Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; + + byte[] originalData = new byte[dataLength]; + new Random().NextBytes(originalData); + + byte[] roundtripData; + using (MemoryStream source = new(originalData)) + using (Stream encode = new StructuredMessageEncodingStream(source, segmentLength, flags)) + using (Stream decode = StructuredMessageDecodingStream.WrapStream(encode).DecodedStream) + using (MemoryStream dest = new()) + { + await CopyStream(source, dest, readLen); + roundtripData = dest.ToArray(); + } + + Assert.That(originalData.SequenceEqual(roundtripData)); + } + } +} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs new file mode 100644 index 0000000000000..b4f1dfe178246 --- /dev/null +++ b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +using System; +using System.Buffers.Binary; +using System.Collections.Generic; +using NUnit.Framework; +using static Azure.Storage.Shared.StructuredMessage; + +namespace Azure.Storage.Tests +{ + public class StructuredMessageTests + { + [TestCase(1024, Flags.None, 2)] + [TestCase(2000, Flags.StorageCrc64, 4)] + public void EncodeStreamHeader(int messageLength, int flags, int numSegments) + { + Span encoding = new(new byte[V1_0.StreamHeaderLength]); + V1_0.WriteStreamHeader(encoding, messageLength, (Flags)flags, numSegments); + + Assert.That(encoding[0], Is.EqualTo((byte)1)); + Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(1, 8)), Is.EqualTo(messageLength)); + Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(9, 2)), Is.EqualTo(flags)); + Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(11, 2)), Is.EqualTo(numSegments)); + } + + [TestCase(V1_0.StreamHeaderLength)] + [TestCase(V1_0.StreamHeaderLength + 1)] + [TestCase(V1_0.StreamHeaderLength - 1)] + public void EncodeStreamHeaderRejectBadBufferSize(int bufferSize) + { + Random r = new(); + byte[] encoding = new byte[bufferSize]; + + void Action() => V1_0.WriteStreamHeader(encoding, r.Next(2, int.MaxValue), Flags.StorageCrc64, r.Next(2, int.MaxValue)); + if (bufferSize < V1_0.StreamHeaderLength) + { + Assert.That(Action, Throws.ArgumentException); + } + else + { + Assert.That(Action, Throws.Nothing); + } + } + + [TestCase(1, 1024)] + [TestCase(5, 39578)] + public void EncodeSegmentHeader(int segmentNum, int contentLength) + { + Span encoding = new(new byte[V1_0.SegmentHeaderLength]); + V1_0.WriteSegmentHeader(encoding, segmentNum, contentLength); + + Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(0, 2)), Is.EqualTo(segmentNum)); + Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(2, 8)), Is.EqualTo(contentLength)); + } + + [TestCase(V1_0.SegmentHeaderLength)] + [TestCase(V1_0.SegmentHeaderLength + 1)] + 
[TestCase(V1_0.SegmentHeaderLength - 1)] + public void EncodeSegmentHeaderRejectBadBufferSize(int bufferSize) + { + Random r = new(); + byte[] encoding = new byte[bufferSize]; + + void Action() => V1_0.WriteSegmentHeader(encoding, r.Next(1, int.MaxValue), r.Next(2, int.MaxValue)); + if (bufferSize < V1_0.SegmentHeaderLength) + { + Assert.That(Action, Throws.ArgumentException); + } + else + { + Assert.That(Action, Throws.Nothing); + } + } + + [TestCase(true)] + [TestCase(false)] + public void EncodeSegmentFooter(bool useCrc) + { + Span encoding = new(new byte[Crc64Length]); + Span crc = useCrc ? new Random().NextBytesInline(Crc64Length) : default; + V1_0.WriteSegmentFooter(encoding, crc); + + if (useCrc) + { + Assert.That(encoding.SequenceEqual(crc), Is.True); + } + else + { + Assert.That(encoding.SequenceEqual(new Span(new byte[Crc64Length])), Is.True); + } + } + + [TestCase(Crc64Length)] + [TestCase(Crc64Length + 1)] + [TestCase(Crc64Length - 1)] + public void EncodeSegmentFooterRejectBadBufferSize(int bufferSize) + { + byte[] encoding = new byte[bufferSize]; + byte[] crc = new byte[Crc64Length]; + new Random().NextBytes(crc); + + void Action() => V1_0.WriteSegmentFooter(encoding, crc); + if (bufferSize < Crc64Length) + { + Assert.That(Action, Throws.ArgumentException); + } + else + { + Assert.That(Action, Throws.Nothing); + } + } + } +} diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj index 7ab901e963e03..30d4b1f79daaf 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj @@ -11,6 +11,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj 
b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj index 6098dcd8ba33d..93e7432f186e3 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj @@ -37,6 +37,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs index 84d60b3bc37c4..2c6864f511571 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs @@ -99,7 +99,7 @@ internal static StorageResourceItemProperties ToStorageResourceItemProperties(th ContentRange contentRange = !string.IsNullOrWhiteSpace(result?.Details?.ContentRange) ? ContentRange.Parse(result.Details.ContentRange) : default; if (contentRange != default) { - size = contentRange.Size; + size = contentRange.TotalResourceLength; } return new StorageResourceItemProperties( @@ -151,7 +151,7 @@ internal static StorageResourceReadStreamResult ToReadStreamStorageResourceInfo( if (contentRange != default) { range = ContentRange.ToHttpRange(contentRange); - size = contentRange.Size; + size = contentRange.TotalResourceLength; } else if (result.Details.ContentLength > 0) { diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj index f8b62d0b947e2..214903eb5f9c4 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj @@ -22,11 +22,15 @@ + + + + @@ -40,6 +44,7 @@ + diff --git 
a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj index a6abde432473f..66a9fea0861a2 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj @@ -35,6 +35,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj index 9cde066f64eb7..6a472b9f74158 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.DataMovement.Files.Shares client library samples @@ -11,6 +11,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs index 9cb7d338fcb60..16a164f61b060 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs @@ -335,14 +335,14 @@ internal static StorageResourceReadStreamResult ToStorageResourceReadStreamResul ContentRange contentRange = !string.IsNullOrWhiteSpace(info?.Details?.ContentRange) ? 
ContentRange.Parse(info.Details.ContentRange) : default; if (contentRange != default) { - size = contentRange.Size; + size = contentRange.TotalResourceLength; } return new StorageResourceReadStreamResult( content: info?.Content, range: ContentRange.ToHttpRange(contentRange), properties: new StorageResourceItemProperties( - resourceLength: contentRange.Size, + resourceLength: contentRange.TotalResourceLength, eTag: info.Details.ETag, lastModifiedTime: info.Details.LastModified, properties: properties)); diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj index 8e574bca36a48..d75775beceafd 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj @@ -27,6 +27,7 @@ + diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs index ae3bc879f717e..577ee7bb9a480 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs @@ -17,7 +17,7 @@ public class DisposingShare : IDisposingContainer public static async Task CreateAsync(ShareClient share, IDictionary metadata) { - await share.CreateIfNotExistsAsync(metadata: metadata); + await share.CreateIfNotExistsAsync(new() { Metadata = metadata }); return new DisposingShare(share); } diff --git a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj index 5aaf548493b15..dd30659cf0a5d 100644 --- 
a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 diff --git a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj index b5e3c42359976..7a40eb8026443 100644 --- a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj @@ -34,6 +34,7 @@ + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index a202d6300f50e..7f856db5829ac 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index a202d6300f50e..7f856db5829ac 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/assets.json b/sdk/storage/Azure.Storage.Files.DataLake/assets.json index 4a64b8398f656..5127ea7e0c4db 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/assets.json +++ b/sdk/storage/Azure.Storage.Files.DataLake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.DataLake", - "Tag": "net/storage/Azure.Storage.Files.DataLake_d74597f1e3" + "Tag": "net/storage/Azure.Storage.Files.DataLake_48a38da58a" } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj index c230f2ed8fa20..eecbe0543fe87 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj @@ -15,6 +15,7 @@ + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj index 3c551e05c24c2..f8652fd283e36 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj @@ -42,6 +42,7 @@ + @@ -81,6 +82,10 @@ + + + + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs index 2da5eb76349eb..aaa8f514c6e44 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs @@ -16,6 +16,7 @@ using Azure.Storage.Common; using 
Azure.Storage.Files.DataLake.Models; using Azure.Storage.Sas; +using Azure.Storage.Shared; using Metadata = System.Collections.Generic.IDictionary; namespace Azure.Storage.Files.DataLake @@ -2332,13 +2333,39 @@ internal virtual async Task AppendInternal( using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(DataLakeFileClient))) { // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? structuredContentLength = default; + string structuredBodyType = null; + if (content != null && + validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? 
new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = content.Length - content.Position; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content?.WithNoDispose().WithProgress(progressHandler); + } - content = content?.WithNoDispose().WithProgress(progressHandler); ClientConfiguration.Pipeline.LogMethodEnter( nameof(DataLakeFileClient), message: @@ -2373,6 +2400,8 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, @@ -2392,6 +2421,8 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md index ec9675a014f70..a8340f1092bcb 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md @@ -23,7 +23,7 @@ directive: if (property.includes('/{filesystem}/{path}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem") && false == param['$ref'].endsWith("#/parameters/Path"))}); - } + } else if (property.includes('/{filesystem}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem"))}); @@ -127,7 +127,7 @@ directive: } $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{filesystem}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj index bef13bb21a1c6..1fa78690077be 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj @@ -6,6 +6,9 @@ Microsoft Azure.Storage.Files.DataLake client library tests false + + DataLakeSDK + diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs index 
4bdefdbf756cd..5067f98517bd2 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs @@ -34,7 +34,10 @@ protected override async Task> Get StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingFileSystem = await ClientBuilder.GetNewFileSystem(service: service, fileSystemName: containerName); + var disposingFileSystem = await ClientBuilder.GetNewFileSystem( + service: service, + fileSystemName: containerName, + publicAccessType: PublicAccessType.None); disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index cf8ce32808d81..473ffb67af41f 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -115,7 +115,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? 
AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? Audience { get { throw null; } set { } } @@ -808,6 +808,7 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index cf8ce32808d81..473ffb67af41f 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -115,7 +115,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } @@ -808,6 +808,7 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } + public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index c2b5c3d31e6a2..c33c8bb335398 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": "net/storage/Azure.Storage.Files.Shares_df67d82d59" + "Tag": "net/storage/Azure.Storage.Files.Shares_4b545ae555" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj index 0bcec423c144d..d1efeca0c2da2 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj @@ -16,6 +16,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj index 740160b155650..d136154f5d3d4 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 @@ -42,6 +42,7 @@ + @@ -85,6 +86,11 @@ + + + + + diff --git 
a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs index 0165af94435a0..4037cbdfd875e 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs @@ -38,6 +38,12 @@ public partial class ShareFileDownloadInfo : IDisposable, IDownloadedContent public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays + /// + /// When requested using , this value contains the CRC for the download blob range. + /// This value may only become populated once the network stream is fully consumed. + /// + public byte[] ContentCrc { get; internal set; } + /// /// Details returned when downloading a file /// diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs index f776384d06add..0b27510aaa6c4 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs @@ -17,20 +17,5 @@ public static InvalidOperationException FileOrShareMissing( string fileClient, string shareClient) => new InvalidOperationException($"{leaseClient} requires either a {fileClient} or {shareClient}"); - - public static void AssertAlgorithmSupport(StorageChecksumAlgorithm? algorithm) - { - StorageChecksumAlgorithm resolved = (algorithm ?? StorageChecksumAlgorithm.None).ResolveAuto(); - switch (resolved) - { - case StorageChecksumAlgorithm.None: - case StorageChecksumAlgorithm.MD5: - return; - case StorageChecksumAlgorithm.StorageCrc64: - throw new ArgumentException("Azure File Shares do not support CRC-64."); - default: - throw new ArgumentException($"{nameof(StorageChecksumAlgorithm)} does not support value {Enum.GetName(typeof(StorageChecksumAlgorithm), resolved) ?? 
((int)resolved).ToString(CultureInfo.InvariantCulture)}."); - } - } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs index f713200a524de..ea3f8554b944d 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs @@ -2397,51 +2397,70 @@ private async Task> DownloadInternal( // Wrap the response Content in a RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. - initialResponse.Value.Content = RetriableStream.Create( - stream, - startOffset => - { - (Response Response, Stream ContentStream) = StartDownloadAsync( - range, - validationOptions, - conditions, - startOffset, - async, - cancellationToken) - .EnsureCompleted(); - if (etag != Response.GetRawResponse().Headers.ETag) - { - throw new ShareFileModifiedException( - "File has been modified concurrently", - Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); - } - return ContentStream; - }, - async startOffset => + async ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) + { + (Response response, Stream contentStream) = await StartDownloadAsync( + range, + validationOptions, + conditions, + offset, + async, + cancellationToken).ConfigureAwait(false); + if (etag != response.GetRawResponse().Headers.ETag) { - (Response Response, Stream ContentStream) = await StartDownloadAsync( - range, - validationOptions, - conditions, - startOffset, - async, - cancellationToken) - .ConfigureAwait(false); - if (etag != Response.GetRawResponse().Headers.ETag) + throw new ShareFileModifiedException( + "File has been modified concurrently", + Uri, etag, response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); + } + return response; + } + async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData 
DecodedData)> StructuredMessageFactory( + long offset, bool async, CancellationToken cancellationToken) + { + Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); + return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.ContentLength); + } + + if (initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + { + (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( + initialResponse.Value.Content, initialResponse.Value.ContentLength); + initialResponse.Value.Content = new StructuredMessageDecodingRetriableStream( + decodingStream, + decodedData, + StructuredMessage.Flags.StorageCrc64, + startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) + .EnsureCompleted(), + async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false), + decodedData => { - throw new ShareFileModifiedException( - "File has been modified concurrently", - Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); - } - return ContentStream; - }, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); + initialResponse.Value.ContentCrc = new byte[StructuredMessage.Crc64Length]; + decodedData.Crc.WriteCrc64(initialResponse.Value.ContentCrc); + }, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } + else + { + initialResponse.Value.Content = RetriableStream.Create( + initialResponse.Value.Content, + startOffset => Factory(startOffset, async: false, cancellationToken) + .EnsureCompleted().Value.Content, + async startOffset => (await Factory(startOffset, async: true, cancellationToken) + .ConfigureAwait(false)).Value.Content, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); + } // buffer response 
stream and ensure it matches the transactional hash if any // Storage will not return a hash for payload >4MB, so this buffer is capped similarly // hashing is opt-in, so this buffer is part of that opt-in - if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) + if (validationOptions != default && + validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && + validationOptions.AutoValidateChecksum && + // structured message decoding does the validation for us + !initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)initialResponse.Value.ContentLength); @@ -2524,8 +2543,6 @@ await ContentHasher.AssertResponseHashMatchInternal( bool async = true, CancellationToken cancellationToken = default) { - ShareErrors.AssertAlgorithmSupport(transferValidationOverride?.ChecksumAlgorithm); - // calculation gets illegible with null coalesce; just pre-initialize var pageRange = range; pageRange = new HttpRange( @@ -2535,13 +2552,27 @@ await ContentHasher.AssertResponseHashMatchInternal( (long?)null); ClientConfiguration.Pipeline.LogTrace($"Download {Uri} with range: {pageRange}"); - ResponseWithHeaders response; + bool? rangeGetContentMD5 = null; + string structuredBodyType = null; + switch (transferValidationOverride?.ChecksumAlgorithm.ResolveAuto()) + { + case StorageChecksumAlgorithm.MD5: + rangeGetContentMD5 = true; + break; + case StorageChecksumAlgorithm.StorageCrc64: + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + break; + default: + break; + } + ResponseWithHeaders response; if (async) { response = await FileRestClient.DownloadAsync( range: pageRange == default ? 
null : pageRange.ToString(), - rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentMD5: rangeGetContentMD5, + structuredBodyType: structuredBodyType, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -2550,7 +2581,8 @@ await ContentHasher.AssertResponseHashMatchInternal( { response = FileRestClient.Download( range: pageRange == default ? null : pageRange.ToString(), - rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentMD5: rangeGetContentMD5, + structuredBodyType: structuredBodyType, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } @@ -4630,7 +4662,6 @@ internal async Task> UploadRangeInternal( CancellationToken cancellationToken) { UploadTransferValidationOptions validationOptions = transferValidationOverride ?? ClientConfiguration.TransferValidation.Upload; - ShareErrors.AssertAlgorithmSupport(validationOptions?.ChecksumAlgorithm); using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareFileClient))) { @@ -4646,14 +4677,38 @@ internal async Task> UploadRangeInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - - content = content.WithNoDispose().WithProgress(progressHandler); + ContentHasher.GetHashResult hashResult = null; + long contentLength = (content?.Length - content?.Position) ?? 0; + long? 
structuredContentLength = default; + string structuredBodyType = null; + if (validationOptions != null && + validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + { + // report progress in terms of caller bytes, not encoded bytes + structuredContentLength = contentLength; + contentLength = (content?.Length - content?.Position) ?? 0; + structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; + content = content.WithNoDispose().WithProgress(progressHandler); + content = validationOptions.PrecalculatedChecksum.IsEmpty + ? new StructuredMessageEncodingStream( + content, + Constants.StructuredMessage.DefaultSegmentContentLength, + StructuredMessage.Flags.StorageCrc64) + : new StructuredMessagePrecalculatedCrcWrapperStream( + content, + validationOptions.PrecalculatedChecksum.Span); + contentLength = (content?.Length - content?.Position) ?? 0; + } + else + { + // compute hash BEFORE attaching progress handler + hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content.WithNoDispose().WithProgress(progressHandler); + } ResponseWithHeaders response; @@ -4666,6 +4721,8 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -4679,6 +4736,8 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, + structuredBodyType: structuredBodyType, + structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md 
b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 2bcc0e37ee65a..ca0e5ae4c9160 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -25,7 +25,7 @@ directive: if (property.includes('/{shareName}/{directory}/{fileName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath") && false == param['$ref'].endsWith("#/parameters/FilePath"))}); - } + } else if (property.includes('/{shareName}/{directory}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath"))}); @@ -46,7 +46,7 @@ directive: $.Metrics.type = "object"; ``` -### Times aren't required +### Times aren't required ``` yaml directive: - from: swagger-document diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj index 398a4b6367489..d09dd8fe8949f 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj @@ -17,6 +17,7 @@ + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs index 3dcdb21f27b36..9fd8905e388b1 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs @@ -64,10 +64,6 @@ 
protected override async Task GetResourceClientAsync( private void AssertSupportsHashAlgorithm(StorageChecksumAlgorithm algorithm) { - if (algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) - { - TestHelper.AssertInconclusiveRecordingFriendly(Recording.Mode, "Azure File Share does not support CRC64."); - } } protected override async Task UploadPartitionAsync(ShareFileClient client, Stream source, UploadTransferValidationOptions transferValidation) @@ -147,8 +143,44 @@ protected override async Task SetupDataAsync(ShareFileClient client, Stream data public override void TestAutoResolve() { Assert.AreEqual( - StorageChecksumAlgorithm.MD5, + StorageChecksumAlgorithm.StorageCrc64, TransferValidationOptionsExtensions.ResolveAuto(StorageChecksumAlgorithm.Auto)); } + + [Test] + public async Task StructuredMessagePopulatesCrcDownloadStreaming() + { + await using DisposingShare disposingContainer = await ClientBuilder.GetTestShareAsync(); + + const int dataLength = Constants.KB; + byte[] data = GetRandomBuffer(dataLength); + byte[] dataCrc = new byte[8]; + StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); + + ShareFileClient file = disposingContainer.Container.GetRootDirectoryClient().GetFileClient(GetNewResourceName()); + await file.CreateAsync(data.Length); + await file.UploadAsync(new MemoryStream(data)); + + Response response = await file.DownloadAsync(new ShareFileDownloadOptions() + { + TransferValidation = new DownloadTransferValidationOptions + { + ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 + } + }); + + // crc is not present until response stream is consumed + Assert.That(response.Value.ContentCrc, Is.Null); + + byte[] downloadedData; + using (MemoryStream ms = new()) + { + await response.Value.Content.CopyToAsync(ms); + downloadedData = ms.ToArray(); + } + + Assert.That(response.Value.ContentCrc, Is.EqualTo(dataCrc)); + Assert.That(downloadedData, Is.EqualTo(data)); + } } } diff --git 
a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs index 96bc919c7a719..9f440eb3639d7 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs @@ -74,7 +74,7 @@ public QueueClient(System.Uri queueUri, Azure.Storage.StorageSharedKeyCredential } public partial class QueueClientOptions : Azure.Core.ClientOptions { - public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2024_11_04) { } + public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Queues.Models.QueueAudience? Audience { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } public System.Uri GeoRedundantSecondaryUri { get { throw null; } set { } } @@ -426,7 +426,7 @@ public event System.EventHandler + PreserveNewest diff --git a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj index e0a6fab3c753b..4d0334255f041 100644 --- a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj +++ b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj @@ -21,6 +21,7 @@ + From a9d6c257ec7098740bf2c29c8c3a179eba195674 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Wed, 25 Sep 2024 18:24:34 -0500 Subject: [PATCH 20/25] Revert "Structured message cherrypick stg96" (#46284) --- ...e.Storage.Blobs.Batch.Samples.Tests.csproj | 1 - .../Azure.Storage.Blobs.Batch.Tests.csproj | 3 +- ...rage.Blobs.ChangeFeed.Samples.Tests.csproj | 3 +- ...zure.Storage.Blobs.ChangeFeed.Tests.csproj | 3 +- 
.../api/Azure.Storage.Blobs.net6.0.cs | 7 +- .../api/Azure.Storage.Blobs.netstandard2.0.cs | 7 +- .../api/Azure.Storage.Blobs.netstandard2.1.cs | 7 +- sdk/storage/Azure.Storage.Blobs/assets.json | 2 +- .../Azure.Storage.Blobs.Samples.Tests.csproj | 1 - .../src/AppendBlobClient.cs | 45 +- .../src/Azure.Storage.Blobs.csproj | 7 - .../Azure.Storage.Blobs/src/BlobBaseClient.cs | 110 +--- .../src/BlobClientOptions.cs | 2 - .../src/BlobClientSideDecryptor.cs | 2 +- .../src/BlockBlobClient.cs | 92 +-- .../src/Models/BlobDownloadDetails.cs | 8 - .../src/Models/BlobDownloadInfo.cs | 10 - .../src/Models/BlobDownloadStreamingResult.cs | 8 - .../Azure.Storage.Blobs/src/PageBlobClient.cs | 49 +- .../src/PartitionedDownloader.cs | 95 ++- .../Azure.Storage.Blobs/src/autorest.md | 4 +- .../tests/Azure.Storage.Blobs.Tests.csproj | 3 - .../BlobBaseClientTransferValidationTests.cs | 114 ++-- .../tests/ClientSideEncryptionTests.cs | 2 +- .../tests/PartitionedDownloaderTests.cs | 2 +- .../Azure.Storage.Common.Samples.Tests.csproj | 1 - .../src/Shared/ChecksumExtensions.cs | 22 - .../src/Shared/Constants.cs | 9 - .../src/Shared/ContentRange.cs | 18 +- .../src/Shared/ContentRangeExtensions.cs | 14 - .../src/Shared/Errors.Clients.cs | 10 - .../Azure.Storage.Common/src/Shared/Errors.cs | 19 - .../src/Shared/LazyLoadingReadOnlyStream.cs | 40 +- .../src/Shared/PooledMemoryStream.cs | 2 +- .../src/Shared/StorageCrc64Composer.cs | 48 +- .../StorageRequestValidationPipelinePolicy.cs | 29 - .../src/Shared/StorageVersionExtensions.cs | 2 +- .../src/Shared/StreamExtensions.cs | 22 +- .../src/Shared/StructuredMessage.cs | 244 -------- ...tructuredMessageDecodingRetriableStream.cs | 264 --------- .../Shared/StructuredMessageDecodingStream.cs | 542 ----------------- .../Shared/StructuredMessageEncodingStream.cs | 545 ------------------ ...redMessagePrecalculatedCrcWrapperStream.cs | 451 --------------- .../TransferValidationOptionsExtensions.cs | 7 + .../tests/Azure.Storage.Common.Tests.csproj | 9 - 
.../tests/Shared/FaultyStream.cs | 13 +- .../Shared/ObserveStructuredMessagePolicy.cs | 85 --- .../tests/Shared/RequestExtensions.cs | 27 - .../Shared/TamperStreamContentsPolicy.cs | 11 +- .../Shared/TransferValidationTestBase.cs | 325 +++-------- ...uredMessageDecodingRetriableStreamTests.cs | 246 -------- .../StructuredMessageDecodingStreamTests.cs | 323 ----------- .../StructuredMessageEncodingStreamTests.cs | 271 --------- .../tests/StructuredMessageHelper.cs | 68 --- .../StructuredMessageStreamRoundtripTests.cs | 127 ---- .../tests/StructuredMessageTests.cs | 114 ---- ...ge.DataMovement.Blobs.Samples.Tests.csproj | 1 - .../Azure.Storage.DataMovement.Blobs.csproj | 1 - .../src/DataMovementBlobsExtensions.cs | 4 +- ...re.Storage.DataMovement.Blobs.Tests.csproj | 5 - ...taMovement.Blobs.Files.Shares.Tests.csproj | 1 - ...Movement.Files.Shares.Samples.Tests.csproj | 3 +- .../src/DataMovementSharesExtensions.cs | 4 +- ...age.DataMovement.Files.Shares.Tests.csproj | 1 - .../tests/Shared/DisposingShare.cs | 7 +- .../src/Azure.Storage.DataMovement.csproj | 2 +- .../Azure.Storage.DataMovement.Tests.csproj | 1 - .../Azure.Storage.Files.DataLake.net6.0.cs | 2 +- ...e.Storage.Files.DataLake.netstandard2.0.cs | 2 +- .../Azure.Storage.Files.DataLake/assets.json | 2 +- ...torage.Files.DataLake.Samples.Tests.csproj | 1 - .../src/Azure.Storage.Files.DataLake.csproj | 5 - .../src/DataLakeFileClient.cs | 43 +- .../src/autorest.md | 4 +- .../Azure.Storage.Files.DataLake.Tests.csproj | 3 - ...taLakeFileClientTransferValidationTests.cs | 5 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 3 +- ...ure.Storage.Files.Shares.netstandard2.0.cs | 3 +- .../Azure.Storage.Files.Shares/assets.json | 2 +- ....Storage.Files.Shares.Samples.Tests.csproj | 1 - .../src/Azure.Storage.Files.Shares.csproj | 8 +- .../src/Models/ShareFileDownloadInfo.cs | 6 - .../src/ShareErrors.cs | 15 + .../src/ShareFileClient.cs | 165 ++---- .../src/autorest.md | 4 +- .../Azure.Storage.Files.Shares.Tests.csproj | 1 
- .../ShareFileClientTransferValidationTests.cs | 42 +- .../api/Azure.Storage.Queues.net6.0.cs | 4 +- .../Azure.Storage.Queues.netstandard2.0.cs | 4 +- .../Azure.Storage.Queues.netstandard2.1.cs | 4 +- .../Azure.Storage.Queues.Samples.Tests.csproj | 1 - .../tests/Azure.Storage.Queues.Tests.csproj | 1 - 92 files changed, 410 insertions(+), 4446 deletions(-) delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs delete mode 100644 sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj index 
6009a5336b8b9..3dea34a02b7ea 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.Batch/samples/Azure.Storage.Blobs.Batch.Samples.Tests.csproj @@ -17,7 +17,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj index 286ab317256bf..2b77907e9aaac 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.Batch/tests/Azure.Storage.Blobs.Batch.Tests.csproj @@ -23,7 +23,6 @@ - PreserveNewest @@ -43,4 +42,4 @@ - + \ No newline at end of file diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj index 6f8fcaf6528b3..7711cae537db6 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/samples/Azure.Storage.Blobs.ChangeFeed.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.Blobs.ChangeFeed client library samples @@ -14,7 +14,6 @@ - diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj index 8cf13cd60744f..9682ab15ecd60 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/tests/Azure.Storage.Blobs.ChangeFeed.Tests.csproj @@ -17,7 +17,6 @@ - @@ -29,4 +28,4 @@ PreserveNewest - + \ No newline at end of file diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs 
b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index 822d5b41d1404..25640917de5bb 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -522,7 +522,6 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } @@ -568,7 +567,6 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -590,7 +588,6 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { 
} public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1853,7 +1850,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index 822d5b41d1404..25640917de5bb 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } public 
Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -522,7 +522,6 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } @@ -568,7 +567,6 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -590,7 +588,6 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1853,7 +1850,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public 
SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index 822d5b41d1404..25640917de5bb 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -522,7 +522,6 @@ public BlobDownloadDetails() { } public long BlobSequenceNumber { get { throw null; } } public Azure.Storage.Blobs.Models.BlobType BlobType { get { throw null; } } public string CacheControl { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public string ContentDisposition { get { throw null; } } public string ContentEncoding { get { throw null; } } public byte[] ContentHash { get { throw null; } } @@ -568,7 +567,6 @@ internal BlobDownloadInfo() { } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public string ContentType { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadOptions @@ -590,7 +588,6 @@ public partial class BlobDownloadStreamingResult : System.IDisposable internal BlobDownloadStreamingResult() { } public System.IO.Stream Content { get { throw null; } } public Azure.Storage.Blobs.Models.BlobDownloadDetails Details { get { throw null; } } - public bool ExpectTrailingDetails { get { throw null; } } public void Dispose() { } } public partial class BlobDownloadToOptions @@ -1853,7 +1850,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base 
(default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/assets.json b/sdk/storage/Azure.Storage.Blobs/assets.json index 1994292f7b658..0facb33e2a026 100644 --- a/sdk/storage/Azure.Storage.Blobs/assets.json +++ b/sdk/storage/Azure.Storage.Blobs/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Blobs", - "Tag": "net/storage/Azure.Storage.Blobs_c5174c4663" + "Tag": "net/storage/Azure.Storage.Blobs_5c382dfb14" } diff --git a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj index 568dd6cba9516..77fd767c3486c 100644 --- a/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/samples/Azure.Storage.Blobs.Samples.Tests.csproj @@ -16,7 +16,6 @@ - diff --git a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs index 9a110cf8eb13a..e70d5e02c82d7 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/AppendBlobClient.cs @@ -1242,39 +1242,14 @@ internal async Task> AppendBlockInternal( BlobErrors.VerifyHttpsCustomerProvidedKey(Uri, ClientConfiguration.CustomerProvidedKey); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? 
structuredContentLength = default; - string structuredBodyType = null; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -1292,8 +1267,6 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), @@ -1316,8 +1289,6 @@ internal async Task> AppendBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifModifiedSince: conditions?.IfModifiedSince, ifUnmodifiedSince: conditions?.IfUnmodifiedSince, ifMatch: conditions?.IfMatch?.ToString(), diff --git a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj index e29acc40ca38b..8b09c620d1654 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj +++ b/sdk/storage/Azure.Storage.Blobs/src/Azure.Storage.Blobs.csproj @@ -52,8 +52,6 @@ - - @@ -93,11 +91,6 @@ - - - - - diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs index 6b95b04c703db..aa91edb9f6c41 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobBaseClient.cs @@ -1031,7 +1031,6 @@ private async Task> DownloadInternal( ContentHash = blobDownloadDetails.ContentHash, ContentLength = blobDownloadDetails.ContentLength, ContentType = blobDownloadDetails.ContentType, - ExpectTrailingDetails = blobDownloadStreamingResult.ExpectTrailingDetails, }, response.GetRawResponse()); } #endregion @@ -1548,52 +1547,30 @@ internal virtual async ValueTask> Download // Wrap the response Content in a 
RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. - ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) - => StartDownloadAsync( - range, - conditionsWithEtag, - validationOptions, - offset, - async, - cancellationToken); - async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( - long offset, bool async, CancellationToken cancellationToken) - { - Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); - return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.Details.ContentLength); - } - Stream stream; - if (response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( - response.Value.Content, response.Value.Details.ContentLength); - stream = new StructuredMessageDecodingRetriableStream( - decodingStream, - decodedData, - StructuredMessage.Flags.StorageCrc64, - startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) - .EnsureCompleted(), - async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false), - decodedData => - { - response.Value.Details.ContentCrc = new byte[StructuredMessage.Crc64Length]; - decodedData.Crc.WriteCrc64(response.Value.Details.ContentCrc); - }, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } - else - { - stream = RetriableStream.Create( - response.Value.Content, - startOffset => Factory(startOffset, async: false, cancellationToken) - .EnsureCompleted().Value.Content, - async startOffset => (await Factory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false)).Value.Content, - 
ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } + Stream stream = RetriableStream.Create( + response.Value.Content, + startOffset => + StartDownloadAsync( + range, + conditionsWithEtag, + validationOptions, + startOffset, + async, + cancellationToken) + .EnsureCompleted() + .Value.Content, + async startOffset => + (await StartDownloadAsync( + range, + conditionsWithEtag, + validationOptions, + startOffset, + async, + cancellationToken) + .ConfigureAwait(false)) + .Value.Content, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); stream = stream.WithNoDispose().WithProgress(progressHandler); @@ -1601,11 +1578,7 @@ ValueTask> Factory(long offset, bool async * Buffer response stream and ensure it matches the transactional checksum if any. * Storage will not return a checksum for payload >4MB, so this buffer is capped similarly. * Checksum validation is opt-in, so this buffer is part of that opt-in. */ - if (validationOptions != default && - validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && - validationOptions.AutoValidateChecksum && - // structured message decoding does the validation for us - !response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)response.Value.Details.ContentLength); @@ -1676,8 +1649,8 @@ await ContentHasher.AssertResponseHashMatchInternal( /// notifications that the operation should be cancelled. /// /// - /// A describing the - /// downloaded blob. contains + /// A describing the + /// downloaded blob. contains /// the blob's data. 
/// /// @@ -1716,29 +1689,13 @@ private async ValueTask> StartDownloadAsyn operationName: nameof(BlobBaseClient.Download), parameterName: nameof(conditions)); - bool? rangeGetContentMD5 = null; - bool? rangeGetContentCRC64 = null; - string structuredBodyType = null; - switch (validationOptions?.ChecksumAlgorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - rangeGetContentMD5 = true; - break; - case StorageChecksumAlgorithm.StorageCrc64: - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - break; - default: - break; - } - if (async) { response = await BlobRestClient.DownloadAsync( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: rangeGetContentMD5, - rangeGetContentCRC64: rangeGetContentCRC64, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? true : null, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, @@ -1755,9 +1712,8 @@ private async ValueTask> StartDownloadAsyn response = BlobRestClient.Download( range: pageRange?.ToString(), leaseId: conditions?.LeaseId, - rangeGetContentMD5: rangeGetContentMD5, - rangeGetContentCRC64: rangeGetContentCRC64, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, + rangeGetContentCRC64: validationOptions?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? 
true : null, encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, @@ -1773,11 +1729,9 @@ private async ValueTask> StartDownloadAsyn long length = response.IsUnavailable() ? 0 : response.Headers.ContentLength ?? 0; ClientConfiguration.Pipeline.LogTrace($"Response: {response.GetRawResponse().Status}, ContentLength: {length}"); - Response result = Response.FromValue( + return Response.FromValue( response.ToBlobDownloadStreamingResult(), response.GetRawResponse()); - result.Value.ExpectTrailingDetails = structuredBodyType != null; - return result; } #endregion diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs index f312e621bffc4..b16cefc83a535 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobClientOptions.cs @@ -318,8 +318,6 @@ private void AddHeadersAndQueryParameters() Diagnostics.LoggedHeaderNames.Add("x-ms-encryption-key-sha256"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-error-code"); Diagnostics.LoggedHeaderNames.Add("x-ms-copy-source-status-code"); - Diagnostics.LoggedHeaderNames.Add("x-ms-structured-body"); - Diagnostics.LoggedHeaderNames.Add("x-ms-structured-content-length"); Diagnostics.LoggedQueryParameters.Add("comp"); Diagnostics.LoggedQueryParameters.Add("maxresults"); diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs b/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs index 59b036d4b20bd..9006282fab5b7 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlobClientSideDecryptor.cs @@ -186,7 +186,7 @@ private static bool CanIgnorePadding(ContentRange? 
contentRange) // did we request the last block? // end is inclusive/0-index, so end = n and size = n+1 means we requested the last block - if (contentRange.Value.TotalResourceLength - contentRange.Value.End == 1) + if (contentRange.Value.Size - contentRange.Value.End == 1) { return false; } diff --git a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs index 00e6bf0780e2f..f5348303e57f0 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/BlockBlobClient.cs @@ -875,35 +875,14 @@ internal virtual async Task> UploadInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (content != null && - validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64); - contentLength = content.Length - content.Position; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await 
ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content?.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -942,8 +921,6 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -976,8 +953,6 @@ internal virtual async Task> UploadInternal( legalHold: legalHold, transactionalContentMD5: hashResult?.MD5AsArray, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -1330,39 +1305,14 @@ internal virtual async Task> StageBlockInternal( Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? 
new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -1370,7 +1320,7 @@ internal virtual async Task> StageBlockInternal( { response = await BlockBlobRestClient.StageBlockAsync( blockId: base64BlockId, - contentLength: contentLength, + contentLength: (content?.Length - content?.Position) ?? 0, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1379,8 +1329,6 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken) .ConfigureAwait(false); } @@ -1388,7 +1336,7 @@ internal virtual async Task> StageBlockInternal( { response = BlockBlobRestClient.StageBlock( blockId: base64BlockId, - contentLength: contentLength, + contentLength: (content?.Length - content?.Position) ?? 0, body: content, transactionalContentCrc64: hashResult?.StorageCrc64AsArray, transactionalContentMD5: hashResult?.MD5AsArray, @@ -1397,8 +1345,6 @@ internal virtual async Task> StageBlockInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, cancellationToken: cancellationToken); } @@ -2845,7 +2791,7 @@ internal async Task OpenWriteInternal( immutabilityPolicy: default, legalHold: default, progressHandler: default, - transferValidationOverride: new() { ChecksumAlgorithm = StorageChecksumAlgorithm.None }, + transferValidationOverride: default, operationName: default, async: async, cancellationToken: cancellationToken) diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs index 0490ec239798e..bc119822cdc12 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadDetails.cs @@ -34,14 +34,6 @@ public class BlobDownloadDetails public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays - /// - /// When requested using , this value contains 
the CRC for the download blob range. - /// This value may only become populated once the network stream is fully consumed. If this instance is accessed through - /// , the network stream has already been consumed. Otherwise, consume the content stream before - /// checking this value. - /// - public byte[] ContentCrc { get; internal set; } - /// /// Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs index b42801e36ab55..e034573b54b3a 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadInfo.cs @@ -4,8 +4,6 @@ using System; using System.ComponentModel; using System.IO; -using System.Threading.Tasks; -using Azure.Core; using Azure.Storage.Shared; namespace Azure.Storage.Blobs.Models @@ -51,14 +49,6 @@ public class BlobDownloadInfo : IDisposable, IDownloadedContent /// public BlobDownloadDetails Details { get; internal set; } - /// - /// Indicates some contents of are mixed into the response stream. - /// They will not be set until has been fully consumed. These details - /// will be extracted from the content stream by the library before the calling code can - /// encounter them. - /// - public bool ExpectTrailingDetails { get; internal set; } - /// /// Constructor. 
/// diff --git a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs index 9b7d4d4e00dad..4fbada6e67aad 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/Models/BlobDownloadStreamingResult.cs @@ -24,14 +24,6 @@ internal BlobDownloadStreamingResult() { } /// public Stream Content { get; internal set; } - /// - /// Indicates some contents of are mixed into the response stream. - /// They will not be set until has been fully consumed. These details - /// will be extracted from the content stream by the library before the calling code can - /// encounter them. - /// - public bool ExpectTrailingDetails { get; internal set; } - /// /// Disposes the by calling Dispose on the underlying stream. /// diff --git a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs index 7038897531fbb..fa575e41b8ebe 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PageBlobClient.cs @@ -1363,42 +1363,15 @@ internal async Task> UploadPagesInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - HttpRange range; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && - ClientSideEncryption == null) // don't allow feature combination - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 0; - range = new HttpRange(offset, (content?.Length - content?.Position) ?? 
null); - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content?.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content?.WithNoDispose().WithProgress(progressHandler); - range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content?.WithNoDispose().WithProgress(progressHandler); + HttpRange range = new HttpRange(offset, (content?.Length - content?.Position) ?? null); ResponseWithHeaders response; @@ -1415,8 +1388,6 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, @@ -1441,8 +1412,6 @@ internal async Task> UploadPagesInternal( encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, encryptionScope: ClientConfiguration.EncryptionScope, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, ifSequenceNumberLessThanOrEqualTo: conditions?.IfSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan: conditions?.IfSequenceNumberLessThan, ifSequenceNumberEqualTo: conditions?.IfSequenceNumberEqual, diff --git a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs index 1b14bcf98ec04..2c52d0c256e34 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs +++ b/sdk/storage/Azure.Storage.Blobs/src/PartitionedDownloader.cs @@ -22,8 +22,6 @@ internal class PartitionedDownloader private const string _operationName = nameof(BlobBaseClient) + "." + nameof(BlobBaseClient.DownloadTo); private const string _innerOperationName = nameof(BlobBaseClient) + "." + nameof(BlobBaseClient.DownloadStreaming); - private const int Crc64Len = Constants.StorageCrc64SizeInBytes; - /// /// The client used to download the blob. /// @@ -50,7 +48,6 @@ internal class PartitionedDownloader /// private readonly StorageChecksumAlgorithm _validationAlgorithm; private readonly int _checksumSize; - // TODO disabling master crc temporarily. segment CRCs still handled. 
private bool UseMasterCrc => _validationAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; private StorageCrc64HashAlgorithm _masterCrcCalculator = null; @@ -203,31 +200,20 @@ public async Task DownloadToInternal( } // Destination wrapped in master crc step if needed (must wait until after encryption wrap check) - byte[] composedCrcBuf = default; + Memory composedCrc = default; if (UseMasterCrc) { _masterCrcCalculator = StorageCrc64HashAlgorithm.Create(); destination = ChecksumCalculatingStream.GetWriteStream(destination, _masterCrcCalculator.Append); - disposables.Add(_arrayPool.RentDisposable(Crc64Len, out composedCrcBuf)); - composedCrcBuf.Clear(); + disposables.Add(_arrayPool.RentAsMemoryDisposable( + Constants.StorageCrc64SizeInBytes, out composedCrc)); + composedCrc.Span.Clear(); } // If the first segment was the entire blob, we'll copy that to // the output stream and finish now - long initialLength; - long totalLength; - // Get blob content length downloaded from content range when available to handle transit encoding - if (string.IsNullOrWhiteSpace(initialResponse.Value.Details.ContentRange)) - { - initialLength = initialResponse.Value.Details.ContentLength; - totalLength = 0; - } - else - { - ContentRange recievedRange = ContentRange.Parse(initialResponse.Value.Details.ContentRange); - initialLength = recievedRange.GetRangeLength(); - totalLength = recievedRange.TotalResourceLength.Value; - } + long initialLength = initialResponse.Value.Details.ContentLength; + long totalLength = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange); if (initialLength == totalLength) { await HandleOneShotDownload(initialResponse, destination, async, cancellationToken) @@ -253,16 +239,15 @@ await HandleOneShotDownload(initialResponse, destination, async, cancellationTok } else { - using (_arrayPool.RentDisposable(_checksumSize, out byte[] partitionChecksum)) + using (_arrayPool.RentAsMemoryDisposable(_checksumSize, out Memory partitionChecksum)) 
{ - await CopyToInternal(initialResponse, destination, new(partitionChecksum, 0, _checksumSize), async, cancellationToken).ConfigureAwait(false); + await CopyToInternal(initialResponse, destination, partitionChecksum, async, cancellationToken).ConfigureAwait(false); if (UseMasterCrc) { StorageCrc64Composer.Compose( - (composedCrcBuf, 0L), - (partitionChecksum, initialResponse.Value.Details.ContentRange.GetContentRangeLengthOrDefault() - ?? initialResponse.Value.Details.ContentLength) - ).AsSpan(0, Crc64Len).CopyTo(composedCrcBuf); + (composedCrc.ToArray(), 0L), + (partitionChecksum.ToArray(), initialResponse.Value.Details.ContentLength) + ).CopyTo(composedCrc); } } } @@ -301,16 +286,15 @@ await HandleOneShotDownload(initialResponse, destination, async, cancellationTok else { Response result = await responseValueTask.ConfigureAwait(false); - using (_arrayPool.RentDisposable(_checksumSize, out byte[] partitionChecksum)) + using (_arrayPool.RentAsMemoryDisposable(_checksumSize, out Memory partitionChecksum)) { - await CopyToInternal(result, destination, new(partitionChecksum, 0, _checksumSize), async, cancellationToken).ConfigureAwait(false); + await CopyToInternal(result, destination, partitionChecksum, async, cancellationToken).ConfigureAwait(false); if (UseMasterCrc) { StorageCrc64Composer.Compose( - (composedCrcBuf, 0L), - (partitionChecksum, result.Value.Details.ContentRange.GetContentRangeLengthOrDefault() - ?? result.Value.Details.ContentLength) - ).AsSpan(0, Crc64Len).CopyTo(composedCrcBuf); + (composedCrc.ToArray(), 0L), + (partitionChecksum.ToArray(), result.Value.Details.ContentLength) + ).CopyTo(composedCrc); } } } @@ -326,7 +310,7 @@ await HandleOneShotDownload(initialResponse, destination, async, cancellationTok } #pragma warning restore AZC0110 // DO NOT use await keyword in possibly synchronous scope. - await FinalizeDownloadInternal(destination, composedCrcBuf?.AsMemory(0, Crc64Len) ?? 
default, async, cancellationToken) + await FinalizeDownloadInternal(destination, composedCrc, async, cancellationToken) .ConfigureAwait(false); return initialResponse.GetRawResponse(); @@ -344,7 +328,7 @@ async Task ConsumeQueuedTask() // CopyToAsync causes ConsumeQueuedTask to wait until the // download is complete - using (_arrayPool.RentDisposable(_checksumSize, out byte[] partitionChecksum)) + using (_arrayPool.RentAsMemoryDisposable(_checksumSize, out Memory partitionChecksum)) { await CopyToInternal( response, @@ -353,14 +337,13 @@ await CopyToInternal( async, cancellationToken) .ConfigureAwait(false); - if (UseMasterCrc) - { - StorageCrc64Composer.Compose( - (composedCrcBuf, 0L), - (partitionChecksum, response.Value.Details.ContentRange.GetContentRangeLengthOrDefault() - ?? response.Value.Details.ContentLength) - ).AsSpan(0, Crc64Len).CopyTo(composedCrcBuf); - } + if (UseMasterCrc) + { + StorageCrc64Composer.Compose( + (composedCrc.ToArray(), 0L), + (partitionChecksum.ToArray(), response.Value.Details.ContentLength) + ).CopyTo(composedCrc); + } } } } @@ -396,7 +379,7 @@ await FinalizeDownloadInternal(destination, partitionChecksum, async, cancellati private async Task FinalizeDownloadInternal( Stream destination, - ReadOnlyMemory composedCrc, + Memory composedCrc, bool async, CancellationToken cancellationToken) { @@ -412,6 +395,20 @@ private async Task FinalizeDownloadInternal( } } + private static long ParseRangeTotalLength(string range) + { + if (range == null) + { + return 0; + } + int lengthSeparator = range.IndexOf("/", StringComparison.InvariantCultureIgnoreCase); + if (lengthSeparator == -1) + { + throw BlobErrors.ParsingFullHttpRangeFailed(range); + } + return long.Parse(range.Substring(lengthSeparator + 1), CultureInfo.InvariantCulture); + } + private async Task CopyToInternal( Response response, Stream destination, @@ -420,9 +417,7 @@ private async Task CopyToInternal( CancellationToken cancellationToken) { 
CancellationHelper.ThrowIfCancellationRequested(cancellationToken); - // if structured message, this crc is validated in the decoding process. don't decode it here. - bool structuredMessage = response.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader); - using IHasher hasher = structuredMessage ? null : ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); + using IHasher hasher = ContentHasher.GetHasherFromAlgorithmId(_validationAlgorithm); using Stream rawSource = response.Value.Content; using Stream source = hasher != null ? ChecksumCalculatingStream.GetReadStream(rawSource, hasher.AppendHash) @@ -434,13 +429,7 @@ await source.CopyToInternal( cancellationToken) .ConfigureAwait(false); - // with structured message, the message integrity will already be validated, - // but we can still get the checksum out of the response object - if (structuredMessage) - { - response.Value.Details.ContentCrc?.CopyTo(checksumBuffer.Span); - } - else if (hasher != null) + if (hasher != null) { hasher.GetFinalHash(checksumBuffer.Span); (ReadOnlyMemory checksum, StorageChecksumAlgorithm _) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index a96db9856ca58..7160bd89aba05 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -34,7 +34,7 @@ directive: if (property.includes('/{containerName}/{blob}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } + } else if (property.includes('/{containerName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); @@ -158,7 +158,7 @@ 
directive: var newName = property.replace('/{containerName}/{blob}', ''); $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{containerName}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj index 1c3856c83b64e..62c7b6d17e63e 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.Blobs/tests/Azure.Storage.Blobs.Tests.csproj @@ -6,9 +6,6 @@ Microsoft Azure.Storage.Blobs client library tests false - - BlobSDK - diff --git a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs index 3ec448e6d1ed0..73d11612f1d8c 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/BlobBaseClientTransferValidationTests.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Buffers; using System.IO; using System.Threading.Tasks; using Azure.Core.TestFramework; @@ -39,10 +37,7 @@ protected override async Task> GetDispo StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingContainer = await ClientBuilder.GetTestContainerAsync( - service: service, - containerName: containerName, - publicAccessType: PublicAccessType.None); + var disposingContainer = await ClientBuilder.GetTestContainerAsync(service: service, containerName: containerName); disposingContainer.Container.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingContainer.Container.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; @@ -96,96 +91,57 @@ public override void TestAutoResolve() } #region Added Tests - [Test] - public virtual async Task OlderServiceVersionThrowsOnStructuredMessage() + [TestCaseSource("GetValidationAlgorithms")] + public async Task ExpectedDownloadStreamingStreamTypeReturned(StorageChecksumAlgorithm algorithm) { - // use service version before structured message was introduced - await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( - service: ClientBuilder.GetServiceClient_SharedKey( - InstrumentClientOptions(new BlobClientOptions(BlobClientOptions.ServiceVersion.V2024_11_04))), - publicAccessType: PublicAccessType.None); + await using var test = await GetDisposingContainerAsync(); // Arrange - const int dataLength = Constants.KB; - var data = GetRandomBuffer(dataLength); - - var resourceName = GetNewResourceName(); - var blob = InstrumentClient(disposingContainer.Container.GetBlobClient(GetNewResourceName())); - await blob.UploadAsync(BinaryData.FromBytes(data)); - - var validationOptions = new DownloadTransferValidationOptions + var data = GetRandomBuffer(Constants.KB); + BlobClient blob = 
InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); + using (var stream = new MemoryStream(data)) { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - }; - AsyncTestDelegate operation = async () => await (await blob.DownloadStreamingAsync( - new BlobDownloadOptions - { - Range = new HttpRange(length: Constants.StructuredMessage.MaxDownloadCrcWithHeader + 1), - TransferValidation = validationOptions, - })).Value.Content.CopyToAsync(Stream.Null); - Assert.That(operation, Throws.TypeOf()); - } - - [Test] - public async Task StructuredMessagePopulatesCrcDownloadStreaming() - { - await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( - publicAccessType: PublicAccessType.None); - - const int dataLength = Constants.KB; - byte[] data = GetRandomBuffer(dataLength); - byte[] dataCrc = new byte[8]; - StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); - - var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); - await blob.UploadAsync(BinaryData.FromBytes(data)); + await blob.UploadAsync(stream); + } + // don't make options instance at all for no hash request + DownloadTransferValidationOptions transferValidation = algorithm == StorageChecksumAlgorithm.None + ? 
default + : new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; - Response response = await blob.DownloadStreamingAsync(new() + // Act + Response response = await blob.DownloadStreamingAsync(new BlobDownloadOptions { - TransferValidation = new DownloadTransferValidationOptions - { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - } + TransferValidation = transferValidation, + Range = new HttpRange(length: data.Length) }); - // crc is not present until response stream is consumed - Assert.That(response.Value.Details.ContentCrc, Is.Null); - - byte[] downloadedData; - using (MemoryStream ms = new()) - { - await response.Value.Content.CopyToAsync(ms); - downloadedData = ms.ToArray(); - } - - Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); - Assert.That(downloadedData, Is.EqualTo(data)); + // Assert + // validated stream is buffered + Assert.AreEqual(typeof(MemoryStream), response.Value.Content.GetType()); } [Test] - public async Task StructuredMessagePopulatesCrcDownloadContent() + public async Task ExpectedDownloadStreamingStreamTypeReturned_None() { - await using DisposingContainer disposingContainer = await ClientBuilder.GetTestContainerAsync( - publicAccessType: PublicAccessType.None); + await using var test = await GetDisposingContainerAsync(); - const int dataLength = Constants.KB; - byte[] data = GetRandomBuffer(dataLength); - byte[] dataCrc = new byte[8]; - StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); - - var blob = disposingContainer.Container.GetBlobClient(GetNewResourceName()); - await blob.UploadAsync(BinaryData.FromBytes(data)); + // Arrange + var data = GetRandomBuffer(Constants.KB); + BlobClient blob = InstrumentClient(test.Container.GetBlobClient(GetNewResourceName())); + using (var stream = new MemoryStream(data)) + { + await blob.UploadAsync(stream); + } - Response response = await blob.DownloadContentAsync(new BlobDownloadOptions() + // Act + Response response = await 
blob.DownloadStreamingAsync(new BlobDownloadOptions { - TransferValidation = new DownloadTransferValidationOptions - { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - } + Range = new HttpRange(length: data.Length) }); - Assert.That(response.Value.Details.ContentCrc, Is.EqualTo(dataCrc)); - Assert.That(response.Value.Content.ToArray(), Is.EqualTo(data)); + // Assert + // unvalidated stream type is private; just check we didn't get back a buffered stream + Assert.AreNotEqual(typeof(MemoryStream), response.Value.Content.GetType()); } #endregion } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs index e85ff3aa5473f..5d391440ea1b6 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/ClientSideEncryptionTests.cs @@ -1343,7 +1343,7 @@ public void CanParseLargeContentRange() { long compareValue = (long)Int32.MaxValue + 1; //Increase max int32 by one ContentRange contentRange = ContentRange.Parse($"bytes 0 {compareValue} {compareValue}"); - Assert.AreEqual((long)Int32.MaxValue + 1, contentRange.TotalResourceLength); + Assert.AreEqual((long)Int32.MaxValue + 1, contentRange.Size); Assert.AreEqual(0, contentRange.Start); Assert.AreEqual((long)Int32.MaxValue + 1, contentRange.End); } diff --git a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs index af408264c5bfa..d8d4756a510c1 100644 --- a/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs +++ b/sdk/storage/Azure.Storage.Blobs/tests/PartitionedDownloaderTests.cs @@ -305,7 +305,7 @@ public Response GetStream(HttpRange range, BlobRequ ContentHash = new byte[] { 1, 2, 3 }, LastModified = DateTimeOffset.Now, Metadata = new Dictionary() { { "meta", "data" } }, - ContentRange = $"bytes {range.Offset}-{Math.Max(1, range.Offset + contentLength - 
1)}/{_length}", + ContentRange = $"bytes {range.Offset}-{range.Offset + contentLength}/{_length}", ETag = s_etag, ContentEncoding = "test", CacheControl = "test", diff --git a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj index aeca4497a8770..7d454aeaa0af2 100644 --- a/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/samples/Azure.Storage.Common.Samples.Tests.csproj @@ -19,7 +19,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs deleted file mode 100644 index 48304640eee43..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/ChecksumExtensions.cs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers.Binary; - -namespace Azure.Storage; - -internal static class ChecksumExtensions -{ - public static void WriteCrc64(this ulong crc, Span dest) - => BinaryPrimitives.WriteUInt64LittleEndian(dest, crc); - - public static bool TryWriteCrc64(this ulong crc, Span dest) - => BinaryPrimitives.TryWriteUInt64LittleEndian(dest, crc); - - public static ulong ReadCrc64(this ReadOnlySpan crc) - => BinaryPrimitives.ReadUInt64LittleEndian(crc); - - public static bool TryReadCrc64(this ReadOnlySpan crc, out ulong value) - => BinaryPrimitives.TryReadUInt64LittleEndian(crc, out value); -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs index 35d5c1f1fde8c..3e00882188fba 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Constants.cs @@ -657,15 +657,6 @@ internal static class AccountResources internal static readonly int[] PathStylePorts = { 10000, 10001, 10002, 10003, 10004, 10100, 10101, 10102, 10103, 10104, 11000, 11001, 11002, 11003, 11004, 11100, 11101, 11102, 11103, 11104 }; } - internal static class StructuredMessage - { - public const string StructuredMessageHeader = "x-ms-structured-body"; - public const string StructuredContentLength = "x-ms-structured-content-length"; - public const string CrcStructuredMessage = "XSM/1.0; properties=crc64"; - public const int DefaultSegmentContentLength = 4 * MB; - public const int MaxDownloadCrcWithHeader = 4 * MB; - } - internal static class ClientSideEncryption { public const string HttpMessagePropertyKeyV1 = "Azure.Storage.StorageTelemetryPolicy.ClientSideEncryption.V1"; diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs index cb3b0a7bee189..f656382efad2b 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs +++ 
b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRange.cs @@ -82,20 +82,20 @@ public RangeUnit(string value) public long? End { get; } /// - /// Size of the entire resource this range is from, measured in this instance's . + /// Size of this range, measured in this instance's . /// - public long? TotalResourceLength { get; } + public long? Size { get; } /// /// Unit this range is measured in. Generally "bytes". /// public RangeUnit Unit { get; } - public ContentRange(RangeUnit unit, long? start, long? end, long? totalResourceLength) + public ContentRange(RangeUnit unit, long? start, long? end, long? size) { Start = start; End = end; - TotalResourceLength = totalResourceLength; + Size = size; Unit = unit; } @@ -113,7 +113,7 @@ public static ContentRange Parse(string headerValue) string unit = default; long? start = default; long? end = default; - long? resourceSize = default; + long? size = default; try { @@ -136,10 +136,10 @@ public static ContentRange Parse(string headerValue) var rawSize = tokens[blobSizeIndex]; if (rawSize != WildcardMarker) { - resourceSize = long.Parse(rawSize, CultureInfo.InvariantCulture); + size = long.Parse(rawSize, CultureInfo.InvariantCulture); } - return new ContentRange(unit, start, end, resourceSize); + return new ContentRange(unit, start, end, size); } catch (IndexOutOfRangeException) { @@ -165,7 +165,7 @@ public static HttpRange ToHttpRange(ContentRange contentRange) /// /// Indicates whether this instance and a specified are equal /// - public bool Equals(ContentRange other) => (other.Start == Start) && (other.End == End) && (other.Unit == Unit) && (other.TotalResourceLength == TotalResourceLength); + public bool Equals(ContentRange other) => (other.Start == Start) && (other.End == End) && (other.Unit == Unit) && (other.Size == Size); /// /// Determines if two values are the same. 
@@ -185,6 +185,6 @@ public static HttpRange ToHttpRange(ContentRange contentRange) /// [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => HashCodeBuilder.Combine(Start, End, TotalResourceLength, Unit.GetHashCode()); + public override int GetHashCode() => HashCodeBuilder.Combine(Start, End, Size, Unit.GetHashCode()); } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs deleted file mode 100644 index 160a69b19a9c8..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/ContentRangeExtensions.cs +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Azure.Storage.Cryptography; - -internal static class ContentRangeExtensions -{ - public static long? GetContentRangeLengthOrDefault(this string contentRange) - => string.IsNullOrWhiteSpace(contentRange) - ? default : ContentRange.Parse(contentRange).GetRangeLength(); - - public static long GetRangeLength(this ContentRange contentRange) - => contentRange.End.Value - contentRange.Start.Value + 1; -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs index 867607e551e6a..2a5fe38668104 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.Clients.cs @@ -3,7 +3,6 @@ using System; using System.Globalization; -using System.IO; using System.Linq; using System.Security.Authentication; using System.Xml.Serialization; @@ -106,18 +105,9 @@ public static ArgumentException VersionNotSupported(string paramName) public static RequestFailedException ClientRequestIdMismatch(Response response, string echo, string original) => new RequestFailedException(response.Status, $"Response x-ms-client-request-id '{echo}' does not match the original expected 
request id, '{original}'.", null); - public static InvalidDataException StructuredMessageNotAcknowledgedGET(Response response) - => new InvalidDataException($"Response does not acknowledge structured message was requested. Unknown data structure in response body."); - - public static InvalidDataException StructuredMessageNotAcknowledgedPUT(Response response) - => new InvalidDataException($"Response does not acknowledge structured message was sent. Unexpected data may have been persisted to storage."); - public static ArgumentException TransactionalHashingNotSupportedWithClientSideEncryption() => new ArgumentException("Client-side encryption and transactional hashing are not supported at the same time."); - public static InvalidDataException ExpectedStructuredMessage() - => new InvalidDataException($"Expected {Constants.StructuredMessage.StructuredMessageHeader} in response, but found none."); - public static void VerifyHttpsTokenAuth(Uri uri) { if (uri.Scheme != Constants.Https) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs index e3372665928c1..6b89a59011d51 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/Errors.cs @@ -72,9 +72,6 @@ public static ArgumentException CannotDeferTransactionalHashVerification() public static ArgumentException CannotInitializeWriteStreamWithData() => new ArgumentException("Initialized buffer for StorageWriteStream must be empty."); - public static InvalidDataException InvalidStructuredMessage(string optionalMessage = default) - => new InvalidDataException(("Invalid structured message data. " + optionalMessage ?? 
"").Trim()); - internal static void VerifyStreamPosition(Stream stream, string streamName) { if (stream != null && stream.CanSeek && stream.Length > 0 && stream.Position >= stream.Length) @@ -83,22 +80,6 @@ internal static void VerifyStreamPosition(Stream stream, string streamName) } } - internal static void AssertBufferMinimumSize(ReadOnlySpan buffer, int minSize, string paramName) - { - if (buffer.Length < minSize) - { - throw new ArgumentException($"Expected buffer Length of at least {minSize} bytes. Got {buffer.Length}.", paramName); - } - } - - internal static void AssertBufferExactSize(ReadOnlySpan buffer, int size, string paramName) - { - if (buffer.Length != size) - { - throw new ArgumentException($"Expected buffer Length of exactly {size} bytes. Got {buffer.Length}.", paramName); - } - } - public static void ThrowIfParamNull(object obj, string paramName) { if (obj == null) diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs index fe2db427bef02..c3e9c641c3fea 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/LazyLoadingReadOnlyStream.cs @@ -249,9 +249,41 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat response = await _downloadInternalFunc(range, _validationOptions, async, cancellationToken).ConfigureAwait(false); using Stream networkStream = response.Value.Content; - // use stream copy to ensure consumption of any trailing metadata (e.g. structured message) - // allow buffer limits to catch the error of data size mismatch - int totalCopiedBytes = (int) await networkStream.CopyToInternal(new MemoryStream(_buffer), async, cancellationToken).ConfigureAwait((false)); + + // The number of bytes we just downloaded. 
+ long downloadSize = GetResponseRange(response.GetRawResponse()).Length.Value; + + // The number of bytes we copied in the last loop. + int copiedBytes; + + // Bytes we have copied so far. + int totalCopiedBytes = 0; + + // Bytes remaining to copy. It is save to truncate the long because we asked for a max of int _buffer size bytes. + int remainingBytes = (int)downloadSize; + + do + { + if (async) + { + copiedBytes = await networkStream.ReadAsync( + buffer: _buffer, + offset: totalCopiedBytes, + count: remainingBytes, + cancellationToken: cancellationToken).ConfigureAwait(false); + } + else + { + copiedBytes = networkStream.Read( + buffer: _buffer, + offset: totalCopiedBytes, + count: remainingBytes); + } + + totalCopiedBytes += copiedBytes; + remainingBytes -= copiedBytes; + } + while (copiedBytes != 0); _bufferPosition = 0; _bufferLength = totalCopiedBytes; @@ -259,7 +291,7 @@ private async Task DownloadInternal(bool async, CancellationToken cancellat // if we deferred transactional hash validation on download, validate now // currently we always defer but that may change - if (_validationOptions != default && _validationOptions.ChecksumAlgorithm == StorageChecksumAlgorithm.MD5 && !_validationOptions.AutoValidateChecksum) // TODO better condition + if (_validationOptions != default && _validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && !_validationOptions.AutoValidateChecksum) { ContentHasher.AssertResponseHashMatch(_buffer, _bufferPosition, _bufferLength, _validationOptions.ChecksumAlgorithm, response.GetRawResponse()); } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs index 6070329d10d3d..3e218d18a90af 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/PooledMemoryStream.cs @@ -251,7 +251,7 @@ public override int Read(byte[] buffer, int offset, int count) Length - 
Position, bufferCount - (Position - offsetOfBuffer), count - read); - Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, offset + read, toCopy); + Array.Copy(currentBuffer, Position - offsetOfBuffer, buffer, read, toCopy); read += toCopy; Position += toCopy; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs index 307ff23b21144..ab6b76d78a87e 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageCrc64Composer.cs @@ -12,52 +12,22 @@ namespace Azure.Storage /// internal static class StorageCrc64Composer { - public static byte[] Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) - => Compose(partitions.AsEnumerable()); - - public static byte[] Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) - { - ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); - return BitConverter.GetBytes(result); - } - - public static byte[] Compose(params (ReadOnlyMemory Crc64, long OriginalDataLength)[] partitions) - => Compose(partitions.AsEnumerable()); - - public static byte[] Compose(IEnumerable<(ReadOnlyMemory Crc64, long OriginalDataLength)> partitions) + public static Memory Compose(params (byte[] Crc64, long OriginalDataLength)[] partitions) { -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64.Span), tup.OriginalDataLength))); -#else - ulong result = Compose(partitions.Select(tup => (System.BitConverter.ToUInt64(tup.Crc64.ToArray(), 0), tup.OriginalDataLength))); -#endif - return BitConverter.GetBytes(result); + return Compose(partitions.AsEnumerable()); } - public static byte[] Compose( - ReadOnlySpan leftCrc64, long leftOriginalDataLength, - ReadOnlySpan rightCrc64, long rightOriginalDataLength) + 
public static Memory Compose(IEnumerable<(byte[] Crc64, long OriginalDataLength)> partitions) { -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - ulong result = Compose( - (BitConverter.ToUInt64(leftCrc64), leftOriginalDataLength), - (BitConverter.ToUInt64(rightCrc64), rightOriginalDataLength)); -#else - ulong result = Compose( - (BitConverter.ToUInt64(leftCrc64.ToArray(), 0), leftOriginalDataLength), - (BitConverter.ToUInt64(rightCrc64.ToArray(), 0), rightOriginalDataLength)); -#endif - return BitConverter.GetBytes(result); + ulong result = Compose(partitions.Select(tup => (BitConverter.ToUInt64(tup.Crc64, 0), tup.OriginalDataLength))); + return new Memory(BitConverter.GetBytes(result)); } - public static ulong Compose(params (ulong Crc64, long OriginalDataLength)[] partitions) - => Compose(partitions.AsEnumerable()); - public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> partitions) { ulong composedCrc = 0; long composedDataLength = 0; - foreach ((ulong crc64, long originalDataLength) in partitions) + foreach (var tup in partitions) { composedCrc = StorageCrc64Calculator.Concatenate( uInitialCrcAB: 0, @@ -65,9 +35,9 @@ public static ulong Compose(IEnumerable<(ulong Crc64, long OriginalDataLength)> uFinalCrcA: composedCrc, uSizeA: (ulong) composedDataLength, uInitialCrcB: 0, - uFinalCrcB: crc64, - uSizeB: (ulong)originalDataLength); - composedDataLength += originalDataLength; + uFinalCrcB: tup.Crc64, + uSizeB: (ulong)tup.OriginalDataLength); + composedDataLength += tup.OriginalDataLength; } return composedCrc; } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs index 9f4ddb5249e82..0cef4f4d8d4ed 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageRequestValidationPipelinePolicy.cs @@ -33,35 
+33,6 @@ public override void OnReceivedResponse(HttpMessage message) { throw Errors.ClientRequestIdMismatch(message.Response, echo.First(), original); } - - if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && - message.Request.Headers.Contains(Constants.StructuredMessage.StructuredContentLength)) - { - AssertStructuredMessageAcknowledgedPUT(message); - } - else if (message.Request.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - AssertStructuredMessageAcknowledgedGET(message); - } - } - - private static void AssertStructuredMessageAcknowledgedPUT(HttpMessage message) - { - if (!message.Response.IsError && - !message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - throw Errors.StructuredMessageNotAcknowledgedPUT(message.Response); - } - } - - private static void AssertStructuredMessageAcknowledgedGET(HttpMessage message) - { - if (!message.Response.IsError && - !(message.Response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader) && - message.Response.Headers.Contains(Constants.StructuredMessage.StructuredContentLength))) - { - throw Errors.StructuredMessageNotAcknowledgedGET(message.Response); - } } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs index 44c0973ea9be1..2a7bd90fb82a1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs @@ -46,7 +46,7 @@ internal static class StorageVersionExtensions /// public const ServiceVersion LatestVersion = #if BlobSDK || QueueSDK || FileSDK || DataLakeSDK || ChangeFeedSDK || DataMovementSDK || BlobDataMovementSDK || ShareDataMovementSDK - ServiceVersion.V2025_01_05; + ServiceVersion.V2024_11_04; #else ERROR_STORAGE_SERVICE_NOT_DEFINED; #endif diff --git 
a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs index c8803ecf421e7..31f121d414ea4 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StreamExtensions.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Buffers; using System.IO; using System.Threading; using System.Threading.Tasks; @@ -50,7 +48,7 @@ public static async Task WriteInternal( } } - public static Task CopyToInternal( + public static Task CopyToInternal( this Stream src, Stream dest, bool async, @@ -81,33 +79,21 @@ public static Task CopyToInternal( /// Cancellation token for the operation. /// /// - public static async Task CopyToInternal( + public static async Task CopyToInternal( this Stream src, Stream dest, int bufferSize, bool async, CancellationToken cancellationToken) { - using IDisposable _ = ArrayPool.Shared.RentDisposable(bufferSize, out byte[] buffer); - long totalRead = 0; - int read; if (async) { - while (0 < (read = await src.ReadAsync(buffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false))) - { - totalRead += read; - await dest.WriteAsync(buffer, 0, read, cancellationToken).ConfigureAwait(false); - } + await src.CopyToAsync(dest, bufferSize, cancellationToken).ConfigureAwait(false); } else { - while (0 < (read = src.Read(buffer, 0, buffer.Length))) - { - totalRead += read; - dest.Write(buffer, 0, read); - } + src.CopyTo(dest, bufferSize); } - return totalRead; } } } diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs deleted file mode 100644 index a0a46837797b9..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessage.cs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. - -using System; -using System.Buffers; -using System.Buffers.Binary; -using System.IO; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -internal static class StructuredMessage -{ - public const int Crc64Length = 8; - - [Flags] - public enum Flags - { - None = 0, - StorageCrc64 = 1, - } - - public static class V1_0 - { - public const byte MessageVersionByte = 1; - - public const int StreamHeaderLength = 13; - public const int StreamHeaderVersionOffset = 0; - public const int StreamHeaderMessageLengthOffset = 1; - public const int StreamHeaderFlagsOffset = 9; - public const int StreamHeaderSegmentCountOffset = 11; - - public const int SegmentHeaderLength = 10; - public const int SegmentHeaderNumOffset = 0; - public const int SegmentHeaderContentLengthOffset = 2; - - #region Stream Header - public static void ReadStreamHeader( - ReadOnlySpan buffer, - out long messageLength, - out Flags flags, - out int totalSegments) - { - Errors.AssertBufferExactSize(buffer, 13, nameof(buffer)); - if (buffer[StreamHeaderVersionOffset] != 1) - { - throw new InvalidDataException("Unrecognized version of structured message."); - } - messageLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(StreamHeaderMessageLengthOffset, 8)); - flags = (Flags)BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderFlagsOffset, 2)); - totalSegments = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(StreamHeaderSegmentCountOffset, 2)); - } - - public static int WriteStreamHeader( - Span buffer, - long messageLength, - Flags flags, - int totalSegments) - { - const int versionOffset = 0; - const int messageLengthOffset = 1; - const int flagsOffset = 9; - const int numSegmentsOffset = 11; - - Errors.AssertBufferMinimumSize(buffer, StreamHeaderLength, nameof(buffer)); - - buffer[versionOffset] = MessageVersionByte; - BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(messageLengthOffset, 8), (ulong)messageLength); - 
BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(flagsOffset, 2), (ushort)flags); - BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(numSegmentsOffset, 2), (ushort)totalSegments); - - return StreamHeaderLength; - } - - /// - /// Gets stream header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. - /// - public static IDisposable GetStreamHeaderBytes( - ArrayPool pool, - out Memory bytes, - long messageLength, - Flags flags, - int totalSegments) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); - WriteStreamHeader(bytes.Span, messageLength, flags, totalSegments); - return disposable; - } - #endregion - - #region StreamFooter - public static int GetStreamFooterSize(Flags flags) - => flags.HasFlag(Flags.StorageCrc64) ? Crc64Length : 0; - - public static void ReadStreamFooter( - ReadOnlySpan buffer, - Flags flags, - out ulong crc64) - { - int expectedBufferSize = GetSegmentFooterSize(flags); - Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer)); - - crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default; - } - - public static int WriteStreamFooter(Span buffer, ReadOnlySpan crc64 = default) - { - int requiredSpace = 0; - if (!crc64.IsEmpty) - { - Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64)); - requiredSpace += Crc64Length; - } - - Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer)); - int offset = 0; - if (!crc64.IsEmpty) - { - crc64.CopyTo(buffer.Slice(offset, Crc64Length)); - offset += Crc64Length; - } - - return offset; - } - - /// - /// Gets stream header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. 
- /// - public static IDisposable GetStreamFooterBytes( - ArrayPool pool, - out Memory bytes, - ReadOnlySpan crc64 = default) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); - WriteStreamFooter(bytes.Span, crc64); - return disposable; - } - #endregion - - #region SegmentHeader - public static void ReadSegmentHeader( - ReadOnlySpan buffer, - out int segmentNum, - out long contentLength) - { - Errors.AssertBufferExactSize(buffer, 10, nameof(buffer)); - segmentNum = BinaryPrimitives.ReadUInt16LittleEndian(buffer.Slice(0, 2)); - contentLength = (long)BinaryPrimitives.ReadUInt64LittleEndian(buffer.Slice(2, 8)); - } - - public static int WriteSegmentHeader(Span buffer, int segmentNum, long segmentLength) - { - const int segmentNumOffset = 0; - const int segmentLengthOffset = 2; - - Errors.AssertBufferMinimumSize(buffer, SegmentHeaderLength, nameof(buffer)); - - BinaryPrimitives.WriteUInt16LittleEndian(buffer.Slice(segmentNumOffset, 2), (ushort)segmentNum); - BinaryPrimitives.WriteUInt64LittleEndian(buffer.Slice(segmentLengthOffset, 8), (ulong)segmentLength); - - return SegmentHeaderLength; - } - - /// - /// Gets segment header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. - /// - public static IDisposable GetSegmentHeaderBytes( - ArrayPool pool, - out Memory bytes, - int segmentNum, - long segmentLength) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(SegmentHeaderLength, out bytes); - WriteSegmentHeader(bytes.Span, segmentNum, segmentLength); - return disposable; - } - #endregion - - #region SegmentFooter - public static int GetSegmentFooterSize(Flags flags) - => flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; - - public static void ReadSegmentFooter( - ReadOnlySpan buffer, - Flags flags, - out ulong crc64) - { - int expectedBufferSize = GetSegmentFooterSize(flags); - Errors.AssertBufferExactSize(buffer, expectedBufferSize, nameof(buffer)); - - crc64 = flags.HasFlag(Flags.StorageCrc64) ? buffer.ReadCrc64() : default; - } - - public static int WriteSegmentFooter(Span buffer, ReadOnlySpan crc64 = default) - { - int requiredSpace = 0; - if (!crc64.IsEmpty) - { - Errors.AssertBufferExactSize(crc64, Crc64Length, nameof(crc64)); - requiredSpace += Crc64Length; - } - - Errors.AssertBufferMinimumSize(buffer, requiredSpace, nameof(buffer)); - int offset = 0; - if (!crc64.IsEmpty) - { - crc64.CopyTo(buffer.Slice(offset, Crc64Length)); - offset += Crc64Length; - } - - return offset; - } - - /// - /// Gets stream header in a buffer rented from the provided ArrayPool. - /// - /// - /// Disposable to return the buffer to the pool. - /// - public static IDisposable GetSegmentFooterBytes( - ArrayPool pool, - out Memory bytes, - ReadOnlySpan crc64 = default) - { - Argument.AssertNotNull(pool, nameof(pool)); - IDisposable disposable = pool.RentAsMemoryDisposable(StreamHeaderLength, out bytes); - WriteSegmentFooter(bytes.Span, crc64); - return disposable; - } - #endregion - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs deleted file mode 100644 index 22dfaef259972..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingRetriableStream.cs +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers; -using System.Buffers.Binary; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core; -using Azure.Core.Pipeline; - -namespace Azure.Storage.Shared; - -internal class StructuredMessageDecodingRetriableStream : Stream -{ - public class DecodedData - { - public ulong Crc { get; set; } - } - - private readonly Stream _innerRetriable; - private long _decodedBytesRead; - - private readonly StructuredMessage.Flags _expectedFlags; - private readonly List _decodedDatas; - private readonly Action _onComplete; - - private StorageCrc64HashAlgorithm _totalContentCrc; - - private readonly Func _decodingStreamFactory; - private readonly Func> _decodingAsyncStreamFactory; - - public StructuredMessageDecodingRetriableStream( - Stream initialDecodingStream, - StructuredMessageDecodingStream.RawDecodedData initialDecodedData, - StructuredMessage.Flags expectedFlags, - Func decodingStreamFactory, - Func> decodingAsyncStreamFactory, - Action onComplete, - ResponseClassifier responseClassifier, - int maxRetries) - { - _decodingStreamFactory = decodingStreamFactory; - _decodingAsyncStreamFactory = decodingAsyncStreamFactory; - _innerRetriable = RetriableStream.Create(initialDecodingStream, StreamFactory, StreamFactoryAsync, responseClassifier, maxRetries); - _decodedDatas = new() { initialDecodedData }; - _expectedFlags = expectedFlags; - _onComplete = onComplete; - - if (expectedFlags.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - _totalContentCrc = StorageCrc64HashAlgorithm.Create(); - } - } - - private Stream StreamFactory(long _) - { - long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum(); - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = _decodingStreamFactory(offset); - _decodedDatas.Add(decodedData); - FastForwardInternal(decodingStream, _decodedBytesRead - offset, 
false).EnsureCompleted(); - return decodingStream; - } - - private async ValueTask StreamFactoryAsync(long _) - { - long offset = _decodedDatas.SelectMany(d => d.SegmentCrcs).Select(s => s.SegmentLen).Sum(); - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = await _decodingAsyncStreamFactory(offset).ConfigureAwait(false); - _decodedDatas.Add(decodedData); - await FastForwardInternal(decodingStream, _decodedBytesRead - offset, true).ConfigureAwait(false); - return decodingStream; - } - - private static async ValueTask FastForwardInternal(Stream stream, long bytes, bool async) - { - using (ArrayPool.Shared.RentDisposable(4 * Constants.KB, out byte[] buffer)) - { - if (async) - { - while (bytes > 0) - { - bytes -= await stream.ReadAsync(buffer, 0, (int)Math.Min(bytes, buffer.Length)).ConfigureAwait(false); - } - } - else - { - while (bytes > 0) - { - bytes -= stream.Read(buffer, 0, (int)Math.Min(bytes, buffer.Length)); - } - } - } - } - - protected override void Dispose(bool disposing) - { - _decodedDatas.Clear(); - _innerRetriable.Dispose(); - } - - private void OnCompleted() - { - DecodedData final = new(); - if (_totalContentCrc != null) - { - final.Crc = ValidateCrc(); - } - _onComplete?.Invoke(final); - } - - private ulong ValidateCrc() - { - using IDisposable _ = ArrayPool.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf); - Span calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length); - _totalContentCrc.GetCurrentHash(calculatedBytes); - ulong calculated = BinaryPrimitives.ReadUInt64LittleEndian(calculatedBytes); - - ulong reported = _decodedDatas.Count == 1 - ? 
_decodedDatas.First().TotalCrc.Value - : StorageCrc64Composer.Compose(_decodedDatas.SelectMany(d => d.SegmentCrcs)); - - if (calculated != reported) - { - Span reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length); - BinaryPrimitives.WriteUInt64LittleEndian(reportedBytes, reported); - throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes); - } - - return calculated; - } - - #region Read - public override int Read(byte[] buffer, int offset, int count) - { - int read = _innerRetriable.Read(buffer, offset, count); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(new ReadOnlySpan(buffer, offset, read)); - } - return read; - } - - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - { - int read = await _innerRetriable.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(new ReadOnlySpan(buffer, offset, read)); - } - return read; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buffer) - { - int read = _innerRetriable.Read(buffer); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(buffer.Slice(0, read)); - } - return read; - } - - public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) - { - int read = await _innerRetriable.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - else - { - _totalContentCrc?.Append(buffer.Span.Slice(0, read)); - } - return read; - } -#endif - - public override int ReadByte() - { - int val = _innerRetriable.ReadByte(); - _decodedBytesRead += 1; - if (val == -1) - { - OnCompleted(); - } - return val; - } - - public override int 
EndRead(IAsyncResult asyncResult) - { - int read = _innerRetriable.EndRead(asyncResult); - _decodedBytesRead += read; - if (read == 0) - { - OnCompleted(); - } - return read; - } - #endregion - - #region Passthru - public override bool CanRead => _innerRetriable.CanRead; - - public override bool CanSeek => _innerRetriable.CanSeek; - - public override bool CanWrite => _innerRetriable.CanWrite; - - public override bool CanTimeout => _innerRetriable.CanTimeout; - - public override long Length => _innerRetriable.Length; - - public override long Position { get => _innerRetriable.Position; set => _innerRetriable.Position = value; } - - public override void Flush() => _innerRetriable.Flush(); - - public override Task FlushAsync(CancellationToken cancellationToken) => _innerRetriable.FlushAsync(cancellationToken); - - public override long Seek(long offset, SeekOrigin origin) => _innerRetriable.Seek(offset, origin); - - public override void SetLength(long value) => _innerRetriable.SetLength(value); - - public override void Write(byte[] buffer, int offset, int count) => _innerRetriable.Write(buffer, offset, count); - - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _innerRetriable.WriteAsync(buffer, offset, count, cancellationToken); - - public override void WriteByte(byte value) => _innerRetriable.WriteByte(value); - - public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginWrite(buffer, offset, count, callback, state); - - public override void EndWrite(IAsyncResult asyncResult) => _innerRetriable.EndWrite(asyncResult); - - public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, object state) => _innerRetriable.BeginRead(buffer, offset, count, callback, state); - - public override int ReadTimeout { get => _innerRetriable.ReadTimeout; set => _innerRetriable.ReadTimeout = value; } - - 
public override int WriteTimeout { get => _innerRetriable.WriteTimeout; set => _innerRetriable.WriteTimeout = value; } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override void Write(ReadOnlySpan buffer) => _innerRetriable.Write(buffer); - - public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) => _innerRetriable.WriteAsync(buffer, cancellationToken); -#endif - #endregion -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs deleted file mode 100644 index e6b193ae18260..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageDecodingStream.cs +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers; -using System.Buffers.Binary; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -/// -/// Decodes a structured message stream as the data is read. -/// -/// -/// Wraps the inner stream in a , which avoids using its internal -/// buffer if individual Read() calls are larger than it. This ensures one of the three scenarios -/// -/// -/// Read buffer >= stream buffer: -/// There is enough space in the read buffer for inline metadata to be safely -/// extracted in only one read to the true inner stream. -/// -/// -/// Read buffer < next inline metadata: -/// The stream buffer has been activated, and we can read multiple small times from the inner stream -/// without multi-reading the real stream, even when partway through an existing stream buffer. 
-/// -/// -/// Else: -/// Same as #1, but also the already-allocated stream buffer has been used to slightly improve -/// resource churn when reading inner stream. -/// -/// -/// -internal class StructuredMessageDecodingStream : Stream -{ - internal class RawDecodedData - { - public long? InnerStreamLength { get; set; } - public int? TotalSegments { get; set; } - public StructuredMessage.Flags? Flags { get; set; } - public List<(ulong SegmentCrc, long SegmentLen)> SegmentCrcs { get; } = new(); - public ulong? TotalCrc { get; set; } - public bool DecodeCompleted { get; set; } - } - - private enum SMRegion - { - StreamHeader, - StreamFooter, - SegmentHeader, - SegmentFooter, - SegmentContent, - } - - private readonly Stream _innerBufferedStream; - - private byte[] _metadataBuffer = ArrayPool.Shared.Rent(Constants.KB); - private int _metadataBufferOffset = 0; - private int _metadataBufferLength = 0; - - private int _streamHeaderLength; - private int _streamFooterLength; - private int _segmentHeaderLength; - private int _segmentFooterLength; - - private long? _expectedInnerStreamLength; - - private bool _disposed; - - private readonly RawDecodedData _decodedData; - private StorageCrc64HashAlgorithm _totalContentCrc; - private StorageCrc64HashAlgorithm _segmentCrc; - - private readonly bool _validateChecksums; - - public override bool CanRead => true; - - public override bool CanWrite => false; - - public override bool CanSeek => false; - - public override bool CanTimeout => _innerBufferedStream.CanTimeout; - - public override int ReadTimeout => _innerBufferedStream.ReadTimeout; - - public override int WriteTimeout => _innerBufferedStream.WriteTimeout; - - public override long Length => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - - public static (Stream DecodedStream, RawDecodedData DecodedData) WrapStream( - Stream innerStream, - long? 
expextedStreamLength = default) - { - RawDecodedData data = new(); - return (new StructuredMessageDecodingStream(innerStream, data, expextedStreamLength), data); - } - - private StructuredMessageDecodingStream( - Stream innerStream, - RawDecodedData decodedData, - long? expectedStreamLength) - { - Argument.AssertNotNull(innerStream, nameof(innerStream)); - Argument.AssertNotNull(decodedData, nameof(decodedData)); - - _expectedInnerStreamLength = expectedStreamLength; - _innerBufferedStream = new BufferedStream(innerStream); - _decodedData = decodedData; - - // Assumes stream will be structured message 1.0. Will validate this when consuming stream. - _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; - _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; - - _validateChecksums = true; - } - - #region Write - public override void Flush() => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); - - public override void SetLength(long value) => throw new NotSupportedException(); - #endregion - - #region Read - public override int Read(byte[] buf, int offset, int count) - { - int decodedRead; - int read; - do - { - read = _innerBufferedStream.Read(buf, offset, count); - _innerStreamConsumed += read; - decodedRead = Decode(new Span(buf, offset, read)); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } - - public override async Task ReadAsync(byte[] buf, int offset, int count, CancellationToken cancellationToken) - { - int decodedRead; - int read; - do - { - read = await _innerBufferedStream.ReadAsync(buf, offset, count, cancellationToken).ConfigureAwait(false); - _innerStreamConsumed += read; - decodedRead = Decode(new Span(buf, offset, read)); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } - -#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buf) - { - int decodedRead; - int read; - do - { - read = _innerBufferedStream.Read(buf); - _innerStreamConsumed += read; - decodedRead = Decode(buf.Slice(0, read)); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } - - public override async ValueTask ReadAsync(Memory buf, CancellationToken cancellationToken = default) - { - int decodedRead; - int read; - do - { - read = await _innerBufferedStream.ReadAsync(buf).ConfigureAwait(false); - _innerStreamConsumed += read; - decodedRead = Decode(buf.Slice(0, read).Span); - } while (decodedRead <= 0 && read > 0); - - if (read <= 0) - { - AssertDecodeFinished(); - } - - return decodedRead; - } -#endif - - private void AssertDecodeFinished() - { - if (_streamFooterLength > 0 && !_decodedData.DecodeCompleted) - { - throw Errors.InvalidStructuredMessage("Premature end of stream."); - } - _decodedData.DecodeCompleted = true; - } - - private long _innerStreamConsumed = 0; - private long _decodedContentConsumed = 0; - private SMRegion _currentRegion = SMRegion.StreamHeader; - private int _currentSegmentNum = 0; - private long _currentSegmentContentLength; - private long _currentSegmentContentRemaining; - private long CurrentRegionLength => _currentRegion switch - { - SMRegion.StreamHeader => _streamHeaderLength, - SMRegion.StreamFooter => _streamFooterLength, - SMRegion.SegmentHeader => _segmentHeaderLength, - SMRegion.SegmentFooter => _segmentFooterLength, - SMRegion.SegmentContent => _currentSegmentContentLength, - _ => 0, - }; - - /// - /// Decodes given bytes in place. Decoding based on internal stream position info. - /// Decoded data size will be less than or equal to encoded data length. - /// - /// - /// Length of the decoded data in . 
- /// - private int Decode(Span buffer) - { - if (buffer.IsEmpty) - { - return 0; - } - List<(int Offset, int Count)> gaps = new(); - - int bufferConsumed = ProcessMetadataBuffer(buffer); - - if (bufferConsumed > 0) - { - gaps.Add((0, bufferConsumed)); - } - - while (bufferConsumed < buffer.Length) - { - if (_currentRegion == SMRegion.SegmentContent) - { - int read = (int)Math.Min(buffer.Length - bufferConsumed, _currentSegmentContentRemaining); - _totalContentCrc?.Append(buffer.Slice(bufferConsumed, read)); - _segmentCrc?.Append(buffer.Slice(bufferConsumed, read)); - bufferConsumed += read; - _decodedContentConsumed += read; - _currentSegmentContentRemaining -= read; - if (_currentSegmentContentRemaining == 0) - { - _currentRegion = SMRegion.SegmentFooter; - } - } - else if (buffer.Length - bufferConsumed < CurrentRegionLength) - { - SavePartialMetadata(buffer.Slice(bufferConsumed)); - gaps.Add((bufferConsumed, buffer.Length - bufferConsumed)); - bufferConsumed = buffer.Length; - } - else - { - int processed = _currentRegion switch - { - SMRegion.StreamHeader => ProcessStreamHeader(buffer.Slice(bufferConsumed)), - SMRegion.StreamFooter => ProcessStreamFooter(buffer.Slice(bufferConsumed)), - SMRegion.SegmentHeader => ProcessSegmentHeader(buffer.Slice(bufferConsumed)), - SMRegion.SegmentFooter => ProcessSegmentFooter(buffer.Slice(bufferConsumed)), - _ => 0, - }; - // TODO surface error if processed is 0 - gaps.Add((bufferConsumed, processed)); - bufferConsumed += processed; - } - } - - if (gaps.Count == 0) - { - return buffer.Length; - } - - // gaps is already sorted by offset due to how it was assembled - int gap = 0; - for (int i = gaps.First().Offset; i < buffer.Length; i++) - { - if (gaps.Count > 0 && gaps.First().Offset == i) - { - int count = gaps.First().Count; - gap += count; - i += count - 1; - gaps.RemoveAt(0); - } - else - { - buffer[i - gap] = buffer[i]; - } - } - return buffer.Length - gap; - } - - /// - /// Processes metadata in the internal buffer, if 
any. Appends any necessary data - /// from the append buffer to complete metadata. - /// - /// - /// Bytes consumed from . - /// - private int ProcessMetadataBuffer(ReadOnlySpan append) - { - if (_metadataBufferLength == 0) - { - return 0; - } - if (_currentRegion == SMRegion.SegmentContent) - { - return 0; - } - int appended = 0; - if (_metadataBufferLength < CurrentRegionLength && append.Length > 0) - { - appended = Math.Min((int)CurrentRegionLength - _metadataBufferLength, append.Length); - SavePartialMetadata(append.Slice(0, appended)); - } - if (_metadataBufferLength == CurrentRegionLength) - { - Span metadata = new(_metadataBuffer, _metadataBufferOffset, (int)CurrentRegionLength); - switch (_currentRegion) - { - case SMRegion.StreamHeader: - ProcessStreamHeader(metadata); - break; - case SMRegion.StreamFooter: - ProcessStreamFooter(metadata); - break; - case SMRegion.SegmentHeader: - ProcessSegmentHeader(metadata); - break; - case SMRegion.SegmentFooter: - ProcessSegmentFooter(metadata); - break; - } - _metadataBufferOffset = 0; - _metadataBufferLength = 0; - } - return appended; - } - - private void SavePartialMetadata(ReadOnlySpan span) - { - // safety array resize w/ArrayPool - if (_metadataBufferLength + span.Length > _metadataBuffer.Length) - { - ResizeMetadataBuffer(2 * (_metadataBufferLength + span.Length)); - } - - // realign any existing content if necessary - if (_metadataBufferLength != 0 && _metadataBufferOffset != 0) - { - // don't use Array.Copy() to move elements in the same array - for (int i = 0; i < _metadataBufferLength; i++) - { - _metadataBuffer[i] = _metadataBuffer[i + _metadataBufferOffset]; - } - _metadataBufferOffset = 0; - } - - span.CopyTo(new Span(_metadataBuffer, _metadataBufferOffset + _metadataBufferLength, span.Length)); - _metadataBufferLength += span.Length; - } - - private int ProcessStreamHeader(ReadOnlySpan span) - { - StructuredMessage.V1_0.ReadStreamHeader( - span.Slice(0, _streamHeaderLength), - out long streamLength, - 
out StructuredMessage.Flags flags, - out int totalSegments); - - _decodedData.InnerStreamLength = streamLength; - _decodedData.Flags = flags; - _decodedData.TotalSegments = totalSegments; - - if (_expectedInnerStreamLength.HasValue && _expectedInnerStreamLength.Value != streamLength) - { - throw Errors.InvalidStructuredMessage("Unexpected message size."); - } - - if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - _segmentFooterLength = StructuredMessage.Crc64Length; - _streamFooterLength = StructuredMessage.Crc64Length; - if (_validateChecksums) - { - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - _totalContentCrc = StorageCrc64HashAlgorithm.Create(); - } - } - _currentRegion = SMRegion.SegmentHeader; - return _streamHeaderLength; - } - - private int ProcessStreamFooter(ReadOnlySpan span) - { - int footerLen = StructuredMessage.V1_0.GetStreamFooterSize(_decodedData.Flags.Value); - StructuredMessage.V1_0.ReadStreamFooter( - span.Slice(0, footerLen), - _decodedData.Flags.Value, - out ulong reportedCrc); - if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - if (_validateChecksums) - { - ValidateCrc64(_totalContentCrc, reportedCrc); - } - _decodedData.TotalCrc = reportedCrc; - } - - if (_innerStreamConsumed != _decodedData.InnerStreamLength) - { - throw Errors.InvalidStructuredMessage("Unexpected message size."); - } - if (_currentSegmentNum != _decodedData.TotalSegments) - { - throw Errors.InvalidStructuredMessage("Missing expected message segments."); - } - - _decodedData.DecodeCompleted = true; - return footerLen; - } - - private int ProcessSegmentHeader(ReadOnlySpan span) - { - StructuredMessage.V1_0.ReadSegmentHeader( - span.Slice(0, _segmentHeaderLength), - out int newSegNum, - out _currentSegmentContentLength); - _currentSegmentContentRemaining = _currentSegmentContentLength; - if (newSegNum != _currentSegmentNum + 1) - { - throw Errors.InvalidStructuredMessage("Unexpected segment number in 
structured message."); - } - _currentSegmentNum = newSegNum; - _currentRegion = SMRegion.SegmentContent; - return _segmentHeaderLength; - } - - private int ProcessSegmentFooter(ReadOnlySpan span) - { - int footerLen = StructuredMessage.V1_0.GetSegmentFooterSize(_decodedData.Flags.Value); - StructuredMessage.V1_0.ReadSegmentFooter( - span.Slice(0, footerLen), - _decodedData.Flags.Value, - out ulong reportedCrc); - if (_decodedData.Flags.Value.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - if (_validateChecksums) - { - ValidateCrc64(_segmentCrc, reportedCrc); - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - } - _decodedData.SegmentCrcs.Add((reportedCrc, _currentSegmentContentLength)); - } - _currentRegion = _currentSegmentNum == _decodedData.TotalSegments ? SMRegion.StreamFooter : SMRegion.SegmentHeader; - return footerLen; - } - - private static void ValidateCrc64(StorageCrc64HashAlgorithm calculation, ulong reported) - { - using IDisposable _ = ArrayPool.Shared.RentDisposable(StructuredMessage.Crc64Length * 2, out byte[] buf); - Span calculatedBytes = new(buf, 0, StructuredMessage.Crc64Length); - Span reportedBytes = new(buf, calculatedBytes.Length, StructuredMessage.Crc64Length); - calculation.GetCurrentHash(calculatedBytes); - reported.WriteCrc64(reportedBytes); - if (!calculatedBytes.SequenceEqual(reportedBytes)) - { - throw Errors.ChecksumMismatch(calculatedBytes, reportedBytes); - } - } - #endregion - - public override long Seek(long offset, SeekOrigin origin) - => throw new NotSupportedException(); - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - if (_disposed) - { - return; - } - - if (disposing) - { - _innerBufferedStream.Dispose(); - _disposed = true; - } - } - - private void ResizeMetadataBuffer(int newSize) - { - byte[] newBuf = ArrayPool.Shared.Rent(newSize); - Array.Copy(_metadataBuffer, _metadataBufferOffset, newBuf, 0, _metadataBufferLength); - ArrayPool.Shared.Return(_metadataBuffer); - 
_metadataBuffer = newBuf; - } - - private void AlignMetadataBuffer() - { - if (_metadataBufferOffset != 0 && _metadataBufferLength != 0) - { - for (int i = 0; i < _metadataBufferLength; i++) - { - _metadataBuffer[i] = _metadataBuffer[_metadataBufferOffset + i]; - } - _metadataBufferOffset = 0; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs deleted file mode 100644 index cb0ef340155ec..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessageEncodingStream.cs +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core.Pipeline; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -internal class StructuredMessageEncodingStream : Stream -{ - private readonly Stream _innerStream; - - private readonly int _streamHeaderLength; - private readonly int _streamFooterLength; - private readonly int _segmentHeaderLength; - private readonly int _segmentFooterLength; - private readonly int _segmentContentLength; - - private readonly StructuredMessage.Flags _flags; - private bool _disposed; - - private bool UseCrcSegment => _flags.HasFlag(StructuredMessage.Flags.StorageCrc64); - private readonly StorageCrc64HashAlgorithm _totalCrc; - private StorageCrc64HashAlgorithm _segmentCrc; - private readonly byte[] _segmentCrcs; - private int _latestSegmentCrcd = 0; - - #region Segments - /// - /// Gets the 1-indexed segment number the underlying stream is currently positioned in. - /// 1-indexed to match segment labelling as specified by SM spec. 
- /// - private int CurrentInnerSegment => (int)Math.Floor(_innerStream.Position / (float)_segmentContentLength) + 1; - - /// - /// Gets the 1-indexed segment number the encoded data stream is currently positioned in. - /// 1-indexed to match segment labelling as specified by SM spec. - /// - private int CurrentEncodingSegment - { - get - { - // edge case: always on final segment when at end of inner stream - if (_innerStream.Position == _innerStream.Length) - { - return TotalSegments; - } - // when writing footer, inner stream is positioned at next segment, - // but this stream is still writing the previous one - if (_currentRegion == SMRegion.SegmentFooter) - { - return CurrentInnerSegment - 1; - } - return CurrentInnerSegment; - } - } - - /// - /// Segment length including header and footer. - /// - private int SegmentTotalLength => _segmentHeaderLength + _segmentContentLength + _segmentFooterLength; - - private int TotalSegments => GetTotalSegments(_innerStream, _segmentContentLength); - private static int GetTotalSegments(Stream innerStream, long segmentContentLength) - { - return (int)Math.Ceiling(innerStream.Length / (float)segmentContentLength); - } - #endregion - - public override bool CanRead => true; - - public override bool CanWrite => false; - - public override bool CanSeek => _innerStream.CanSeek; - - public override bool CanTimeout => _innerStream.CanTimeout; - - public override int ReadTimeout => _innerStream.ReadTimeout; - - public override int WriteTimeout => _innerStream.WriteTimeout; - - public override long Length => - _streamHeaderLength + _streamFooterLength + - (_segmentHeaderLength + _segmentFooterLength) * TotalSegments + - _innerStream.Length; - - #region Position - private enum SMRegion - { - StreamHeader, - StreamFooter, - SegmentHeader, - SegmentFooter, - SegmentContent, - } - - private SMRegion _currentRegion = SMRegion.StreamHeader; - private int _currentRegionPosition = 0; - - private long _maxSeekPosition = 0; - - public override 
long Position - { - get - { - return _currentRegion switch - { - SMRegion.StreamHeader => _currentRegionPosition, - SMRegion.StreamFooter => _streamHeaderLength + - TotalSegments * (_segmentHeaderLength + _segmentFooterLength) + - _innerStream.Length + - _currentRegionPosition, - SMRegion.SegmentHeader => _innerStream.Position + - _streamHeaderLength + - (CurrentEncodingSegment - 1) * (_segmentHeaderLength + _segmentFooterLength) + - _currentRegionPosition, - SMRegion.SegmentFooter => _innerStream.Position + - _streamHeaderLength + - // Inner stream has moved to next segment but we're still writing the previous segment footer - CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - - _segmentFooterLength + _currentRegionPosition, - SMRegion.SegmentContent => _innerStream.Position + - _streamHeaderLength + - CurrentEncodingSegment * (_segmentHeaderLength + _segmentFooterLength) - - _segmentFooterLength, - _ => throw new InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), - }; - } - set - { - Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); - if (value < _streamHeaderLength) - { - _currentRegion = SMRegion.StreamHeader; - _currentRegionPosition = (int)value; - _innerStream.Position = 0; - return; - } - if (value >= Length - _streamFooterLength) - { - _currentRegion = SMRegion.StreamFooter; - _currentRegionPosition = (int)(value - (Length - _streamFooterLength)); - _innerStream.Position = _innerStream.Length; - return; - } - int newSegmentNum = 1 + (int)Math.Floor((value - _streamHeaderLength) / (double)(_segmentHeaderLength + _segmentFooterLength + _segmentContentLength)); - int segmentPosition = (int)(value - _streamHeaderLength - - ((newSegmentNum - 1) * (_segmentHeaderLength + _segmentFooterLength + _segmentContentLength))); - - if (segmentPosition < _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = (int)((value - _streamHeaderLength) % 
SegmentTotalLength); - _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength; - return; - } - if (segmentPosition < _segmentHeaderLength + _segmentContentLength) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - - _segmentHeaderLength; - _innerStream.Position = (newSegmentNum - 1) * _segmentContentLength + _currentRegionPosition; - return; - } - - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = (int)((value - _streamHeaderLength) % SegmentTotalLength) - - _segmentHeaderLength - _segmentContentLength; - _innerStream.Position = newSegmentNum * _segmentContentLength; - } - } - #endregion - - public StructuredMessageEncodingStream( - Stream innerStream, - int segmentContentLength, - StructuredMessage.Flags flags) - { - Argument.AssertNotNull(innerStream, nameof(innerStream)); - if (innerStream.GetLengthOrDefault() == default) - { - throw new ArgumentException("Stream must have known length.", nameof(innerStream)); - } - if (innerStream.Position != 0) - { - throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); - } - // stream logic likely breaks down with segment length of 1; enforce >=2 rather than just positive number - // real world scenarios will probably use a minimum of tens of KB - Argument.AssertInRange(segmentContentLength, 2, int.MaxValue, nameof(segmentContentLength)); - - _flags = flags; - _segmentContentLength = segmentContentLength; - - _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; - _streamFooterLength = UseCrcSegment ? StructuredMessage.Crc64Length : 0; - _segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; - _segmentFooterLength = UseCrcSegment ? 
StructuredMessage.Crc64Length : 0; - - if (UseCrcSegment) - { - _totalCrc = StorageCrc64HashAlgorithm.Create(); - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - _segmentCrcs = ArrayPool.Shared.Rent( - GetTotalSegments(innerStream, segmentContentLength) * StructuredMessage.Crc64Length); - innerStream = ChecksumCalculatingStream.GetReadStream(innerStream, span => - { - _totalCrc.Append(span); - _segmentCrc.Append(span); - }); - } - - _innerStream = innerStream; - } - - #region Write - public override void Flush() => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); - - public override void SetLength(long value) => throw new NotSupportedException(); - #endregion - - #region Read - public override int Read(byte[] buffer, int offset, int count) - => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); - - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); - - private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < count && Position < Length) - { - int subreadOffset = offset + totalRead; - int subreadCount = count - totalRead; - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); - 
break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. - if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamInternal( - buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buffer) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - - public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } -#endif - - #region Read Headers/Footers - private int ReadFromStreamHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( - ArrayPool.Shared, out Memory headerBytes, Length, _flags, TotalSegments); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _streamHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromStreamFooter(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read <= 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( - ArrayPool.Shared, - out Memory footerBytes, - crc64: UseCrcSegment - ? 
_totalCrc.GetCurrentHash() // TODO array pooling - : default); - footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - return read; - } - - private int ReadFromSegmentHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( - ArrayPool.Shared, - out Memory headerBytes, - CurrentInnerSegment, - Math.Min(_segmentContentLength, _innerStream.Length - _innerStream.Position)); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromSegmentFooter(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read < 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( - ArrayPool.Shared, - out Memory headerBytes, - crc64: UseCrcSegment - ? new Span( - _segmentCrcs, - (CurrentEncodingSegment-1) * _totalCrc.HashLengthInBytes, - _totalCrc.HashLengthInBytes) - : default); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentFooterLength) - { - _currentRegion = _innerStream.Position == _innerStream.Length - ? 
SMRegion.StreamFooter : SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - #endregion - - #region ReadUnderlyingStream - private int MaxInnerStreamRead => _segmentContentLength - _currentRegionPosition; - - private void CleanupContentSegment() - { - if (_currentRegionPosition == _segmentContentLength || _innerStream.Position >= _innerStream.Length) - { - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = 0; - if (UseCrcSegment && CurrentEncodingSegment - 1 == _latestSegmentCrcd) - { - _segmentCrc.GetCurrentHash(new Span( - _segmentCrcs, - _latestSegmentCrcd * _segmentCrc.HashLengthInBytes, - _segmentCrc.HashLengthInBytes)); - _latestSegmentCrcd++; - _segmentCrc = StorageCrc64HashAlgorithm.Create(); - } - } - } - - private async ValueTask ReadFromInnerStreamInternal( - byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int read = async - ? await _innerStream.ReadAsync(buffer, offset, Math.Min(count, MaxInnerStreamRead)).ConfigureAwait(false) - : _innerStream.Read(buffer, offset, Math.Min(count, MaxInnerStreamRead)); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - private int ReadFromInnerStream(Span buffer) - { - if (MaxInnerStreamRead < buffer.Length) - { - buffer = buffer.Slice(0, MaxInnerStreamRead); - } - int read = _innerStream.Read(buffer); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - - private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) - { - if (MaxInnerStreamRead < buffer.Length) - { - buffer = buffer.Slice(0, MaxInnerStreamRead); - } - int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } -#endif - #endregion - - // don't allow stream to seek too far forward. 
track how far the stream has been naturally read. - private void UpdateLatestPosition() - { - if (_maxSeekPosition < Position) - { - _maxSeekPosition = Position; - } - } - #endregion - - public override long Seek(long offset, SeekOrigin origin) - { - switch (origin) - { - case SeekOrigin.Begin: - Position = offset; - break; - case SeekOrigin.Current: - Position += offset; - break; - case SeekOrigin.End: - Position = Length + offset; - break; - } - return Position; - } - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - if (_disposed) - { - return; - } - - if (disposing) - { - _innerStream.Dispose(); - _disposed = true; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs deleted file mode 100644 index 3569ef4339735..0000000000000 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StructuredMessagePrecalculatedCrcWrapperStream.cs +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core.Pipeline; -using Azure.Storage.Common; - -namespace Azure.Storage.Shared; - -internal class StructuredMessagePrecalculatedCrcWrapperStream : Stream -{ - private readonly Stream _innerStream; - - private readonly int _streamHeaderLength; - private readonly int _streamFooterLength; - private readonly int _segmentHeaderLength; - private readonly int _segmentFooterLength; - - private bool _disposed; - - private readonly byte[] _crc; - - public override bool CanRead => true; - - public override bool CanWrite => false; - - public override bool CanSeek => _innerStream.CanSeek; - - public override bool CanTimeout => _innerStream.CanTimeout; - - public override int ReadTimeout => _innerStream.ReadTimeout; - - public override int WriteTimeout => _innerStream.WriteTimeout; - - public override long Length => - _streamHeaderLength + _streamFooterLength + - _segmentHeaderLength + _segmentFooterLength + - _innerStream.Length; - - #region Position - private enum SMRegion - { - StreamHeader, - StreamFooter, - SegmentHeader, - SegmentFooter, - SegmentContent, - } - - private SMRegion _currentRegion = SMRegion.StreamHeader; - private int _currentRegionPosition = 0; - - private long _maxSeekPosition = 0; - - public override long Position - { - get - { - return _currentRegion switch - { - SMRegion.StreamHeader => _currentRegionPosition, - SMRegion.SegmentHeader => _innerStream.Position + - _streamHeaderLength + - _currentRegionPosition, - SMRegion.SegmentContent => _streamHeaderLength + - _segmentHeaderLength + - _innerStream.Position, - SMRegion.SegmentFooter => _streamHeaderLength + - _segmentHeaderLength + - _innerStream.Length + - _currentRegionPosition, - SMRegion.StreamFooter => _streamHeaderLength + - _segmentHeaderLength + - _innerStream.Length + - _segmentFooterLength + - _currentRegionPosition, - _ => throw new 
InvalidDataException($"{nameof(StructuredMessageEncodingStream)} invalid state."), - }; - } - set - { - Argument.AssertInRange(value, 0, _maxSeekPosition, nameof(value)); - if (value < _streamHeaderLength) - { - _currentRegion = SMRegion.StreamHeader; - _currentRegionPosition = (int)value; - _innerStream.Position = 0; - return; - } - if (value < _streamHeaderLength + _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = (int)(value - _streamHeaderLength); - _innerStream.Position = 0; - return; - } - if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength); - _innerStream.Position = value - _streamHeaderLength - _segmentHeaderLength; - return; - } - if (value < _streamHeaderLength + _segmentHeaderLength + _innerStream.Length + _segmentFooterLength) - { - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length); - _innerStream.Position = _innerStream.Length; - return; - } - - _currentRegion = SMRegion.StreamFooter; - _currentRegionPosition = (int)(value - _streamHeaderLength - _segmentHeaderLength - _innerStream.Length - _segmentFooterLength); - _innerStream.Position = _innerStream.Length; - } - } - #endregion - - public StructuredMessagePrecalculatedCrcWrapperStream( - Stream innerStream, - ReadOnlySpan precalculatedCrc) - { - Argument.AssertNotNull(innerStream, nameof(innerStream)); - if (innerStream.GetLengthOrDefault() == default) - { - throw new ArgumentException("Stream must have known length.", nameof(innerStream)); - } - if (innerStream.Position != 0) - { - throw new ArgumentException("Stream must be at starting position.", nameof(innerStream)); - } - - _streamHeaderLength = StructuredMessage.V1_0.StreamHeaderLength; - _streamFooterLength = StructuredMessage.Crc64Length; - 
_segmentHeaderLength = StructuredMessage.V1_0.SegmentHeaderLength; - _segmentFooterLength = StructuredMessage.Crc64Length; - - _crc = ArrayPool.Shared.Rent(StructuredMessage.Crc64Length); - precalculatedCrc.CopyTo(_crc); - - _innerStream = innerStream; - } - - #region Write - public override void Flush() => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); - - public override void SetLength(long value) => throw new NotSupportedException(); - #endregion - - #region Read - public override int Read(byte[] buffer, int offset, int count) - => ReadInternal(buffer, offset, count, async: false, cancellationToken: default).EnsureCompleted(); - - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => await ReadInternal(buffer, offset, count, async: true, cancellationToken).ConfigureAwait(false); - - private async ValueTask ReadInternal(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < count && Position < Length) - { - int subreadOffset = offset + totalRead; - int subreadCount = count - totalRead; - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(new Span(buffer, subreadOffset, subreadCount)); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamInternal( - buffer, subreadOffset, subreadCount, async, cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - public override int Read(Span buffer) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead)); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead)); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += ReadFromInnerStream(buffer.Slice(totalRead)); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } - - public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) - { - int totalRead = 0; - bool readInner = false; - while (totalRead < buffer.Length && Position < Length) - { - switch (_currentRegion) - { - case SMRegion.StreamHeader: - totalRead += ReadFromStreamHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.StreamFooter: - totalRead += ReadFromStreamFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentHeader: - totalRead += ReadFromSegmentHeader(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentFooter: - totalRead += ReadFromSegmentFooter(buffer.Slice(totalRead).Span); - break; - case SMRegion.SegmentContent: - // don't double read from stream. Allow caller to multi-read when desired. 
- if (readInner) - { - UpdateLatestPosition(); - return totalRead; - } - totalRead += await ReadFromInnerStreamAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); - readInner = true; - break; - default: - break; - } - } - UpdateLatestPosition(); - return totalRead; - } -#endif - - #region Read Headers/Footers - private int ReadFromStreamHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _streamHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetStreamHeaderBytes( - ArrayPool.Shared, - out Memory headerBytes, - Length, - StructuredMessage.Flags.StorageCrc64, - totalSegments: 1); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _streamHeaderLength) - { - _currentRegion = SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromStreamFooter(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read <= 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetStreamFooterBytes( - ArrayPool.Shared, - out Memory footerBytes, - new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); - footerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - return read; - } - - private int ReadFromSegmentHeader(Span buffer) - { - int read = Math.Min(buffer.Length, _segmentHeaderLength - _currentRegionPosition); - using IDisposable _ = StructuredMessage.V1_0.GetSegmentHeaderBytes( - ArrayPool.Shared, - out Memory headerBytes, - segmentNum: 1, - _innerStream.Length); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentHeaderLength) - { - _currentRegion = SMRegion.SegmentContent; - _currentRegionPosition = 0; - } - - return read; - } - - private int ReadFromSegmentFooter(Span 
buffer) - { - int read = Math.Min(buffer.Length, _segmentFooterLength - _currentRegionPosition); - if (read < 0) - { - return 0; - } - - using IDisposable _ = StructuredMessage.V1_0.GetSegmentFooterBytes( - ArrayPool.Shared, - out Memory headerBytes, - new ReadOnlySpan(_crc, 0, StructuredMessage.Crc64Length)); - headerBytes.Slice(_currentRegionPosition, read).Span.CopyTo(buffer); - _currentRegionPosition += read; - - if (_currentRegionPosition == _segmentFooterLength) - { - _currentRegion = _innerStream.Position == _innerStream.Length - ? SMRegion.StreamFooter : SMRegion.SegmentHeader; - _currentRegionPosition = 0; - } - - return read; - } - #endregion - - #region ReadUnderlyingStream - private void CleanupContentSegment() - { - if (_innerStream.Position >= _innerStream.Length) - { - _currentRegion = SMRegion.SegmentFooter; - _currentRegionPosition = 0; - } - } - - private async ValueTask ReadFromInnerStreamInternal( - byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken) - { - int read = async - ? await _innerStream.ReadAsync(buffer, offset, count).ConfigureAwait(false) - : _innerStream.Read(buffer, offset, count); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - private int ReadFromInnerStream(Span buffer) - { - int read = _innerStream.Read(buffer); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } - - private async ValueTask ReadFromInnerStreamAsync(Memory buffer, CancellationToken cancellationToken) - { - int read = await _innerStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _currentRegionPosition += read; - CleanupContentSegment(); - return read; - } -#endif - #endregion - - // don't allow stream to seek too far forward. track how far the stream has been naturally read. 
- private void UpdateLatestPosition() - { - if (_maxSeekPosition < Position) - { - _maxSeekPosition = Position; - } - } - #endregion - - public override long Seek(long offset, SeekOrigin origin) - { - switch (origin) - { - case SeekOrigin.Begin: - Position = offset; - break; - case SeekOrigin.Current: - Position += offset; - break; - case SeekOrigin.End: - Position = Length + offset; - break; - } - return Position; - } - - protected override void Dispose(bool disposing) - { - base.Dispose(disposing); - - if (_disposed) - { - return; - } - - if (disposing) - { - ArrayPool.Shared.Return(_crc); - _innerStream.Dispose(); - _disposed = true; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs index 763d385240383..af21588b4ae09 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/TransferValidationOptionsExtensions.cs @@ -9,7 +9,14 @@ public static StorageChecksumAlgorithm ResolveAuto(this StorageChecksumAlgorithm { if (checksumAlgorithm == StorageChecksumAlgorithm.Auto) { +#if BlobSDK || DataLakeSDK || CommonSDK return StorageChecksumAlgorithm.StorageCrc64; +#elif FileSDK // file shares don't support crc64 + return StorageChecksumAlgorithm.MD5; +#else + throw new System.NotSupportedException( + $"{typeof(TransferValidationOptionsExtensions).FullName}.{nameof(ResolveAuto)} is not supported."); +#endif } return checksumAlgorithm; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj index 2863b85f6feb2..5db86ebee984b 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj +++ b/sdk/storage/Azure.Storage.Common/tests/Azure.Storage.Common.Tests.csproj @@ -13,12 +13,9 @@ - - - @@ -31,7 +28,6 @@ - @@ -50,11 
+46,6 @@ - - - - - diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs index f4e4b92ed73c4..7411eb1499312 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/FaultyStream.cs @@ -15,7 +15,6 @@ internal class FaultyStream : Stream private readonly Exception _exceptionToRaise; private int _remainingExceptions; private Action _onFault; - private long _position = 0; public FaultyStream( Stream innerStream, @@ -41,7 +40,7 @@ public FaultyStream( public override long Position { - get => CanSeek ? _innerStream.Position : _position; + get => _innerStream.Position; set => _innerStream.Position = value; } @@ -54,9 +53,7 @@ public override int Read(byte[] buffer, int offset, int count) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - int read = _innerStream.Read(buffer, offset, count); - _position += read; - return read; + return _innerStream.Read(buffer, offset, count); } else { @@ -64,13 +61,11 @@ public override int Read(byte[] buffer, int offset, int count) } } - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { if (_remainingExceptions == 0 || Position + count <= _raiseExceptionAt || _raiseExceptionAt >= _innerStream.Length) { - int read = await _innerStream.ReadAsync(buffer, offset, count, cancellationToken); - _position += read; - return read; + return _innerStream.ReadAsync(buffer, offset, count, cancellationToken); } else { diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs deleted file mode 100644 index 828c41179bba3..0000000000000 --- 
a/sdk/storage/Azure.Storage.Common/tests/Shared/ObserveStructuredMessagePolicy.cs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Collections.Generic; -using System.IO; -using Azure.Core; -using Azure.Core.Pipeline; -using Azure.Storage.Shared; - -namespace Azure.Storage.Test.Shared -{ - internal class ObserveStructuredMessagePolicy : HttpPipelineSynchronousPolicy - { - private readonly HashSet _requestScopes = new(); - - private readonly HashSet _responseScopes = new(); - - public ObserveStructuredMessagePolicy() - { - } - - public override void OnSendingRequest(HttpMessage message) - { - if (_requestScopes.Count > 0) - { - byte[] encodedContent; - byte[] underlyingContent; - StructuredMessageDecodingStream.RawDecodedData decodedData; - using (MemoryStream ms = new()) - { - message.Request.Content.WriteTo(ms, default); - encodedContent = ms.ToArray(); - using (MemoryStream ms2 = new()) - { - (Stream s, decodedData) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedContent)); - s.CopyTo(ms2); - underlyingContent = ms2.ToArray(); - } - } - } - } - - public override void OnReceivedResponse(HttpMessage message) - { - } - - public IDisposable CheckRequestScope() => CheckMessageScope.CheckRequestScope(this); - - public IDisposable CheckResponseScope() => CheckMessageScope.CheckResponseScope(this); - - private class CheckMessageScope : IDisposable - { - private bool _isRequestScope; - private ObserveStructuredMessagePolicy _policy; - - public static CheckMessageScope CheckRequestScope(ObserveStructuredMessagePolicy policy) - { - CheckMessageScope result = new() - { - _isRequestScope = true, - _policy = policy - }; - result._policy._requestScopes.Add(result); - return result; - } - - public static CheckMessageScope CheckResponseScope(ObserveStructuredMessagePolicy policy) - { - CheckMessageScope result = new() - { - _isRequestScope = false, - 
_policy = policy - }; - result._policy._responseScopes.Add(result); - return result; - } - - public void Dispose() - { - (_isRequestScope ? _policy._requestScopes : _policy._responseScopes).Remove(this); - } - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs deleted file mode 100644 index ad395e862f827..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/RequestExtensions.cs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System.Linq; -using System.Text; -using Azure.Core; -using NUnit.Framework; - -namespace Azure.Storage; - -public static partial class RequestExtensions -{ - public static string AssertHeaderPresent(this Request request, string headerName) - { - if (request.Headers.TryGetValue(headerName, out string value)) - { - return headerName == Constants.StructuredMessage.StructuredMessageHeader ? null : value; - } - StringBuilder sb = new StringBuilder() - .AppendLine($"`{headerName}` expected on request but was not found.") - .AppendLine($"{request.Method} {request.Uri}") - .AppendLine(string.Join("\n", request.Headers.Select(h => $"{h.Name}: {h.Value}s"))) - ; - Assert.Fail(sb.ToString()); - return null; - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs index 7e6c78117f53b..f4198e9dfd532 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TamperStreamContentsPolicy.cs @@ -14,7 +14,7 @@ internal class TamperStreamContentsPolicy : HttpPipelineSynchronousPolicy /// /// Default tampering that changes the first byte of the stream. 
/// - private static Func GetTamperByteStreamTransform(long position) => stream => + private static readonly Func _defaultStreamTransform = stream => { if (stream is not MemoryStream) { @@ -23,10 +23,10 @@ private static Func GetTamperByteStreamTransform(long position) stream = buffer; } - stream.Position = position; + stream.Position = 0; var firstByte = stream.ReadByte(); - stream.Position = position; + stream.Position = 0; stream.WriteByte((byte)((firstByte + 1) % byte.MaxValue)); stream.Position = 0; @@ -37,12 +37,9 @@ private static Func GetTamperByteStreamTransform(long position) public TamperStreamContentsPolicy(Func streamTransform = default) { - _streamTransform = streamTransform ?? GetTamperByteStreamTransform(0); + _streamTransform = streamTransform ?? _defaultStreamTransform; } - public static TamperStreamContentsPolicy TamperByteAt(long position) - => new(GetTamperByteStreamTransform(position)); - public bool TransformRequestBody { get; set; } public bool TransformResponseBody { get; set; } diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs index 248acf8811960..c18492d2fb4dd 100644 --- a/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs +++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TransferValidationTestBase.cs @@ -5,13 +5,10 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Security.Cryptography; using System.Threading.Tasks; using Azure.Core; -using Azure.Core.Diagnostics; -using Azure.Core.Pipeline; using Azure.Core.TestFramework; -using Azure.Storage.Shared; +using FastSerialization; using NUnit.Framework; namespace Azure.Storage.Test.Shared @@ -193,15 +190,21 @@ protected string GetNewResourceName() /// The actual checksum value expected to be on the request, if known. Defaults to no specific value expected or checked. 
/// /// An assertion to put into a pipeline policy. - internal static Action GetRequestChecksumHeaderAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) + internal static Action GetRequestChecksumAssertion(StorageChecksumAlgorithm algorithm, Func isChecksumExpected = default, byte[] expectedChecksum = default) { // action to assert a request header is as expected - void AssertChecksum(Request req, string headerName) + void AssertChecksum(RequestHeaders headers, string headerName) { - string checksum = req.AssertHeaderPresent(headerName); - if (expectedChecksum != default) + if (headers.TryGetValue(headerName, out string checksum)) { - Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); + if (expectedChecksum != default) + { + Assert.AreEqual(Convert.ToBase64String(expectedChecksum), checksum); + } + } + else + { + Assert.Fail($"{headerName} expected on request but was not found."); } }; @@ -216,39 +219,14 @@ void AssertChecksum(Request req, string headerName) switch (algorithm.ResolveAuto()) { case StorageChecksumAlgorithm.MD5: - AssertChecksum(request, "Content-MD5"); + AssertChecksum(request.Headers, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(request, Constants.StructuredMessage.StructuredMessageHeader); + AssertChecksum(request.Headers, "x-ms-content-crc64"); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); - } - }; - } - - internal static Action GetRequestStructuredMessageAssertion( - StructuredMessage.Flags flags, - Func isStructuredMessageExpected = default, - long? 
structuredContentSegmentLength = default) - { - return request => - { - // filter some requests out with predicate - if (isStructuredMessageExpected != default && !isStructuredMessageExpected(request)) - { - return; + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); } - - Assert.That(request.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); - Assert.That(structuredBody, Does.Contain("XSM/1.0")); - if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - Assert.That(structuredBody, Does.Contain("crc64")); - } - - Assert.That(request.Headers.TryGetValue("Content-Length", out string contentLength)); - Assert.That(request.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); }; } @@ -300,66 +278,32 @@ void AssertChecksum(ResponseHeaders headers, string headerName) AssertChecksum(response.Headers, "Content-MD5"); break; case StorageChecksumAlgorithm.StorageCrc64: - AssertChecksum(response.Headers, Constants.StructuredMessage.StructuredMessageHeader); + AssertChecksum(response.Headers, "x-ms-content-crc64"); break; default: - throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumHeaderAssertion)}."); + throw new Exception($"Bad {nameof(StorageChecksumAlgorithm)} provided to {nameof(GetRequestChecksumAssertion)}."); } }; } - internal static Action GetResponseStructuredMessageAssertion( - StructuredMessage.Flags flags, - Func isStructuredMessageExpected = default) - { - return response => - { - // filter some requests out with predicate - if (isStructuredMessageExpected != default && !isStructuredMessageExpected(response)) - { - return; - } - - Assert.That(response.Headers.TryGetValue("x-ms-structured-body", out string structuredBody)); - Assert.That(structuredBody, Does.Contain("XSM/1.0")); - if (flags.HasFlag(StructuredMessage.Flags.StorageCrc64)) - { - Assert.That(structuredBody, 
Does.Contain("crc64")); - } - - Assert.That(response.Headers.TryGetValue("Content-Length", out string contentLength)); - Assert.That(response.Headers.TryGetValue("x-ms-structured-content-length", out string structuredContentLength)); - }; - } - /// /// Asserts the service returned an error that expected checksum did not match checksum on upload. /// /// Async action to upload data to service. /// Checksum algorithm used. - internal static void AssertWriteChecksumMismatch( - AsyncTestDelegate writeAction, - StorageChecksumAlgorithm algorithm, - bool expectStructuredMessage = false) + internal static void AssertWriteChecksumMismatch(AsyncTestDelegate writeAction, StorageChecksumAlgorithm algorithm) { var exception = ThrowsOrInconclusiveAsync(writeAction); - if (expectStructuredMessage) - { - Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); - } - else + switch (algorithm.ResolveAuto()) { - switch (algorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - Assert.That(exception.ErrorCode, Is.EqualTo("Md5Mismatch")); - break; - case StorageChecksumAlgorithm.StorageCrc64: - Assert.That(exception.ErrorCode, Is.EqualTo("Crc64Mismatch")); - break; - default: - throw new ArgumentException("Test arguments contain bad algorithm specifier."); - } + case StorageChecksumAlgorithm.MD5: + Assert.AreEqual("Md5Mismatch", exception.ErrorCode); + break; + case StorageChecksumAlgorithm.StorageCrc64: + Assert.AreEqual("Crc64Mismatch", exception.ErrorCode); + break; + default: + throw new ArgumentException("Test arguments contain bad algorithm specifier."); } } #endregion @@ -404,7 +348,6 @@ public virtual async Task UploadPartitionSuccessfulHashComputation(StorageChecks await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); // Arrange - bool expectStructuredMessage = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64; const int dataLength = Constants.KB; var data = GetRandomBuffer(dataLength); var validationOptions = 
new UploadTransferValidationOptions @@ -413,10 +356,7 @@ public virtual async Task UploadPartitionSuccessfulHashComputation(StorageChecks }; // make pipeline assertion for checking checksum was present on upload - var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) - : GetRequestChecksumHeaderAssertion(algorithm); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -466,11 +406,7 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg }; // make pipeline assertion for checking precalculated checksum was present on upload - // precalculated partition upload will never use structured message. always check header - var assertion = GetRequestChecksumHeaderAssertion( - algorithm, - expectedChecksum: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 ? 
default : precalculatedChecksum); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm, expectedChecksum: precalculatedChecksum)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -487,12 +423,12 @@ public virtual async Task UploadPartitionUsePrecalculatedHash(StorageChecksumAlg AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); // Assert - AssertWriteChecksumMismatch(operation, algorithm, algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); + AssertWriteChecksumMismatch(operation, algorithm); } } [TestCaseSource(nameof(GetValidationAlgorithms))] - public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlgorithm algorithm) + public virtual async Task UploadPartitionMismatchedHashThrows(StorageChecksumAlgorithm algorithm) { await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); @@ -505,7 +441,7 @@ public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlg }; // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer - var streamTamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); + var streamTamperPolicy = new TamperStreamContentsPolicy(); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(streamTamperPolicy, HttpPipelinePosition.PerCall); @@ -520,10 +456,9 @@ public virtual async Task UploadPartitionTamperedStreamThrows(StorageChecksumAlg // Act streamTamperPolicy.TransformRequestBody = true; AsyncTestDelegate operation = async () => await UploadPartitionAsync(client, stream, validationOptions); - using var listener = AzureEventSourceListener.CreateConsoleLogger(); + // Assert - AssertWriteChecksumMismatch(operation, 
algorithm, - expectStructuredMessage: algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64); + AssertWriteChecksumMismatch(operation, algorithm); } } @@ -538,10 +473,7 @@ public virtual async Task UploadPartitionUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) - : GetRequestChecksumHeaderAssertion(clientAlgorithm); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -580,10 +512,7 @@ public virtual async Task UploadPartitionOverwritesDefaultClientValidationOption }; // make pipeline assertion for checking checksum was present on upload - var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, null, dataLength) - : GetRequestChecksumHeaderAssertion(overrideAlgorithm); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -626,14 +555,10 @@ public virtual async Task UploadPartitionDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (request.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains("x-ms-structured-body")) - { - Assert.Fail($"Structured body used when none expected."); - } }); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -676,11 +601,9 @@ public virtual async Task OpenWriteSuccessfulHashComputation( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(algorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(algorithm)); var clientOptions = ClientBuilder.GetOptions(); - //ObserveStructuredMessagePolicy observe = new(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); - //clientOptions.AddPolicy(observe, HttpPipelinePosition.BeforeTransport); var client = await GetResourceClientAsync( disposingContainer.Container, @@ -693,7 +616,6 @@ public virtual async Task OpenWriteSuccessfulHashComputation( using var writeStream = await OpenWriteAsync(client, validationOptions, 
streamBufferSize); // Assert - //using var obsv = observe.CheckRequestScope(); using (checksumPipelineAssertion.CheckRequestScope()) { foreach (var _ in Enumerable.Range(0, streamWrites)) @@ -722,7 +644,7 @@ public virtual async Task OpenWriteMismatchedHashThrows(StorageChecksumAlgorithm // Tamper with stream contents in the pipeline to simulate silent failure in the transit layer var clientOptions = ClientBuilder.GetOptions(); - var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(100); + var tamperPolicy = new TamperStreamContentsPolicy(); clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); var client = await GetResourceClientAsync( @@ -760,7 +682,7 @@ public virtual async Task OpenWriteUsesDefaultClientValidationOptions( var data = GetRandomBuffer(dataLength); // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(clientAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(clientAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -804,7 +726,7 @@ public virtual async Task OpenWriteOverwritesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumHeaderAssertion(overrideAlgorithm)); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion(overrideAlgorithm)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -852,7 +774,7 @@ public virtual async Task OpenWriteDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if 
(request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (request.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -964,7 +886,7 @@ public virtual async Task ParallelUploadSplitSuccessfulHashComputation(StorageCh // make pipeline assertion for checking checksum was present on upload var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); + checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1001,10 +923,8 @@ public virtual async Task ParallelUploadOneShotSuccessfulHashComputation(Storage }; // make pipeline assertion for checking checksum was present on upload - var assertion = algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 - ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) - : GetRequestChecksumHeaderAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy( + checkRequest: GetRequestChecksumAssertion(algorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1061,7 +981,7 @@ public virtual async Task ParallelUploadPrecalculatedComposableHashAccepted(Stor PrecalculatedChecksum = hash }; - var client = await GetResourceClientAsync(disposingContainer.Container, dataLength, createResource: true); + var client = await GetResourceClientAsync(disposingContainer.Container, dataLength); // Act await DoesNotThrowOrInconclusiveAsync( @@ -1091,10 +1011,8 @@ public virtual async Task ParallelUploadUsesDefaultClientValidationOptions( }; // make pipeline assertion for checking checksum was present on upload - var assertion = clientAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split - ? 
GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) - : GetRequestChecksumHeaderAssertion(clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( + clientAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1145,10 +1063,8 @@ public virtual async Task ParallelUploadOverwritesDefaultClientValidationOptions }; // make pipeline assertion for checking checksum was present on upload - var assertion = overrideAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64 && !split - ? GetRequestStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64, ParallelUploadIsChecksumExpected, dataLength) - : GetRequestChecksumHeaderAssertion(overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected); - var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: assertion); + var checksumPipelineAssertion = new AssertMessageContentsPolicy(checkRequest: GetRequestChecksumAssertion( + overrideAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected)); var clientOptions = ClientBuilder.GetOptions(); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); @@ -1203,7 +1119,7 @@ public virtual async Task ParallelUploadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (request.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (request.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1268,17 +1184,15 @@ public virtual async Task ParallelDownloadSuccessfulHashVerification( }; 
// Act - byte[] dest; - using (MemoryStream ms = new()) + var dest = new MemoryStream(); using (checksumPipelineAssertion.CheckRequestScope()) { - await ParallelDownloadAsync(client, ms, validationOptions, transferOptions); - dest = ms.ToArray(); + await ParallelDownloadAsync(client, dest, validationOptions, transferOptions); } // Assert // Assertion was in the pipeline and the SDK not throwing means the checksum was validated - Assert.IsTrue(dest.SequenceEqual(data)); + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } [Test] @@ -1443,7 +1357,7 @@ public virtual async Task ParallelDownloadDisablesDefaultClientValidationOptions { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (response.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1651,7 +1565,7 @@ public virtual async Task OpenReadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (response.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1701,7 +1615,7 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; // Act - using var dest = new MemoryStream(); + var dest = new MemoryStream(); var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert @@ -1712,71 +1626,13 @@ public virtual async Task DownloadSuccessfulHashVerification(StorageChecksumAlgo Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + Assert.True(response.Headers.Contains("x-ms-content-crc64")); break; 
default: Assert.Fail("Test can't validate given algorithm type."); break; } - var result = dest.ToArray(); - Assert.IsTrue(result.SequenceEqual(data)); - } - - [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader, false, false)] - [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader-1, false, false)] - [TestCase(StorageChecksumAlgorithm.StorageCrc64, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, true, false)] - [TestCase(StorageChecksumAlgorithm.MD5, Constants.StructuredMessage.MaxDownloadCrcWithHeader+1, false, true)] - public virtual async Task DownloadApporpriatelyUsesStructuredMessage( - StorageChecksumAlgorithm algorithm, - int? downloadLen, - bool expectStructuredMessage, - bool expectThrow) - { - await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); - - // Arrange - const int dataLength = Constants.KB; - var data = GetRandomBuffer(dataLength); - - var resourceName = GetNewResourceName(); - var client = await GetResourceClientAsync( - disposingContainer.Container, - resourceLength: dataLength, - createResource: true, - resourceName: resourceName); - await SetupDataAsync(client, new MemoryStream(data)); - - // make pipeline assertion for checking checksum was present on download - HttpPipelinePolicy checksumPipelineAssertion = new AssertMessageContentsPolicy(checkResponse: expectStructuredMessage - ? 
GetResponseStructuredMessageAssertion(StructuredMessage.Flags.StorageCrc64) - : GetResponseChecksumAssertion(algorithm)); - TClientOptions clientOptions = ClientBuilder.GetOptions(); - clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); - - client = await GetResourceClientAsync( - disposingContainer.Container, - resourceLength: dataLength, - resourceName: resourceName, - createResource: false, - downloadAlgorithm: algorithm, - options: clientOptions); - - var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; - - // Act - var dest = new MemoryStream(); - AsyncTestDelegate operation = async () => await DownloadPartitionAsync( - client, dest, validationOptions, downloadLen.HasValue ? new HttpRange(length: downloadLen.Value) : default); - // Assert (policies checked use of content validation) - if (expectThrow) - { - Assert.That(operation, Throws.TypeOf()); - } - else - { - Assert.That(operation, Throws.Nothing); - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); - } + Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } [Test, Combinatorial] @@ -1802,9 +1658,7 @@ public virtual async Task DownloadHashMismatchThrows( // alter response contents in pipeline, forcing a checksum mismatch on verification step var clientOptions = ClientBuilder.GetOptions(); - var tamperPolicy = TamperStreamContentsPolicy.TamperByteAt(50); - tamperPolicy.TransformResponseBody = true; - clientOptions.AddPolicy(tamperPolicy, HttpPipelinePosition.PerCall); + clientOptions.AddPolicy(new TamperStreamContentsPolicy() { TransformResponseBody = true }, HttpPipelinePosition.PerCall); client = await GetResourceClientAsync( disposingContainer.Container, createResource: false, @@ -1816,7 +1670,7 @@ public virtual async Task DownloadHashMismatchThrows( AsyncTestDelegate operation = async () => await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); // Assert - if (validate || 
algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + if (validate) { // SDK responsible for finding bad checksum. Throw. ThrowsOrInconclusiveAsync(operation); @@ -1874,7 +1728,7 @@ public virtual async Task DownloadUsesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + Assert.True(response.Headers.Contains("x-ms-content-crc64")); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1934,7 +1788,7 @@ public virtual async Task DownloadOverwritesDefaultClientValidationOptions( Assert.True(response.Headers.Contains("Content-MD5")); break; case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); + Assert.True(response.Headers.Contains("x-ms-content-crc64")); break; default: Assert.Fail("Test can't validate given algorithm type."); @@ -1973,7 +1827,7 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( { Assert.Fail($"Hash found when none expected."); } - if (response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)) + if (response.Headers.Contains("x-ms-content-crc64")) { Assert.Fail($"Hash found when none expected."); } @@ -1996,54 +1850,7 @@ public virtual async Task DownloadDisablesDefaultClientValidationOptions( // Assert // no policies this time; just check response headers Assert.False(response.Headers.Contains("Content-MD5")); - Assert.False(response.Headers.Contains(Constants.StructuredMessage.CrcStructuredMessage)); - Assert.IsTrue(dest.ToArray().SequenceEqual(data)); - } - - [Test] - public virtual async Task DownloadRecoversFromInterruptWithValidation( - [ValueSource(nameof(GetValidationAlgorithms))] StorageChecksumAlgorithm algorithm) - { - using var _ = AzureEventSourceListener.CreateConsoleLogger(); - int dataLen = 
algorithm.ResolveAuto() switch { - StorageChecksumAlgorithm.StorageCrc64 => 5 * Constants.MB, // >4MB for multisegment - _ => Constants.KB, - }; - - await using IDisposingContainer disposingContainer = await GetDisposingContainerAsync(); - - // Arrange - var data = GetRandomBuffer(dataLen); - - TClientOptions options = ClientBuilder.GetOptions(); - options.AddPolicy(new FaultyDownloadPipelinePolicy(dataLen - 512, new IOException(), () => { }), HttpPipelinePosition.BeforeTransport); - var client = await GetResourceClientAsync( - disposingContainer.Container, - resourceLength: dataLen, - createResource: true, - options: options); - await SetupDataAsync(client, new MemoryStream(data)); - - var validationOptions = new DownloadTransferValidationOptions { ChecksumAlgorithm = algorithm }; - - // Act - var dest = new MemoryStream(); - var response = await DownloadPartitionAsync(client, dest, validationOptions, new HttpRange(length: data.Length)); - - // Assert - // no policies this time; just check response headers - switch (algorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - Assert.True(response.Headers.Contains("Content-MD5")); - break; - case StorageChecksumAlgorithm.StorageCrc64: - Assert.True(response.Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)); - break; - default: - Assert.Fail("Test can't validate given algorithm type."); - break; - } + Assert.False(response.Headers.Contains("x-ms-content-crc64")); Assert.IsTrue(dest.ToArray().SequenceEqual(data)); } #endregion @@ -2084,7 +1891,7 @@ public async Task RoundtripWIthDefaults() // make pipeline assertion for checking checksum was present on upload AND download var checksumPipelineAssertion = new AssertMessageContentsPolicy( - checkRequest: GetRequestChecksumHeaderAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), + checkRequest: GetRequestChecksumAssertion(expectedAlgorithm, isChecksumExpected: ParallelUploadIsChecksumExpected), checkResponse: 
GetResponseChecksumAssertion(expectedAlgorithm)); clientOptions.AddPolicy(checksumPipelineAssertion, HttpPipelinePosition.PerCall); diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs deleted file mode 100644 index a0f9158040b11..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingRetriableStreamTests.cs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers.Binary; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Azure.Core; -using Azure.Storage.Shared; -using Azure.Storage.Test.Shared; -using Microsoft.Diagnostics.Tracing.Parsers.AspNet; -using Moq; -using NUnit.Framework; - -namespace Azure.Storage.Tests; - -[TestFixture(true)] -[TestFixture(false)] -public class StructuredMessageDecodingRetriableStreamTests -{ - public bool Async { get; } - - public StructuredMessageDecodingRetriableStreamTests(bool async) - { - Async = async; - } - - private Mock AllExceptionsRetry() - { - Mock mock = new(MockBehavior.Strict); - mock.Setup(rc => rc.IsRetriableException(It.IsAny())).Returns(true); - return mock; - } - - [Test] - public async ValueTask UninterruptedStream() - { - byte[] data = new Random().NextBytesInline(4 * Constants.KB).ToArray(); - byte[] dest = new byte[data.Length]; - - // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream - using (Stream src = new MemoryStream(data)) - using (Stream retriableSrc = new StructuredMessageDecodingRetriableStream(src, new(), default, default, default, default, default, 1)) - using (Stream dst = new MemoryStream(dest)) - { - await retriableSrc.CopyToInternal(dst, Async, default); - } - - Assert.AreEqual(data, dest); - } - - [Test] - public async Task 
Interrupt_DataIntact([Values(true, false)] bool multipleInterrupts) - { - const int segments = 4; - const int segmentLen = Constants.KB; - const int readLen = 128; - const int interruptPos = segmentLen + (3 * readLen) + 10; - - Random r = new(); - byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); - byte[] dest = new byte[data.Length]; - - // Mock a decoded data for the mocked StructuredMessageDecodingStream - StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() - { - TotalSegments = segments, - InnerStreamLength = data.Length, - Flags = StructuredMessage.Flags.StorageCrc64 - }; - // for test purposes, initialize a DecodedData, since we are not actively decoding in this test - initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); - - (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) - { - Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); - if (faulty) - { - stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); - } - // Mock a decoded data for the mocked StructuredMessageDecodingStream - StructuredMessageDecodingStream.RawDecodedData decodedData = new() - { - TotalSegments = segments, - InnerStreamLength = data.Length, - Flags = StructuredMessage.Flags.StorageCrc64, - }; - // for test purposes, initialize a DecodedData, since we are not actively decoding in this test - initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); - return (stream, decodedData); - } - - // mock with a simple MemoryStream rather than an actual StructuredMessageDecodingStream - using (Stream src = new MemoryStream(data)) - using (Stream faultySrc = new FaultyStream(src, interruptPos, 1, new Exception(), () => { })) - using (Stream retriableSrc = new 
StructuredMessageDecodingRetriableStream( - faultySrc, - initialDecodedData, - default, - offset => Factory(offset, multipleInterrupts), - offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), - null, - AllExceptionsRetry().Object, - int.MaxValue)) - using (Stream dst = new MemoryStream(dest)) - { - await retriableSrc.CopyToInternal(dst, readLen, Async, default); - } - - Assert.AreEqual(data, dest); - } - - [Test] - public async Task Interrupt_AppropriateRewind() - { - const int segments = 2; - const int segmentLen = Constants.KB; - const int dataLen = segments * segmentLen; - const int readLen = segmentLen / 4; - const int interruptOffset = 10; - const int interruptPos = segmentLen + (2 * readLen) + interruptOffset; - Random r = new(); - - // Mock a decoded data for the mocked StructuredMessageDecodingStream - StructuredMessageDecodingStream.RawDecodedData initialDecodedData = new() - { - TotalSegments = segments, - InnerStreamLength = segments * segmentLen, - Flags = StructuredMessage.Flags.StorageCrc64, - }; - // By the time of interrupt, there will be one segment reported - initialDecodedData.SegmentCrcs.Add((BinaryPrimitives.ReadUInt64LittleEndian(r.NextBytesInline(StructuredMessage.Crc64Length)), segmentLen)); - - Mock mock = new(MockBehavior.Strict); - mock.SetupGet(s => s.CanRead).Returns(true); - mock.SetupGet(s => s.CanSeek).Returns(false); - if (Async) - { - mock.SetupSequence(s => s.ReadAsync(It.IsAny(), It.IsAny(), It.IsAny(), default)) - .Returns(Task.FromResult(readLen)) // start first segment - .Returns(Task.FromResult(readLen)) - .Returns(Task.FromResult(readLen)) - .Returns(Task.FromResult(readLen)) // finish first segment - .Returns(Task.FromResult(readLen)) // start second segment - .Returns(Task.FromResult(readLen)) - // faulty stream interrupt - .Returns(Task.FromResult(readLen * 2)) // restart second segment. 
fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once - .Returns(Task.FromResult(readLen)) - .Returns(Task.FromResult(readLen)) // end second segment - .Returns(Task.FromResult(0)) // signal end of stream - .Returns(Task.FromResult(0)) // second signal needed for stream wrapping reasons - ; - } - else - { - mock.SetupSequence(s => s.Read(It.IsAny(), It.IsAny(), It.IsAny())) - .Returns(readLen) // start first segment - .Returns(readLen) - .Returns(readLen) - .Returns(readLen) // finish first segment - .Returns(readLen) // start second segment - .Returns(readLen) - // faulty stream interrupt - .Returns(readLen * 2) // restart second segment. fast-forward uses an internal 4KB buffer, so it will leap the 512 byte catchup all at once - .Returns(readLen) - .Returns(readLen) // end second segment - .Returns(0) // signal end of stream - .Returns(0) // second signal needed for stream wrapping reasons - ; - } - Stream faultySrc = new FaultyStream(mock.Object, interruptPos, 1, new Exception(), default); - Stream retriableSrc = new StructuredMessageDecodingRetriableStream( - faultySrc, - initialDecodedData, - default, - offset => (mock.Object, new()), - offset => new(Task.FromResult((mock.Object, new StructuredMessageDecodingStream.RawDecodedData()))), - null, - AllExceptionsRetry().Object, - 1); - - int totalRead = 0; - int read = 0; - byte[] buf = new byte[readLen]; - if (Async) - { - while ((read = await retriableSrc.ReadAsync(buf, 0, buf.Length)) > 0) - { - totalRead += read; - } - } - else - { - while ((read = retriableSrc.Read(buf, 0, buf.Length)) > 0) - { - totalRead += read; - } - } - await retriableSrc.CopyToInternal(Stream.Null, readLen, Async, default); - - // Asserts we read exactly the data length, excluding the fastforward of the inner stream - Assert.That(totalRead, Is.EqualTo(dataLen)); - } - - [Test] - public async Task Interrupt_ProperDecode([Values(true, false)] bool multipleInterrupts) - { - // decoding stream inserts a 
buffered layer of 4 KB. use larger sizes to avoid interference from it. - const int segments = 4; - const int segmentLen = 128 * Constants.KB; - const int readLen = 8 * Constants.KB; - const int interruptPos = segmentLen + (3 * readLen) + 10; - - Random r = new(); - byte[] data = r.NextBytesInline(segments * Constants.KB).ToArray(); - byte[] dest = new byte[data.Length]; - - (Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData) Factory(long offset, bool faulty) - { - Stream stream = new MemoryStream(data, (int)offset, data.Length - (int)offset); - stream = new StructuredMessageEncodingStream(stream, segmentLen, StructuredMessage.Flags.StorageCrc64); - if (faulty) - { - stream = new FaultyStream(stream, interruptPos, 1, new Exception(), () => { }); - } - return StructuredMessageDecodingStream.WrapStream(stream); - } - - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = Factory(0, true); - using Stream retriableSrc = new StructuredMessageDecodingRetriableStream( - decodingStream, - decodedData, - default, - offset => Factory(offset, multipleInterrupts), - offset => new ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)>(Factory(offset, multipleInterrupts)), - null, - AllExceptionsRetry().Object, - int.MaxValue); - using Stream dst = new MemoryStream(dest); - - await retriableSrc.CopyToInternal(dst, readLen, Async, default); - - Assert.AreEqual(data, dest); - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs deleted file mode 100644 index 2789672df4976..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageDecodingStreamTests.cs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers.Binary; -using System.Dynamic; -using System.IO; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Azure.Storage.Blobs.Tests; -using Azure.Storage.Shared; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - [TestFixture(ReadMethod.SyncArray)] - [TestFixture(ReadMethod.AsyncArray)] -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - [TestFixture(ReadMethod.SyncSpan)] - [TestFixture(ReadMethod.AsyncMemory)] -#endif - public class StructuredMessageDecodingStreamTests - { - // Cannot just implement as passthru in the stream - // Must test each one - public enum ReadMethod - { - SyncArray, - AsyncArray, -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - SyncSpan, - AsyncMemory -#endif - } - - public ReadMethod Method { get; } - - public StructuredMessageDecodingStreamTests(ReadMethod method) - { - Method = method; - } - - private class CopyStreamException : Exception - { - public long TotalCopied { get; } - - public CopyStreamException(Exception inner, long totalCopied) - : base($"Failed read after {totalCopied}-many bytes.", inner) - { - TotalCopied = totalCopied; - } - } - private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl - { - byte[] buf = new byte[bufferSize]; - int read; - long totalRead = 0; - try - { - switch (Method) - { - case ReadMethod.SyncArray: - while ((read = source.Read(buf, 0, bufferSize)) > 0) - { - totalRead += read; - destination.Write(buf, 0, read); - } - break; - case ReadMethod.AsyncArray: - while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) - { - totalRead += read; - await destination.WriteAsync(buf, 0, read); - } - break; -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - case ReadMethod.SyncSpan: - while ((read = source.Read(new Span(buf))) > 0) - { - totalRead += read; - destination.Write(new 
Span(buf, 0, read)); - } - break; - case ReadMethod.AsyncMemory: - while ((read = await source.ReadAsync(new Memory(buf))) > 0) - { - totalRead += read; - await destination.WriteAsync(new Memory(buf, 0, read)); - } - break; -#endif - } - destination.Flush(); - } - catch (Exception ex) - { - throw new CopyStreamException(ex, totalRead); - } - return totalRead; - } - - [Test] - [Pairwise] - public async Task DecodesData( - [Values(2048, 2005)] int dataLength, - [Values(default, 512)] int? seglen, - [Values(8*Constants.KB, 512, 530, 3)] int readLen, - [Values(true, false)] bool useCrc) - { - int segmentContentLength = seglen ?? int.MaxValue; - Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - byte[] decodedData; - using (MemoryStream dest = new()) - { - await CopyStream(decodingStream, dest, readLen); - decodedData = dest.ToArray(); - } - - Assert.That(new Span(decodedData).SequenceEqual(originalData)); - } - - [Test] - public void BadStreamBadVersion() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - encodedData[0] = byte.MaxValue; - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public async Task BadSegmentCrcThrows() - { - const int segmentLength = 256; - Random r = new(); - - byte[] originalData = new byte[2048]; - r.NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, 
Flags.StorageCrc64); - - const int badBytePos = 1024; - encodedData[badBytePos] = (byte)~encodedData[badBytePos]; - - MemoryStream encodedDataStream = new(encodedData); - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(encodedDataStream); - - // manual try/catch to validate the proccess failed mid-stream rather than the end - const int copyBufferSize = 4; - bool caught = false; - try - { - await CopyStream(decodingStream, Stream.Null, copyBufferSize); - } - catch (CopyStreamException ex) - { - caught = true; - Assert.That(ex.TotalCopied, Is.LessThanOrEqualTo(badBytePos)); - } - Assert.That(caught); - } - - [Test] - public void BadStreamCrcThrows() - { - const int segmentLength = 256; - Random r = new(); - - byte[] originalData = new byte[2048]; - r.NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentLength, Flags.StorageCrc64); - - encodedData[originalData.Length - 1] = (byte)~encodedData[originalData.Length - 1]; - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public void BadStreamWrongContentLength() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - BinaryPrimitives.WriteInt64LittleEndian(new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), 123456789L); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [TestCase(-1)] - [TestCase(1)] - public void BadStreamWrongSegmentCount(int difference) - { - const int dataSize = 1024; - const int segmentSize = 256; - const int numSegments = 4; - - 
byte[] originalData = new byte[dataSize]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentSize, Flags.StorageCrc64); - - // rewrite the segment count to be different than the actual number of segments - BinaryPrimitives.WriteInt16LittleEndian( - new Span(encodedData, V1_0.StreamHeaderSegmentCountOffset, 2), (short)(numSegments + difference)); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public void BadStreamWrongSegmentNum() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - BinaryPrimitives.WriteInt16LittleEndian( - new Span(encodedData, V1_0.StreamHeaderLength + V1_0.SegmentHeaderNumOffset, 2), 123); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(encodedData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - [Combinatorial] - public async Task BadStreamWrongContentLength( - [Values(-1, 1)] int difference, - [Values(true, false)] bool lengthProvided) - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - BinaryPrimitives.WriteInt64LittleEndian( - new Span(encodedData, V1_0.StreamHeaderMessageLengthOffset, 8), - encodedData.Length + difference); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream( - new MemoryStream(encodedData), - lengthProvided ? 
(long?)encodedData.Length : default); - - // manual try/catch with tiny buffer to validate the proccess failed mid-stream rather than the end - const int copyBufferSize = 4; - bool caught = false; - try - { - await CopyStream(decodingStream, Stream.Null, copyBufferSize); - } - catch (CopyStreamException ex) - { - caught = true; - if (lengthProvided) - { - Assert.That(ex.TotalCopied, Is.EqualTo(0)); - } - else - { - Assert.That(ex.TotalCopied, Is.EqualTo(originalData.Length)); - } - } - Assert.That(caught); - } - - [Test] - public void BadStreamMissingExpectedStreamFooter() - { - byte[] originalData = new byte[1024]; - new Random().NextBytes(originalData); - byte[] encodedData = StructuredMessageHelper.MakeEncodedData(originalData, 256, Flags.StorageCrc64); - - byte[] brokenData = new byte[encodedData.Length - Crc64Length]; - new Span(encodedData, 0, encodedData.Length - Crc64Length).CopyTo(brokenData); - - (Stream decodingStream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream(brokenData)); - Assert.That(async () => await CopyStream(decodingStream, Stream.Null), Throws.InnerException.TypeOf()); - } - - [Test] - public void NoSeek() - { - (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); - - Assert.That(stream.CanSeek, Is.False); - Assert.That(() => stream.Length, Throws.TypeOf()); - Assert.That(() => stream.Position, Throws.TypeOf()); - Assert.That(() => stream.Position = 0, Throws.TypeOf()); - Assert.That(() => stream.Seek(0, SeekOrigin.Begin), Throws.TypeOf()); - } - - [Test] - public void NoWrite() - { - (Stream stream, _) = StructuredMessageDecodingStream.WrapStream(new MemoryStream()); - byte[] data = new byte[1024]; - new Random().NextBytes(data); - - Assert.That(stream.CanWrite, Is.False); - Assert.That(() => stream.Write(data, 0, data.Length), - Throws.TypeOf()); - Assert.That(async () => await stream.WriteAsync(data, 0, data.Length, CancellationToken.None), - Throws.TypeOf()); -#if 
NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - Assert.That(() => stream.Write(new Span(data)), - Throws.TypeOf()); - Assert.That(async () => await stream.WriteAsync(new Memory(data), CancellationToken.None), - Throws.TypeOf()); -#endif - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs deleted file mode 100644 index e0f91dee7de3a..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageEncodingStreamTests.cs +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Buffers.Binary; -using System.IO; -using System.Linq; -using System.Threading.Tasks; -using Azure.Storage.Blobs.Tests; -using Azure.Storage.Shared; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - [TestFixture(ReadMethod.SyncArray)] - [TestFixture(ReadMethod.AsyncArray)] -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - [TestFixture(ReadMethod.SyncSpan)] - [TestFixture(ReadMethod.AsyncMemory)] -#endif - public class StructuredMessageEncodingStreamTests - { - // Cannot just implement as passthru in the stream - // Must test each one - public enum ReadMethod - { - SyncArray, - AsyncArray, -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - SyncSpan, - AsyncMemory -#endif - } - - public ReadMethod Method { get; } - - public StructuredMessageEncodingStreamTests(ReadMethod method) - { - Method = method; - } - - private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl - { - byte[] buf = new byte[bufferSize]; - int read; - switch (Method) - { - case ReadMethod.SyncArray: - while ((read = source.Read(buf, 0, bufferSize)) > 0) - { - destination.Write(buf, 0, read); - } - break; - case 
ReadMethod.AsyncArray: - while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) - { - await destination.WriteAsync(buf, 0, read); - } - break; -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - case ReadMethod.SyncSpan: - while ((read = source.Read(new Span(buf))) > 0) - { - destination.Write(new Span(buf, 0, read)); - } - break; - case ReadMethod.AsyncMemory: - while ((read = await source.ReadAsync(new Memory(buf))) > 0) - { - await destination.WriteAsync(new Memory(buf, 0, read)); - } - break; -#endif - } - destination.Flush(); - } - - [Test] - [Pairwise] - public async Task EncodesData( - [Values(2048, 2005)] int dataLength, - [Values(default, 512)] int? seglen, - [Values(8 * Constants.KB, 512, 530, 3)] int readLen, - [Values(true, false)] bool useCrc) - { - int segmentContentLength = seglen ?? int.MaxValue; - Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); - - Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(originalData), segmentContentLength, flags); - byte[] encodedData; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest, readLen); - encodedData = dest.ToArray(); - } - - Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); - } - - [TestCase(0, 0)] // start - [TestCase(5, 0)] // partway through stream header - [TestCase(V1_0.StreamHeaderLength, 0)] // start of segment - [TestCase(V1_0.StreamHeaderLength + 3, 0)] // partway through segment header - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength, 0)] // start of segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123, 123)] // partway through segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512, 512)] // start of segment footer - 
[TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515, 512)] // partway through segment footer - [TestCase(V1_0.StreamHeaderLength + 3*V1_0.SegmentHeaderLength + 2*Crc64Length + 1500, 1500)] // partway through not first segment content - public async Task Seek(int targetRewindOffset, int expectedInnerStreamPosition) - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - MemoryStream dataStream = new(data); - StructuredMessageEncodingStream encodingStream = new(dataStream, segmentLength, Flags.StorageCrc64); - - // no support for seeking past existing read, need to consume whole stream before seeking - await CopyStream(encodingStream, Stream.Null); - - encodingStream.Position = targetRewindOffset; - Assert.That(encodingStream.Position, Is.EqualTo(targetRewindOffset)); - Assert.That(dataStream.Position, Is.EqualTo(expectedInnerStreamPosition)); - } - - [TestCase(0)] // start - [TestCase(5)] // partway through stream header - [TestCase(V1_0.StreamHeaderLength)] // start of segment - [TestCase(V1_0.StreamHeaderLength + 3)] // partway through segment header - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength)] // start of segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 123)] // partway through segment content - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 512)] // start of segment footer - [TestCase(V1_0.StreamHeaderLength + V1_0.SegmentHeaderLength + 515)] // partway through segment footer - [TestCase(V1_0.StreamHeaderLength + 2 * V1_0.SegmentHeaderLength + Crc64Length + 1500)] // partway through not first segment content - public async Task SupportsRewind(int targetRewindOffset) - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), 
segmentLength, Flags.StorageCrc64); - byte[] encodedData1; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest); - encodedData1 = dest.ToArray(); - } - encodingStream.Position = targetRewindOffset; - byte[] encodedData2; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest); - encodedData2 = dest.ToArray(); - } - - Assert.That(new Span(encodedData1).Slice(targetRewindOffset).SequenceEqual(encodedData2)); - } - - [Test] - public async Task SupportsFastForward() - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - // must have read stream to fastforward. so read whole stream upfront & save result to check later - Stream encodingStream = new StructuredMessageEncodingStream(new MemoryStream(data), segmentLength, Flags.StorageCrc64); - byte[] encodedData; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest); - encodedData = dest.ToArray(); - } - - encodingStream.Position = 0; - - bool skip = false; - const int increment = 499; - while (encodingStream.Position < encodingStream.Length) - { - if (skip) - { - encodingStream.Position = Math.Min(dataLength, encodingStream.Position + increment); - skip = !skip; - continue; - } - ReadOnlyMemory expected = new(encodedData, (int)encodingStream.Position, - (int)Math.Min(increment, encodedData.Length - encodingStream.Position)); - ReadOnlyMemory actual; - using (MemoryStream dest = new(increment)) - { - await CopyStream(WindowStream.GetWindow(encodingStream, increment), dest); - actual = dest.ToArray(); - } - Assert.That(expected.Span.SequenceEqual(actual.Span)); - skip = !skip; - } - } - - [Test] - public void NotSupportsFastForwardBeyondLatestRead() - { - const int segmentLength = 512; - const int dataLength = 2055; - byte[] data = new byte[dataLength]; - new Random().NextBytes(data); - - Stream encodingStream = new StructuredMessageEncodingStream(new 
MemoryStream(data), segmentLength, Flags.StorageCrc64); - - Assert.That(() => encodingStream.Position = 123, Throws.TypeOf()); - } - - [Test] - [Pairwise] - public async Task WrapperStreamCorrectData( - [Values(2048, 2005)] int dataLength, - [Values(8 * Constants.KB, 512, 530, 3)] int readLen) - { - int segmentContentLength = dataLength; - Flags flags = Flags.StorageCrc64; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - byte[] crc = CrcInline(originalData); - byte[] expectedEncodedData = StructuredMessageHelper.MakeEncodedData(originalData, segmentContentLength, flags); - - Stream encodingStream = new StructuredMessagePrecalculatedCrcWrapperStream(new MemoryStream(originalData), crc); - byte[] encodedData; - using (MemoryStream dest = new()) - { - await CopyStream(encodingStream, dest, readLen); - encodedData = dest.ToArray(); - } - - Assert.That(new Span(encodedData).SequenceEqual(expectedEncodedData)); - } - - private static void AssertExpectedStreamHeader(ReadOnlySpan actual, int originalDataLength, Flags flags, int expectedSegments) - { - int expectedFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
Crc64Length : 0; - - Assert.That(actual.Length, Is.EqualTo(V1_0.StreamHeaderLength)); - Assert.That(actual[0], Is.EqualTo(1)); - Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(1, 8)), - Is.EqualTo(V1_0.StreamHeaderLength + expectedSegments * (V1_0.SegmentHeaderLength + expectedFooterLen) + originalDataLength)); - Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(9, 2)), Is.EqualTo((short)flags)); - Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(11, 2)), Is.EqualTo((short)expectedSegments)); - } - - private static void AssertExpectedSegmentHeader(ReadOnlySpan actual, int segmentNum, long contentLength) - { - Assert.That(BinaryPrimitives.ReadInt16LittleEndian(actual.Slice(0, 2)), Is.EqualTo((short) segmentNum)); - Assert.That(BinaryPrimitives.ReadInt64LittleEndian(actual.Slice(2, 8)), Is.EqualTo(contentLength)); - } - - private static byte[] CrcInline(ReadOnlySpan data) - { - var crc = StorageCrc64HashAlgorithm.Create(); - crc.Append(data); - return crc.GetCurrentHash(); - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs deleted file mode 100644 index 59e80320d96a0..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageHelper.cs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Azure.Storage.Shared; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Blobs.Tests -{ - internal class StructuredMessageHelper - { - public static byte[] MakeEncodedData(ReadOnlySpan data, long segmentContentLength, Flags flags) - { - int segmentCount = (int) Math.Ceiling(data.Length / (double)segmentContentLength); - int segmentFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 
8 : 0; - int streamFooterLen = flags.HasFlag(Flags.StorageCrc64) ? 8 : 0; - - byte[] encodedData = new byte[ - V1_0.StreamHeaderLength + - segmentCount*(V1_0.SegmentHeaderLength + segmentFooterLen) + - streamFooterLen + - data.Length]; - V1_0.WriteStreamHeader( - new Span(encodedData, 0, V1_0.StreamHeaderLength), - encodedData.Length, - flags, - segmentCount); - - int i = V1_0.StreamHeaderLength; - int j = 0; - foreach (int seg in Enumerable.Range(1, segmentCount)) - { - int segContentLen = Math.Min((int)segmentContentLength, data.Length - j); - V1_0.WriteSegmentHeader( - new Span(encodedData, i, V1_0.SegmentHeaderLength), - seg, - segContentLen); - i += V1_0.SegmentHeaderLength; - - data.Slice(j, segContentLen) - .CopyTo(new Span(encodedData).Slice(i)); - i += segContentLen; - - if (flags.HasFlag(Flags.StorageCrc64)) - { - var crc = StorageCrc64HashAlgorithm.Create(); - crc.Append(data.Slice(j, segContentLen)); - crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); - i += Crc64Length; - } - j += segContentLen; - } - - if (flags.HasFlag(Flags.StorageCrc64)) - { - var crc = StorageCrc64HashAlgorithm.Create(); - crc.Append(data); - crc.GetCurrentHash(new Span(encodedData, i, Crc64Length)); - } - - return encodedData; - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs deleted file mode 100644 index 61583aa1ebe4e..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageStreamRoundtripTests.cs +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.IO; -using System.Linq; -using System.Threading.Tasks; -using Azure.Storage.Shared; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - [TestFixture(ReadMethod.SyncArray)] - [TestFixture(ReadMethod.AsyncArray)] -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - [TestFixture(ReadMethod.SyncSpan)] - [TestFixture(ReadMethod.AsyncMemory)] -#endif - public class StructuredMessageStreamRoundtripTests - { - // Cannot just implement as passthru in the stream - // Must test each one - public enum ReadMethod - { - SyncArray, - AsyncArray, -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - SyncSpan, - AsyncMemory -#endif - } - - public ReadMethod Method { get; } - - public StructuredMessageStreamRoundtripTests(ReadMethod method) - { - Method = method; - } - - private class CopyStreamException : Exception - { - public long TotalCopied { get; } - - public CopyStreamException(Exception inner, long totalCopied) - : base($"Failed read after {totalCopied}-many bytes.", inner) - { - TotalCopied = totalCopied; - } - } - private async ValueTask CopyStream(Stream source, Stream destination, int bufferSize = 81920) // number default for CopyTo impl - { - byte[] buf = new byte[bufferSize]; - int read; - long totalRead = 0; - try - { - switch (Method) - { - case ReadMethod.SyncArray: - while ((read = source.Read(buf, 0, bufferSize)) > 0) - { - totalRead += read; - destination.Write(buf, 0, read); - } - break; - case ReadMethod.AsyncArray: - while ((read = await source.ReadAsync(buf, 0, bufferSize)) > 0) - { - totalRead += read; - await destination.WriteAsync(buf, 0, read); - } - break; -#if NETSTANDARD2_1_OR_GREATER || NETCOREAPP3_0_OR_GREATER - case ReadMethod.SyncSpan: - while ((read = source.Read(new Span(buf))) > 0) - { - totalRead += read; - destination.Write(new Span(buf, 0, read)); - } - break; - case ReadMethod.AsyncMemory: - while ((read = await source.ReadAsync(new 
Memory(buf))) > 0) - { - totalRead += read; - await destination.WriteAsync(new Memory(buf, 0, read)); - } - break; -#endif - } - destination.Flush(); - } - catch (Exception ex) - { - throw new CopyStreamException(ex, totalRead); - } - return totalRead; - } - - [Test] - [Pairwise] - public async Task RoundTrip( - [Values(2048, 2005)] int dataLength, - [Values(default, 512)] int? seglen, - [Values(8 * Constants.KB, 512, 530, 3)] int readLen, - [Values(true, false)] bool useCrc) - { - int segmentLength = seglen ?? int.MaxValue; - Flags flags = useCrc ? Flags.StorageCrc64 : Flags.None; - - byte[] originalData = new byte[dataLength]; - new Random().NextBytes(originalData); - - byte[] roundtripData; - using (MemoryStream source = new(originalData)) - using (Stream encode = new StructuredMessageEncodingStream(source, segmentLength, flags)) - using (Stream decode = StructuredMessageDecodingStream.WrapStream(encode).DecodedStream) - using (MemoryStream dest = new()) - { - await CopyStream(source, dest, readLen); - roundtripData = dest.ToArray(); - } - - Assert.That(originalData.SequenceEqual(roundtripData)); - } - } -} diff --git a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs b/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs deleted file mode 100644 index b4f1dfe178246..0000000000000 --- a/sdk/storage/Azure.Storage.Common/tests/StructuredMessageTests.cs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using System; -using System.Buffers.Binary; -using System.Collections.Generic; -using NUnit.Framework; -using static Azure.Storage.Shared.StructuredMessage; - -namespace Azure.Storage.Tests -{ - public class StructuredMessageTests - { - [TestCase(1024, Flags.None, 2)] - [TestCase(2000, Flags.StorageCrc64, 4)] - public void EncodeStreamHeader(int messageLength, int flags, int numSegments) - { - Span encoding = new(new byte[V1_0.StreamHeaderLength]); - V1_0.WriteStreamHeader(encoding, messageLength, (Flags)flags, numSegments); - - Assert.That(encoding[0], Is.EqualTo((byte)1)); - Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(1, 8)), Is.EqualTo(messageLength)); - Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(9, 2)), Is.EqualTo(flags)); - Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(11, 2)), Is.EqualTo(numSegments)); - } - - [TestCase(V1_0.StreamHeaderLength)] - [TestCase(V1_0.StreamHeaderLength + 1)] - [TestCase(V1_0.StreamHeaderLength - 1)] - public void EncodeStreamHeaderRejectBadBufferSize(int bufferSize) - { - Random r = new(); - byte[] encoding = new byte[bufferSize]; - - void Action() => V1_0.WriteStreamHeader(encoding, r.Next(2, int.MaxValue), Flags.StorageCrc64, r.Next(2, int.MaxValue)); - if (bufferSize < V1_0.StreamHeaderLength) - { - Assert.That(Action, Throws.ArgumentException); - } - else - { - Assert.That(Action, Throws.Nothing); - } - } - - [TestCase(1, 1024)] - [TestCase(5, 39578)] - public void EncodeSegmentHeader(int segmentNum, int contentLength) - { - Span encoding = new(new byte[V1_0.SegmentHeaderLength]); - V1_0.WriteSegmentHeader(encoding, segmentNum, contentLength); - - Assert.That(BinaryPrimitives.ReadUInt16LittleEndian(encoding.Slice(0, 2)), Is.EqualTo(segmentNum)); - Assert.That(BinaryPrimitives.ReadUInt64LittleEndian(encoding.Slice(2, 8)), Is.EqualTo(contentLength)); - } - - [TestCase(V1_0.SegmentHeaderLength)] - [TestCase(V1_0.SegmentHeaderLength + 1)] - 
[TestCase(V1_0.SegmentHeaderLength - 1)] - public void EncodeSegmentHeaderRejectBadBufferSize(int bufferSize) - { - Random r = new(); - byte[] encoding = new byte[bufferSize]; - - void Action() => V1_0.WriteSegmentHeader(encoding, r.Next(1, int.MaxValue), r.Next(2, int.MaxValue)); - if (bufferSize < V1_0.SegmentHeaderLength) - { - Assert.That(Action, Throws.ArgumentException); - } - else - { - Assert.That(Action, Throws.Nothing); - } - } - - [TestCase(true)] - [TestCase(false)] - public void EncodeSegmentFooter(bool useCrc) - { - Span encoding = new(new byte[Crc64Length]); - Span crc = useCrc ? new Random().NextBytesInline(Crc64Length) : default; - V1_0.WriteSegmentFooter(encoding, crc); - - if (useCrc) - { - Assert.That(encoding.SequenceEqual(crc), Is.True); - } - else - { - Assert.That(encoding.SequenceEqual(new Span(new byte[Crc64Length])), Is.True); - } - } - - [TestCase(Crc64Length)] - [TestCase(Crc64Length + 1)] - [TestCase(Crc64Length - 1)] - public void EncodeSegmentFooterRejectBadBufferSize(int bufferSize) - { - byte[] encoding = new byte[bufferSize]; - byte[] crc = new byte[Crc64Length]; - new Random().NextBytes(crc); - - void Action() => V1_0.WriteSegmentFooter(encoding, crc); - if (bufferSize < Crc64Length) - { - Assert.That(Action, Throws.ArgumentException); - } - else - { - Assert.That(Action, Throws.Nothing); - } - } - } -} diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj index 30d4b1f79daaf..7ab901e963e03 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/samples/Azure.Storage.DataMovement.Blobs.Samples.Tests.csproj @@ -11,7 +11,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj 
b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj index 93e7432f186e3..6098dcd8ba33d 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/Azure.Storage.DataMovement.Blobs.csproj @@ -37,7 +37,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs index 2c6864f511571..84d60b3bc37c4 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/src/DataMovementBlobsExtensions.cs @@ -99,7 +99,7 @@ internal static StorageResourceItemProperties ToStorageResourceItemProperties(th ContentRange contentRange = !string.IsNullOrWhiteSpace(result?.Details?.ContentRange) ? ContentRange.Parse(result.Details.ContentRange) : default; if (contentRange != default) { - size = contentRange.TotalResourceLength; + size = contentRange.Size; } return new StorageResourceItemProperties( @@ -151,7 +151,7 @@ internal static StorageResourceReadStreamResult ToReadStreamStorageResourceInfo( if (contentRange != default) { range = ContentRange.ToHttpRange(contentRange); - size = contentRange.TotalResourceLength; + size = contentRange.Size; } else if (result.Details.ContentLength > 0) { diff --git a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj index 214903eb5f9c4..f8b62d0b947e2 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Blobs/tests/Azure.Storage.DataMovement.Blobs.Tests.csproj @@ -22,15 +22,11 @@ - - - - @@ -44,7 +40,6 @@ - diff --git 
a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj index 66a9fea0861a2..a6abde432473f 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/BlobToFileSharesTests/Azure.Storage.DataMovement.Blobs.Files.Shares.Tests.csproj @@ -35,7 +35,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj index 6a472b9f74158..9cde066f64eb7 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/samples/Azure.Storage.DataMovement.Files.Shares.Samples.Tests.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks) Microsoft Azure.Storage.DataMovement.Files.Shares client library samples @@ -11,7 +11,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs index 16a164f61b060..9cb7d338fcb60 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/src/DataMovementSharesExtensions.cs @@ -335,14 +335,14 @@ internal static StorageResourceReadStreamResult ToStorageResourceReadStreamResul ContentRange contentRange = !string.IsNullOrWhiteSpace(info?.Details?.ContentRange) ? 
ContentRange.Parse(info.Details.ContentRange) : default; if (contentRange != default) { - size = contentRange.TotalResourceLength; + size = contentRange.Size; } return new StorageResourceReadStreamResult( content: info?.Content, range: ContentRange.ToHttpRange(contentRange), properties: new StorageResourceItemProperties( - resourceLength: contentRange.TotalResourceLength, + resourceLength: contentRange.Size, eTag: info.Details.ETag, lastModifiedTime: info.Details.LastModified, properties: properties)); diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj index d75775beceafd..8e574bca36a48 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Azure.Storage.DataMovement.Files.Shares.Tests.csproj @@ -27,7 +27,6 @@ - diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs index 577ee7bb9a480..d5defd931e31d 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs @@ -6,6 +6,7 @@ using System.Threading.Tasks; using BaseShares::Azure.Storage.Files.Shares; using Azure.Storage.Test.Shared; +using BaseShares::Azure.Storage.Files.Shares.Models; namespace Azure.Storage.DataMovement.Files.Shares.Tests { @@ -17,7 +18,11 @@ public class DisposingShare : IDisposingContainer public static async Task CreateAsync(ShareClient share, IDictionary metadata) { - await share.CreateIfNotExistsAsync(new() { Metadata = metadata }); + ShareCreateOptions options = new ShareCreateOptions + { + Metadata = metadata + }; + await 
share.CreateIfNotExistsAsync(options); return new DisposingShare(share); } diff --git a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj index dd30659cf0a5d..5aaf548493b15 100644 --- a/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/src/Azure.Storage.DataMovement.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 diff --git a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj index 7a40eb8026443..b5e3c42359976 100644 --- a/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj +++ b/sdk/storage/Azure.Storage.DataMovement/tests/Azure.Storage.DataMovement.Tests.csproj @@ -34,7 +34,6 @@ - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index 7f856db5829ac..a202d6300f50e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? 
Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index 7f856db5829ac..a202d6300f50e 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/assets.json b/sdk/storage/Azure.Storage.Files.DataLake/assets.json index 5127ea7e0c4db..4a64b8398f656 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/assets.json +++ b/sdk/storage/Azure.Storage.Files.DataLake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.DataLake", - "Tag": "net/storage/Azure.Storage.Files.DataLake_48a38da58a" + "Tag": "net/storage/Azure.Storage.Files.DataLake_d74597f1e3" } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj index eecbe0543fe87..c230f2ed8fa20 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/samples/Azure.Storage.Files.DataLake.Samples.Tests.csproj @@ -15,7 +15,6 @@ - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj index f8652fd283e36..3c551e05c24c2 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj @@ -42,7 +42,6 @@ - @@ -82,10 +81,6 @@ - - - - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs index aaa8f514c6e44..2da5eb76349eb 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs @@ -16,7 +16,6 @@ using Azure.Storage.Common; using 
Azure.Storage.Files.DataLake.Models; using Azure.Storage.Sas; -using Azure.Storage.Shared; using Metadata = System.Collections.Generic.IDictionary; namespace Azure.Storage.Files.DataLake @@ -2333,39 +2332,13 @@ internal virtual async Task AppendInternal( using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(DataLakeFileClient))) { // compute hash BEFORE attaching progress handler - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (content != null && - validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? 
new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = content.Length - content.Position; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content?.WithNoDispose().WithProgress(progressHandler); - } + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + content = content?.WithNoDispose().WithProgress(progressHandler); ClientConfiguration.Pipeline.LogMethodEnter( nameof(DataLakeFileClient), message: @@ -2400,8 +2373,6 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? null : EncryptionAlgorithmTypeInternal.AES256, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, @@ -2421,8 +2392,6 @@ internal virtual async Task AppendInternal( encryptionKey: ClientConfiguration.CustomerProvidedKey?.EncryptionKey, encryptionKeySha256: ClientConfiguration.CustomerProvidedKey?.EncryptionKeyHash, encryptionAlgorithm: ClientConfiguration.CustomerProvidedKey?.EncryptionAlgorithm == null ? 
null : EncryptionAlgorithmTypeInternal.AES256, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, leaseId: leaseId, leaseAction: leaseAction, leaseDuration: leaseDurationLong, diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md index a8340f1092bcb..ec9675a014f70 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/autorest.md @@ -23,7 +23,7 @@ directive: if (property.includes('/{filesystem}/{path}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem") && false == param['$ref'].endsWith("#/parameters/Path"))}); - } + } else if (property.includes('/{filesystem}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/FileSystem"))}); @@ -127,7 +127,7 @@ directive: } $[newName] = $[oldName]; delete $[oldName]; - } + } else if (property.includes('/{filesystem}')) { var oldName = property; diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj index 1fa78690077be..bef13bb21a1c6 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/Azure.Storage.Files.DataLake.Tests.csproj @@ -6,9 +6,6 @@ Microsoft Azure.Storage.Files.DataLake client library tests false - - DataLakeSDK - diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs index 
5067f98517bd2..4bdefdbf756cd 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeFileClientTransferValidationTests.cs @@ -34,10 +34,7 @@ protected override async Task> Get StorageChecksumAlgorithm uploadAlgorithm = StorageChecksumAlgorithm.None, StorageChecksumAlgorithm downloadAlgorithm = StorageChecksumAlgorithm.None) { - var disposingFileSystem = await ClientBuilder.GetNewFileSystem( - service: service, - fileSystemName: containerName, - publicAccessType: PublicAccessType.None); + var disposingFileSystem = await ClientBuilder.GetNewFileSystem(service: service, fileSystemName: containerName); disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Upload.ChecksumAlgorithm = uploadAlgorithm; disposingFileSystem.FileSystem.ClientConfiguration.TransferValidation.Download.ChecksumAlgorithm = downloadAlgorithm; diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index 473ffb67af41f..cf8ce32808d81 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -115,7 +115,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? 
AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? Audience { get { throw null; } set { } } @@ -808,7 +808,6 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index 473ffb67af41f..cf8ce32808d81 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -115,7 +115,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } @@ -808,7 +808,6 @@ public partial class ShareFileDownloadInfo : System.IDisposable { internal ShareFileDownloadInfo() { } public System.IO.Stream Content { get { throw null; } } - public byte[] ContentCrc { get { throw null; } } public byte[] ContentHash { get { throw null; } } public long ContentLength { get { throw null; } } public string ContentType { get { throw null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/assets.json b/sdk/storage/Azure.Storage.Files.Shares/assets.json index c33c8bb335398..c2b5c3d31e6a2 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/assets.json +++ b/sdk/storage/Azure.Storage.Files.Shares/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "net", "TagPrefix": "net/storage/Azure.Storage.Files.Shares", - "Tag": "net/storage/Azure.Storage.Files.Shares_4b545ae555" + "Tag": "net/storage/Azure.Storage.Files.Shares_df67d82d59" } diff --git a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj index d1efeca0c2da2..0bcec423c144d 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/samples/Azure.Storage.Files.Shares.Samples.Tests.csproj @@ -16,7 +16,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj index d136154f5d3d4..740160b155650 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Azure.Storage.Files.Shares.csproj @@ -1,4 +1,4 @@ - + $(RequiredTargetFrameworks);net6.0 @@ -42,7 +42,6 @@ - @@ -86,11 +85,6 @@ - - - - - diff --git 
a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs index 4037cbdfd875e..0165af94435a0 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Models/ShareFileDownloadInfo.cs @@ -38,12 +38,6 @@ public partial class ShareFileDownloadInfo : IDisposable, IDownloadedContent public byte[] ContentHash { get; internal set; } #pragma warning restore CA1819 // Properties should not return arrays - /// - /// When requested using , this value contains the CRC for the download blob range. - /// This value may only become populated once the network stream is fully consumed. - /// - public byte[] ContentCrc { get; internal set; } - /// /// Details returned when downloading a file /// diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs index 0b27510aaa6c4..f776384d06add 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareErrors.cs @@ -17,5 +17,20 @@ public static InvalidOperationException FileOrShareMissing( string fileClient, string shareClient) => new InvalidOperationException($"{leaseClient} requires either a {fileClient} or {shareClient}"); + + public static void AssertAlgorithmSupport(StorageChecksumAlgorithm? algorithm) + { + StorageChecksumAlgorithm resolved = (algorithm ?? StorageChecksumAlgorithm.None).ResolveAuto(); + switch (resolved) + { + case StorageChecksumAlgorithm.None: + case StorageChecksumAlgorithm.MD5: + return; + case StorageChecksumAlgorithm.StorageCrc64: + throw new ArgumentException("Azure File Shares do not support CRC-64."); + default: + throw new ArgumentException($"{nameof(StorageChecksumAlgorithm)} does not support value {Enum.GetName(typeof(StorageChecksumAlgorithm), resolved) ?? 
((int)resolved).ToString(CultureInfo.InvariantCulture)}."); + } + } } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs index ea3f8554b944d..f713200a524de 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/ShareFileClient.cs @@ -2397,70 +2397,51 @@ private async Task> DownloadInternal( // Wrap the response Content in a RetriableStream so we // can return it before it's finished downloading, but still // allow retrying if it fails. - async ValueTask> Factory(long offset, bool async, CancellationToken cancellationToken) - { - (Response response, Stream contentStream) = await StartDownloadAsync( - range, - validationOptions, - conditions, - offset, - async, - cancellationToken).ConfigureAwait(false); - if (etag != response.GetRawResponse().Headers.ETag) + initialResponse.Value.Content = RetriableStream.Create( + stream, + startOffset => { - throw new ShareFileModifiedException( - "File has been modified concurrently", - Uri, etag, response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); - } - return response; - } - async ValueTask<(Stream DecodingStream, StructuredMessageDecodingStream.RawDecodedData DecodedData)> StructuredMessageFactory( - long offset, bool async, CancellationToken cancellationToken) - { - Response result = await Factory(offset, async, cancellationToken).ConfigureAwait(false); - return StructuredMessageDecodingStream.WrapStream(result.Value.Content, result.Value.ContentLength); - } - - if (initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) - { - (Stream decodingStream, StructuredMessageDecodingStream.RawDecodedData decodedData) = StructuredMessageDecodingStream.WrapStream( - initialResponse.Value.Content, initialResponse.Value.ContentLength); - initialResponse.Value.Content = new StructuredMessageDecodingRetriableStream( - 
decodingStream, - decodedData, - StructuredMessage.Flags.StorageCrc64, - startOffset => StructuredMessageFactory(startOffset, async: false, cancellationToken) - .EnsureCompleted(), - async startOffset => await StructuredMessageFactory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false), - decodedData => + (Response Response, Stream ContentStream) = StartDownloadAsync( + range, + validationOptions, + conditions, + startOffset, + async, + cancellationToken) + .EnsureCompleted(); + if (etag != Response.GetRawResponse().Headers.ETag) { - initialResponse.Value.ContentCrc = new byte[StructuredMessage.Crc64Length]; - decodedData.Crc.WriteCrc64(initialResponse.Value.ContentCrc); - }, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } - else - { - initialResponse.Value.Content = RetriableStream.Create( - initialResponse.Value.Content, - startOffset => Factory(startOffset, async: false, cancellationToken) - .EnsureCompleted().Value.Content, - async startOffset => (await Factory(startOffset, async: true, cancellationToken) - .ConfigureAwait(false)).Value.Content, - ClientConfiguration.Pipeline.ResponseClassifier, - Constants.MaxReliabilityRetries); - } + throw new ShareFileModifiedException( + "File has been modified concurrently", + Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); + } + return ContentStream; + }, + async startOffset => + { + (Response Response, Stream ContentStream) = await StartDownloadAsync( + range, + validationOptions, + conditions, + startOffset, + async, + cancellationToken) + .ConfigureAwait(false); + if (etag != Response.GetRawResponse().Headers.ETag) + { + throw new ShareFileModifiedException( + "File has been modified concurrently", + Uri, etag, Response.GetRawResponse().Headers.ETag.GetValueOrDefault(), range); + } + return ContentStream; + }, + ClientConfiguration.Pipeline.ResponseClassifier, + Constants.MaxReliabilityRetries); // buffer response stream and 
ensure it matches the transactional hash if any // Storage will not return a hash for payload >4MB, so this buffer is capped similarly // hashing is opt-in, so this buffer is part of that opt-in - if (validationOptions != default && - validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && - validationOptions.AutoValidateChecksum && - // structured message decoding does the validation for us - !initialResponse.GetRawResponse().Headers.Contains(Constants.StructuredMessage.StructuredMessageHeader)) + if (validationOptions != default && validationOptions.ChecksumAlgorithm != StorageChecksumAlgorithm.None && validationOptions.AutoValidateChecksum) { // safe-buffer; transactional hash download limit well below maxInt var readDestStream = new MemoryStream((int)initialResponse.Value.ContentLength); @@ -2543,6 +2524,8 @@ await ContentHasher.AssertResponseHashMatchInternal( bool async = true, CancellationToken cancellationToken = default) { + ShareErrors.AssertAlgorithmSupport(transferValidationOverride?.ChecksumAlgorithm); + // calculation gets illegible with null coalesce; just pre-initialize var pageRange = range; pageRange = new HttpRange( @@ -2552,27 +2535,13 @@ await ContentHasher.AssertResponseHashMatchInternal( (long?)null); ClientConfiguration.Pipeline.LogTrace($"Download {Uri} with range: {pageRange}"); - bool? rangeGetContentMD5 = null; - string structuredBodyType = null; - switch (transferValidationOverride?.ChecksumAlgorithm.ResolveAuto()) - { - case StorageChecksumAlgorithm.MD5: - rangeGetContentMD5 = true; - break; - case StorageChecksumAlgorithm.StorageCrc64: - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - break; - default: - break; - } - ResponseWithHeaders response; + if (async) { response = await FileRestClient.DownloadAsync( range: pageRange == default ? 
null : pageRange.ToString(), - rangeGetContentMD5: rangeGetContentMD5, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -2581,8 +2550,7 @@ await ContentHasher.AssertResponseHashMatchInternal( { response = FileRestClient.Download( range: pageRange == default ? null : pageRange.ToString(), - rangeGetContentMD5: rangeGetContentMD5, - structuredBodyType: structuredBodyType, + rangeGetContentMD5: transferValidationOverride?.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.MD5 ? true : null, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } @@ -4662,6 +4630,7 @@ internal async Task> UploadRangeInternal( CancellationToken cancellationToken) { UploadTransferValidationOptions validationOptions = transferValidationOverride ?? ClientConfiguration.TransferValidation.Upload; + ShareErrors.AssertAlgorithmSupport(validationOptions?.ChecksumAlgorithm); using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareFileClient))) { @@ -4677,38 +4646,14 @@ internal async Task> UploadRangeInternal( scope.Start(); Errors.VerifyStreamPosition(content, nameof(content)); - ContentHasher.GetHashResult hashResult = null; - long contentLength = (content?.Length - content?.Position) ?? 0; - long? structuredContentLength = default; - string structuredBodyType = null; - if (validationOptions != null && - validationOptions.ChecksumAlgorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) - { - // report progress in terms of caller bytes, not encoded bytes - structuredContentLength = contentLength; - contentLength = (content?.Length - content?.Position) ?? 
0; - structuredBodyType = Constants.StructuredMessage.CrcStructuredMessage; - content = content.WithNoDispose().WithProgress(progressHandler); - content = validationOptions.PrecalculatedChecksum.IsEmpty - ? new StructuredMessageEncodingStream( - content, - Constants.StructuredMessage.DefaultSegmentContentLength, - StructuredMessage.Flags.StorageCrc64) - : new StructuredMessagePrecalculatedCrcWrapperStream( - content, - validationOptions.PrecalculatedChecksum.Span); - contentLength = (content?.Length - content?.Position) ?? 0; - } - else - { - // compute hash BEFORE attaching progress handler - hashResult = await ContentHasher.GetHashOrDefaultInternal( - content, - validationOptions, - async, - cancellationToken).ConfigureAwait(false); - content = content.WithNoDispose().WithProgress(progressHandler); - } + // compute hash BEFORE attaching progress handler + ContentHasher.GetHashResult hashResult = await ContentHasher.GetHashOrDefaultInternal( + content, + validationOptions, + async, + cancellationToken).ConfigureAwait(false); + + content = content.WithNoDispose().WithProgress(progressHandler); ResponseWithHeaders response; @@ -4721,8 +4666,6 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken) .ConfigureAwait(false); @@ -4736,8 +4679,6 @@ internal async Task> UploadRangeInternal( fileLastWrittenMode: fileLastWrittenMode, optionalbody: content, contentMD5: hashResult?.MD5AsArray, - structuredBodyType: structuredBodyType, - structuredContentLength: structuredContentLength, shareFileRequestConditions: conditions, cancellationToken: cancellationToken); } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 
ca0e5ae4c9160..2bcc0e37ee65a 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -25,7 +25,7 @@ directive: if (property.includes('/{shareName}/{directory}/{fileName}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath") && false == param['$ref'].endsWith("#/parameters/FilePath"))}); - } + } else if (property.includes('/{shareName}/{directory}')) { $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath"))}); @@ -46,7 +46,7 @@ directive: $.Metrics.type = "object"; ``` -### Times aren't required +### Times aren't required ``` yaml directive: - from: swagger-document diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj index d09dd8fe8949f..398a4b6367489 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/Azure.Storage.Files.Shares.Tests.csproj @@ -17,7 +17,6 @@ - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs index 9fd8905e388b1..3dcdb21f27b36 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/tests/ShareFileClientTransferValidationTests.cs @@ -64,6 +64,10 @@ protected override async Task GetResourceClientAsync( private void 
AssertSupportsHashAlgorithm(StorageChecksumAlgorithm algorithm) { + if (algorithm.ResolveAuto() == StorageChecksumAlgorithm.StorageCrc64) + { + TestHelper.AssertInconclusiveRecordingFriendly(Recording.Mode, "Azure File Share does not support CRC64."); + } } protected override async Task UploadPartitionAsync(ShareFileClient client, Stream source, UploadTransferValidationOptions transferValidation) @@ -143,44 +147,8 @@ protected override async Task SetupDataAsync(ShareFileClient client, Stream data public override void TestAutoResolve() { Assert.AreEqual( - StorageChecksumAlgorithm.StorageCrc64, + StorageChecksumAlgorithm.MD5, TransferValidationOptionsExtensions.ResolveAuto(StorageChecksumAlgorithm.Auto)); } - - [Test] - public async Task StructuredMessagePopulatesCrcDownloadStreaming() - { - await using DisposingShare disposingContainer = await ClientBuilder.GetTestShareAsync(); - - const int dataLength = Constants.KB; - byte[] data = GetRandomBuffer(dataLength); - byte[] dataCrc = new byte[8]; - StorageCrc64Calculator.ComputeSlicedSafe(data, 0L).WriteCrc64(dataCrc); - - ShareFileClient file = disposingContainer.Container.GetRootDirectoryClient().GetFileClient(GetNewResourceName()); - await file.CreateAsync(data.Length); - await file.UploadAsync(new MemoryStream(data)); - - Response response = await file.DownloadAsync(new ShareFileDownloadOptions() - { - TransferValidation = new DownloadTransferValidationOptions - { - ChecksumAlgorithm = StorageChecksumAlgorithm.StorageCrc64 - } - }); - - // crc is not present until response stream is consumed - Assert.That(response.Value.ContentCrc, Is.Null); - - byte[] downloadedData; - using (MemoryStream ms = new()) - { - await response.Value.Content.CopyToAsync(ms); - downloadedData = ms.ToArray(); - } - - Assert.That(response.Value.ContentCrc, Is.EqualTo(dataCrc)); - Assert.That(downloadedData, Is.EqualTo(data)); - } } } diff --git a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs 
b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs index 9f440eb3639d7..96bc919c7a719 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs @@ -74,7 +74,7 @@ public QueueClient(System.Uri queueUri, Azure.Storage.StorageSharedKeyCredential } public partial class QueueClientOptions : Azure.Core.ClientOptions { - public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2025_01_05) { } + public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2024_11_04) { } public Azure.Storage.Queues.Models.QueueAudience? Audience { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } public System.Uri GeoRedundantSecondaryUri { get { throw null; } set { } } @@ -426,7 +426,7 @@ public event System.EventHandler - PreserveNewest diff --git a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj index 4d0334255f041..e0a6fab3c753b 100644 --- a/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj +++ b/sdk/storage/Azure.Storage.Queues/tests/Azure.Storage.Queues.Tests.csproj @@ -21,7 +21,6 @@ - From 2924019c6b2ffd59314a37e319726699a889367d Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Wed, 25 Sep 2024 21:30:56 -0500 Subject: [PATCH 21/25] STG 96 beta changelogs (#46288) --- sdk/storage/Azure.Storage.Blobs.Batch/CHANGELOG.md | 7 +------ .../Azure.Storage.Blobs.ChangeFeed/CHANGELOG.md | 7 +------ sdk/storage/Azure.Storage.Blobs/CHANGELOG.md | 10 +++------- sdk/storage/Azure.Storage.Common/CHANGELOG.md | 7 +------ sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md | 8 ++------ 
sdk/storage/Azure.Storage.Files.Shares/CHANGELOG.md | 10 ++++------ sdk/storage/Azure.Storage.Queues/CHANGELOG.md | 7 +------ 7 files changed, 13 insertions(+), 43 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs.Batch/CHANGELOG.md b/sdk/storage/Azure.Storage.Blobs.Batch/CHANGELOG.md index 5952194b2d2ea..64c9a4b2a297d 100644 --- a/sdk/storage/Azure.Storage.Blobs.Batch/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Blobs.Batch/CHANGELOG.md @@ -3,12 +3,7 @@ ## 12.20.0-beta.1 (Unreleased) ### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Added support for service version 2025-01-05. ## 12.19.0 (2024-09-18) diff --git a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/CHANGELOG.md b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/CHANGELOG.md index 8e03ceb02b5fe..6fccde404a9b0 100644 --- a/sdk/storage/Azure.Storage.Blobs.ChangeFeed/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Blobs.ChangeFeed/CHANGELOG.md @@ -3,12 +3,7 @@ ## 12.0.0-preview.50 (Unreleased) ### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Added support for service version 2025-01-05. ## 12.0.0-preview.49 (2024-09-18) diff --git a/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md b/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md index 4b554b4de802c..4989b077abb4c 100644 --- a/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Blobs/CHANGELOG.md @@ -3,13 +3,9 @@ ## 12.23.0-beta.1 (Unreleased) ### Features Added -- Added GenerateUserDelegationSasUri() for BlobBaseClient and BlobContainerClient - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Added support for service version 2025-01-05. +- Added GenerateUserDelegationSasUri() to BlobBaseClient and BlobContainerClient. +- Added BlobErrorCode.BlobAccessTierNotSupportedForAccountType enum value. 
## 12.22.0 (2024-09-18) diff --git a/sdk/storage/Azure.Storage.Common/CHANGELOG.md b/sdk/storage/Azure.Storage.Common/CHANGELOG.md index b45093ef4fdce..aad2683bdfb5b 100644 --- a/sdk/storage/Azure.Storage.Common/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Common/CHANGELOG.md @@ -3,12 +3,7 @@ ## 12.22.0-beta.1 (Unreleased) ### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- This release contains bug fixes to improve quality. ## 12.21.0 (2024-09-18) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md b/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md index 16b6af245d029..33308a624cadd 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md @@ -3,14 +3,10 @@ ## 12.21.0-beta.1 (Unreleased) ### Features Added +- Added support for service version 2025-01-05. - Added GenerateUserDelegationSasUri() for DataLakePathClient, DataLakeFileSystemClient, and DataLakeDirectoryClient - Deprecated Read()/ReadAsync() in favor of ReadStreaming()/ReadStreamingAsync() and ReadContent()/ReadContentAsync() for DataLake #45418 - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Added GenerateUserDelegationSasUri() to DataLakeFileSystemClient, DataLakePathClient, DataLakeDirectoryClient, and DataLakeFileClient. ## 12.20.0 (2024-09-18) diff --git a/sdk/storage/Azure.Storage.Files.Shares/CHANGELOG.md b/sdk/storage/Azure.Storage.Files.Shares/CHANGELOG.md index d4dca5b45c76f..554b55af32ad5 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Files.Shares/CHANGELOG.md @@ -3,12 +3,10 @@ ## 12.21.0-beta.1 (Unreleased) ### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Added support for service version 2025-01-05. +- Added support for the provisioned V2 billing model. 
+- Added support for specifying the binary file permission format for ShareFileClient.StartCopy() and .StartCopyAsync(). +- Added ShareAccessTier.Premium enum value. ## 12.20.0 (2024-09-18) diff --git a/sdk/storage/Azure.Storage.Queues/CHANGELOG.md b/sdk/storage/Azure.Storage.Queues/CHANGELOG.md index b53155e9f6dae..dc6bb8705a693 100644 --- a/sdk/storage/Azure.Storage.Queues/CHANGELOG.md +++ b/sdk/storage/Azure.Storage.Queues/CHANGELOG.md @@ -3,12 +3,7 @@ ## 12.21.0-beta.1 (Unreleased) ### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +- Added support for service version 2025-01-05. ## 12.20.0 (2024-09-18) From b357870d90a1b3146b2ac3a649dfa23e4e05f853 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:52:53 -0500 Subject: [PATCH 22/25] Updated swagger readmes to point at main (#46348) --- sdk/storage/Azure.Storage.Blobs/src/autorest.md | 2 +- .../src/Generated/ShareCreateHeaders.cs | 8 +++++--- .../src/Generated/ShareDeleteHeaders.cs | 8 ++++---- .../src/Generated/ShareGetPropertiesHeaders.cs | 6 +++--- .../src/Generated/ShareRestClient.cs | 16 ++++++++-------- .../src/Generated/ShareRestoreHeaders.cs | 2 +- .../src/Generated/ShareSetPropertiesHeaders.cs | 6 +++--- .../Azure.Storage.Files.Shares/src/autorest.md | 2 +- 8 files changed, 26 insertions(+), 24 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/src/autorest.md b/sdk/storage/Azure.Storage.Blobs/src/autorest.md index 7160bd89aba05..6c18c66066ebd 100644 --- a/sdk/storage/Azure.Storage.Blobs/src/autorest.md +++ b/sdk/storage/Azure.Storage.Blobs/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. 
``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a936baeb45003f1d31ce855084b2e54365af78af/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/ae95eb6a4701d844bada7d1c4f5ecf4a7444e5b8/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs index 38995245569cb..c06e37fdb62dc 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareCreateHeaders.cs @@ -23,11 +23,13 @@ public ShareCreateHeaders(Response response) public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; /// Returns the current share quota in GB. public long? Quota => _response.Headers.TryGetValue("x-ms-share-quota", out long? value) ? value : null; - /// The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + /// The provisioned IOPS of the share. public long? ShareProvisionedIops => _response.Headers.TryGetValue("x-ms-share-provisioned-iops", out long? value) ? value : null; - /// The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + /// The provisioned throughput of the share. public long? ShareProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out long? value) ? value : null; - /// ShareIncludedBurstIops. 
+ /// Returns the calculated burst IOPS of the share. public long? ShareIncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; + /// Returned the calculated maximum burst credits. + public long? MaxBurstCreditsForIops => _response.Headers.TryGetValue("x-ms-share-max-burst-credits-for-iops", out long? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs index f0831ee19381d..c0521e8c8e75a 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareDeleteHeaders.cs @@ -18,9 +18,9 @@ public ShareDeleteHeaders(Response response) } /// Indicates the version of the File service used to execute the request. public string Version => _response.Headers.TryGetValue("x-ms-version", out string value) ? value : null; - /// The "live share" portion of the data that the customer will be billed for in the soft-deleted capacity (logical storage size). - public long? XMsShareUsageBytes => _response.Headers.TryGetValue("x-ms-share-usage-bytes", out long? value) ? value : null; - /// The snapshot share portion of the data that the customer will be billed for in the soft-deleted capacity (this is the delta, or "physical storage size"). - public long? XMsShareSnapshotUsageBytes => _response.Headers.TryGetValue("x-ms-share-snapshot-usage-bytes", out long? value) ? value : null; + /// Returned only for provisioned v2 file shares. Returns an approximate used storage size of the share, in bytes. + public long? XMsFileShareUsageBytes => _response.Headers.TryGetValue("x-ms-file-share-usage-bytes", out long? value) ? value : null; + /// Returned only for provisioned v2 file shares. Returns an approximate used snapshot storage size of the share, in bytes. + public long? 
XMsFileShareSnapshotUsageBytes => _response.Headers.TryGetValue("x-ms-file-share-snapshot-usage-bytes", out long? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs index 0465b6de626d5..a0a18081ee687 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareGetPropertiesHeaders.cs @@ -35,7 +35,7 @@ public ShareGetPropertiesHeaders(Response response) public int? ProvisionedEgressMBps => _response.Headers.TryGetValue("x-ms-share-provisioned-egress-mbps", out int? value) ? value : null; /// Returns the current share next allowed quota downgrade time. public DateTimeOffset? NextAllowedQuotaDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-quota-downgrade-time", out DateTimeOffset? value) ? value : null; - /// Returns the current share provisioned bandwidth in megabits per second. + /// Returns the current share provisioned bandwidth in mebibytes per second. public int? ProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out int? value) ? value : null; /// When a share is leased, specifies whether the lease is of infinite or fixed duration. public ShareLeaseDuration? LeaseDuration => _response.Headers.TryGetValue("x-ms-lease-duration", out string value) ? value.ToShareLeaseDuration() : null; @@ -65,9 +65,9 @@ public ShareGetPropertiesHeaders(Response response) public long? IncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; /// Returned the calculated maximum burst credits. This is not the current burst credit level, but the maximum burst credits the share can have. public long? MaxBurstCreditsForIops => _response.Headers.TryGetValue("x-ms-share-max-burst-credits-for-iops", out long? 
value) ? value : null; - /// Return timestamp for provisioned IOPS following existing rules for provisioned storage GiB. + /// Returns the current share next allowed provisioned iops downgrade time. public DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-iops-downgrade-time", out DateTimeOffset? value) ? value : null; - /// Return timestamp for provisioned throughput following existing rules for provisioned storage GiB. + /// Returns the current share next allowed provisioned bandwidth downgrade time. public DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time", out DateTimeOffset? value) ? value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs index 7a9fbec28b317..69bb02404dd49 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestClient.cs @@ -121,8 +121,8 @@ internal HttpMessage CreateCreateRequest(int? timeout, IDictionary Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. - /// Optional. The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. - /// Optional. 
The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned number of input/output operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS is set to value calculated based on recommendation formula. + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share, in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set to value calculated based on recommendation formula. /// The cancellation token to use. public async Task> CreateAsync(int? timeout = null, IDictionary metadata = null, int? quota = null, ShareAccessTier? accessTier = null, string enabledProtocols = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? shareProvisionedBandwidthMibps = null, CancellationToken cancellationToken = default) { @@ -149,8 +149,8 @@ public async Task> CreateAsync(int? time /// Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. - /// Optional. The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). 
The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. - /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned number of input/output operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS is set to value calculated based on recommendation formula. + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share, in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set to value calculated based on recommendation formula. /// The cancellation token to use. public ResponseWithHeaders Create(int? timeout = null, IDictionary metadata = null, int? quota = null, ShareAccessTier? accessTier = null, string enabledProtocols = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? shareProvisionedBandwidthMibps = null, CancellationToken cancellationToken = default) { @@ -1001,8 +1001,8 @@ internal HttpMessage CreateSetPropertiesRequest(int? timeout, int? quota, ShareA /// Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. - /// Optional. 
The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. - /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned number of input/output operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS is set to value calculated based on recommendation formula. + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share, in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set to value calculated based on recommendation formula. /// Parameter group. /// The cancellation token to use. public async Task> SetPropertiesAsync(int? timeout = null, int? quota = null, ShareAccessTier? accessTier = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? shareProvisionedBandwidthMibps = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) @@ -1028,8 +1028,8 @@ public async Task> SetPropertiesA /// Optional. Boolean. Default if not specified is false. This property enables paid bursting. /// Optional. Integer. Default if not specified is the maximum throughput the file share can support. Current maximum for a file share is 10,340 MiB/sec. /// Optional. Integer. 
Default if not specified is the maximum IOPS the file share can support. Current maximum for a file share is 102,400 IOPS. - /// Optional. The provisioned IOPS of the share. If this is not specified, compute the recommended IOPS of the share using the formula for a share in this media tier (SSD/HDD as appropriate). The provisioned IOPS of the share is always explicitly stored on the share object, even if the recommendation formula is used. - /// Optional. The provisioned throughput of the share. If this is not specified, compute the recommended throughput of the share using the formula for a share in this media tier (SSD/HDD as appropriate). + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned number of input/output operations per second (IOPS) of the share. If this is not specified, the provisioned IOPS is set to value calculated based on recommendation formula. + /// Optional. Supported in version 2025-01-05 and later. Only allowed for provisioned v2 file shares. Specifies the provisioned bandwidth of the share, in mebibytes per second (MiBps). If this is not specified, the provisioned bandwidth is set to value calculated based on recommendation formula. /// Parameter group. /// The cancellation token to use. public ResponseWithHeaders SetProperties(int? timeout = null, int? quota = null, ShareAccessTier? accessTier = null, ShareRootSquash? rootSquash = null, bool? enableSnapshotVirtualDirectoryAccess = null, bool? paidBurstingEnabled = null, long? paidBurstingMaxBandwidthMibps = null, long? paidBurstingMaxIops = null, long? shareProvisionedIops = null, long? 
shareProvisionedBandwidthMibps = null, ShareFileRequestConditions shareFileRequestConditions = null, CancellationToken cancellationToken = default) diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs index 38d5ffc7b9409..7cbb4e511e775 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareRestoreHeaders.cs @@ -25,7 +25,7 @@ public ShareRestoreHeaders(Response response) public long? Quota => _response.Headers.TryGetValue("x-ms-share-quota", out long? value) ? value : null; /// Returns the current share provisioned ipos. public long? ProvisionedIops => _response.Headers.TryGetValue("x-ms-share-provisioned-iops", out long? value) ? value : null; - /// Returns the current share provisioned bandwidth in megabits per second. + /// Returns the current share provisioned bandwidth in mebibytes per second. public long? ProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out long? value) ? value : null; /// Return the calculated burst IOPS of the share. public long? IncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs index 9bacf8101fd8e..d12ce08289db9 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/src/Generated/ShareSetPropertiesHeaders.cs @@ -25,7 +25,7 @@ public ShareSetPropertiesHeaders(Response response) public long? Quota => _response.Headers.TryGetValue("x-ms-share-quota", out long? value) ? value : null; /// Returns the current share provisioned ipos. public long? 
ProvisionedIops => _response.Headers.TryGetValue("x-ms-share-provisioned-iops", out long? value) ? value : null; - /// Returns the current share provisioned bandwidth in megabits per second. + /// Returns the current share provisioned bandwidth in mebibytes per second. public long? ProvisionedBandwidthMibps => _response.Headers.TryGetValue("x-ms-share-provisioned-bandwidth-mibps", out long? value) ? value : null; /// Return the calculated burst IOPS of the share. public long? IncludedBurstIops => _response.Headers.TryGetValue("x-ms-share-included-burst-iops", out long? value) ? value : null; @@ -33,9 +33,9 @@ public ShareSetPropertiesHeaders(Response response) public long? MaxBurstCreditsForIops => _response.Headers.TryGetValue("x-ms-share-max-burst-credits-for-iops", out long? value) ? value : null; /// Returns the current share next allowed quota downgrade time. public DateTimeOffset? NextAllowedQuotaDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-quota-downgrade-time", out DateTimeOffset? value) ? value : null; - /// Return timestamp for provisioned IOPS following existing rules for provisioned storage GiB. + /// Returns the current share next allowed provisioned iops downgrade time. public DateTimeOffset? NextAllowedProvisionedIopsDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-iops-downgrade-time", out DateTimeOffset? value) ? value : null; - /// Return timestamp for provisioned throughput following existing rules for provisioned storage GiB. + /// Returns the current share next allowed provisioned bandwidth downgrade time. public DateTimeOffset? NextAllowedProvisionedBandwidthDowngradeTime => _response.Headers.TryGetValue("x-ms-share-next-allowed-provisioned-bandwidth-downgrade-time", out DateTimeOffset? value) ? 
value : null; } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md index 2bcc0e37ee65a..ed634ae302734 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md +++ b/sdk/storage/Azure.Storage.Files.Shares/src/autorest.md @@ -4,7 +4,7 @@ Run `dotnet build /t:GenerateCode` to generate code. ``` yaml input-file: - - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a936baeb45003f1d31ce855084b2e54365af78af/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json + - https://raw.githubusercontent.com/Azure/azure-rest-api-specs/ae95eb6a4701d844bada7d1c4f5ecf4a7444e5b8/specification/storage/data-plane/Microsoft.FileStorage/stable/2025-01-05/file.json generation1-convenience-client: true # https://github.com/Azure/autorest/issues/4075 skip-semantics-validation: true From 1cd9e0e8157423c998f4f7f6ea0efe1cee717380 Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:31:45 -0500 Subject: [PATCH 23/25] API view comments (#46391) --- .../api/Azure.Storage.Files.DataLake.net6.0.cs | 16 ++++++++-------- ...zure.Storage.Files.DataLake.netstandard2.0.cs | 16 ++++++++-------- .../src/DataLakeFileClient.cs | 16 ++++++++-------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index a202d6300f50e..eaf0e01ba3c40 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -194,17 +194,17 @@ public DataLakeFileClient(System.Uri fileUri, Azure.Storage.StorageSharedKeyCred [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual 
System.Threading.Tasks.Task> ReadAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response ReadContent() { throw null; } - public virtual Azure.Response ReadContent(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response ReadContent(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual System.Threading.Tasks.Task> ReadContentAsync() { throw null; } - public virtual System.Threading.Tasks.Task> ReadContentAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> ReadContentAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual Azure.Response ReadStreaming() { throw null; } - public virtual Azure.Response 
ReadStreaming(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response ReadStreaming(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual System.Threading.Tasks.Task> ReadStreamingAsync() { throw null; } - public virtual System.Threading.Tasks.Task> ReadStreamingAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> ReadStreamingAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeFileReadToOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual 
Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, Azure.Storage.StorageTransferOptions transferOptions, System.Threading.CancellationToken cancellationToken) { throw null; } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index a202d6300f50e..eaf0e01ba3c40 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -194,17 +194,17 @@ public DataLakeFileClient(System.Uri fileUri, Azure.Storage.StorageSharedKeyCred [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual System.Threading.Tasks.Task> ReadAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response ReadContent() { throw null; } - public virtual Azure.Response ReadContent(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response ReadContent(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadContent(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual System.Threading.Tasks.Task> ReadContentAsync() { throw null; } - public virtual System.Threading.Tasks.Task> 
ReadContentAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> ReadContentAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadContentAsync(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual Azure.Response ReadStreaming() { throw null; } - public virtual Azure.Response ReadStreaming(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual Azure.Response ReadStreaming(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReadStreaming(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual System.Threading.Tasks.Task> ReadStreamingAsync() { throw null; } - public virtual System.Threading.Tasks.Task> ReadStreamingAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task> 
ReadStreamingAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(Azure.Storage.Files.DataLake.Models.DataLakeFileReadOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> ReadStreamingAsync(System.Threading.CancellationToken cancellationToken) { throw null; } public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeFileReadToOptions options = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] public virtual Azure.Response ReadTo(System.IO.Stream destination, Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions conditions, Azure.Storage.StorageTransferOptions transferOptions, System.Threading.CancellationToken cancellationToken) { throw null; } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs index 2da5eb76349eb..e755faff2f5a7 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeFileClient.cs @@ -3384,7 +3384,7 @@ public virtual async Task> ReadStreami /// a failure occurs. /// public virtual Response ReadStreaming( - CancellationToken cancellationToken = default) + CancellationToken cancellationToken) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); @@ -3433,7 +3433,7 @@ public virtual Response ReadStreaming( /// a failure occurs. 
/// public virtual async Task> ReadStreamingAsync( - CancellationToken cancellationToken = default) + CancellationToken cancellationToken) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); @@ -3486,7 +3486,7 @@ public virtual async Task> ReadStreami /// a failure occurs. /// public virtual Response ReadStreaming( - DataLakeFileReadOptions options = default, + DataLakeFileReadOptions options, CancellationToken cancellationToken = default) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); @@ -3540,7 +3540,7 @@ public virtual Response ReadStreaming( /// a failure occurs. /// public virtual async Task> ReadStreamingAsync( - DataLakeFileReadOptions options = default, + DataLakeFileReadOptions options, CancellationToken cancellationToken = default) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadStreaming)}"); @@ -3679,7 +3679,7 @@ public virtual async Task> ReadContentAsync() /// a failure occurs. /// public virtual Response ReadContent( - CancellationToken cancellationToken = default) + CancellationToken cancellationToken) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); @@ -3728,7 +3728,7 @@ public virtual Response ReadContent( /// a failure occurs. /// public virtual async Task> ReadContentAsync( - CancellationToken cancellationToken = default) + CancellationToken cancellationToken) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); @@ -3781,7 +3781,7 @@ public virtual async Task> ReadContentAsync( /// a failure occurs. 
/// public virtual Response ReadContent( - DataLakeFileReadOptions options = default, + DataLakeFileReadOptions options, CancellationToken cancellationToken = default) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); @@ -3835,7 +3835,7 @@ public virtual Response ReadContent( /// a failure occurs. /// public virtual async Task> ReadContentAsync( - DataLakeFileReadOptions options = default, + DataLakeFileReadOptions options, CancellationToken cancellationToken = default) { DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(DataLakeFileClient)}.{nameof(ReadContent)}"); From 2272ba15375aa20a8f89eb9f61dead29b9609335 Mon Sep 17 00:00:00 2001 From: Amanda Nguyen <48961492+amnguye@users.noreply.github.com> Date: Tue, 1 Oct 2024 17:16:31 -0700 Subject: [PATCH 24/25] Testing to see if moving Models around would resolve issue (#46392) --- .../tests/Shared/DisposingShare.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs index d5defd931e31d..e3490265574ac 100644 --- a/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs +++ b/sdk/storage/Azure.Storage.DataMovement.Files.Shares/tests/Shared/DisposingShare.cs @@ -6,7 +6,6 @@ using System.Threading.Tasks; using BaseShares::Azure.Storage.Files.Shares; using Azure.Storage.Test.Shared; -using BaseShares::Azure.Storage.Files.Shares.Models; namespace Azure.Storage.DataMovement.Files.Shares.Tests { @@ -18,7 +17,7 @@ public class DisposingShare : IDisposingContainer public static async Task CreateAsync(ShareClient share, IDictionary metadata) { - ShareCreateOptions options = new ShareCreateOptions + BaseShares::Azure.Storage.Files.Shares.Models.ShareCreateOptions options = new() { Metadata = metadata }; From 
6da45e12e68af50ecaf6316127f235afab05012a Mon Sep 17 00:00:00 2001 From: Sean McCullough <44180881+seanmcc-msft@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:04:41 -0500 Subject: [PATCH 25/25] Enabled STG 96 live tests (#46289) --- .../Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs | 4 ++-- .../api/Azure.Storage.Blobs.netstandard2.0.cs | 4 ++-- .../api/Azure.Storage.Blobs.netstandard2.1.cs | 4 ++-- .../src/Shared/StorageVersionExtensions.cs | 2 +- .../api/Azure.Storage.Files.DataLake.net6.0.cs | 2 +- .../api/Azure.Storage.Files.DataLake.netstandard2.0.cs | 2 +- .../api/Azure.Storage.Files.Shares.net6.0.cs | 2 +- .../api/Azure.Storage.Files.Shares.netstandard2.0.cs | 2 +- .../Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs | 4 ++-- .../api/Azure.Storage.Queues.netstandard2.0.cs | 4 ++-- .../api/Azure.Storage.Queues.netstandard2.1.cs | 4 ++-- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs index 25640917de5bb..d93de39ce28c0 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.net6.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -1850,7 +1850,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs index 25640917de5bb..d93de39ce28c0 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.0.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -1850,7 +1850,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs index 25640917de5bb..d93de39ce28c0 100644 --- a/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs +++ b/sdk/storage/Azure.Storage.Blobs/api/Azure.Storage.Blobs.netstandard2.1.cs @@ -51,7 +51,7 @@ public BlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredential c } public partial class BlobClientOptions : Azure.Core.ClientOptions { - public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) { } + public BlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Blobs.Models.BlobAudience? Audience { get { throw null; } set { } } public Azure.Storage.Blobs.Models.CustomerProvidedKey? 
CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } @@ -1850,7 +1850,7 @@ public PageBlobClient(System.Uri blobUri, Azure.Storage.StorageSharedKeyCredenti } public partial class SpecializedBlobClientOptions : Azure.Storage.Blobs.BlobClientOptions { - public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2024_11_04) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } + public SpecializedBlobClientOptions(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion version = Azure.Storage.Blobs.BlobClientOptions.ServiceVersion.V2025_01_05) : base (default(Azure.Storage.Blobs.BlobClientOptions.ServiceVersion)) { } public Azure.Storage.ClientSideEncryptionOptions ClientSideEncryption { get { throw null; } set { } } } public static partial class SpecializedBlobExtensions diff --git a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs index 2a7bd90fb82a1..44c0973ea9be1 100644 --- a/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs +++ b/sdk/storage/Azure.Storage.Common/src/Shared/StorageVersionExtensions.cs @@ -46,7 +46,7 @@ internal static class StorageVersionExtensions /// public const ServiceVersion LatestVersion = #if BlobSDK || QueueSDK || FileSDK || DataLakeSDK || ChangeFeedSDK || DataMovementSDK || BlobDataMovementSDK || ShareDataMovementSDK - ServiceVersion.V2024_11_04; + ServiceVersion.V2025_01_05; #else ERROR_STORAGE_SERVICE_NOT_DEFINED; #endif diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs index eaf0e01ba3c40..c5b8a7798a0cf 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs +++ 
b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.net6.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs index eaf0e01ba3c40..c5b8a7798a0cf 100644 --- a/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.DataLake/api/Azure.Storage.Files.DataLake.netstandard2.0.cs @@ -2,7 +2,7 @@ namespace Azure.Storage.Files.DataLake { public partial class DataLakeClientOptions : Azure.Core.ClientOptions { - public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2024_11_04) { } + public DataLakeClientOptions(Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion version = Azure.Storage.Files.DataLake.DataLakeClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Files.DataLake.Models.DataLakeAudience? 
Audience { get { throw null; } set { } } public Azure.Storage.Files.DataLake.Models.DataLakeCustomerProvidedKey? CustomerProvidedKey { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs index cf8ce32808d81..b1b355dda471c 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.net6.0.cs @@ -115,7 +115,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs index cf8ce32808d81..b1b355dda471c 100644 --- a/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs +++ b/sdk/storage/Azure.Storage.Files.Shares/api/Azure.Storage.Files.Shares.netstandard2.0.cs @@ -115,7 +115,7 @@ public ShareClient(System.Uri shareUri, Azure.Storage.StorageSharedKeyCredential } public partial class ShareClientOptions : Azure.Core.ClientOptions { - public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2024_11_04) { } + public ShareClientOptions(Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion version = Azure.Storage.Files.Shares.ShareClientOptions.ServiceVersion.V2025_01_05) { } public bool? AllowSourceTrailingDot { get { throw null; } set { } } public bool? AllowTrailingDot { get { throw null; } set { } } public Azure.Storage.Files.Shares.Models.ShareAudience? 
Audience { get { throw null; } set { } } diff --git a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs index 96bc919c7a719..9f440eb3639d7 100644 --- a/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs +++ b/sdk/storage/Azure.Storage.Queues/api/Azure.Storage.Queues.net6.0.cs @@ -74,7 +74,7 @@ public QueueClient(System.Uri queueUri, Azure.Storage.StorageSharedKeyCredential } public partial class QueueClientOptions : Azure.Core.ClientOptions { - public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2024_11_04) { } + public QueueClientOptions(Azure.Storage.Queues.QueueClientOptions.ServiceVersion version = Azure.Storage.Queues.QueueClientOptions.ServiceVersion.V2025_01_05) { } public Azure.Storage.Queues.Models.QueueAudience? Audience { get { throw null; } set { } } public bool EnableTenantDiscovery { get { throw null; } set { } } public System.Uri GeoRedundantSecondaryUri { get { throw null; } set { } } @@ -426,7 +426,7 @@ public event System.EventHandler