diff --git a/api/swagger.yml b/api/swagger.yml
index 039303747f9..e569617af5a 100644
--- a/api/swagger.yml
+++ b/api/swagger.yml
@@ -1253,6 +1253,20 @@ components:
           type: string
         pre_sign_multipart_upload:
           type: boolean
+        blockstore_id:
+          type: string
+        blockstore_description:
+          type: string
+        blockstore_extras:
+          type: object
+          description: blockstore specific properties
+          additionalProperties:
+            type: string
+
+    StorageConfigList:
+      type: array
+      items:
+        $ref: "#/components/schemas/StorageConfig"
 
     Config:
       type: object
@@ -1261,6 +1275,8 @@
           $ref: "#/components/schemas/VersionConfig"
         storage_config:
           $ref: "#/components/schemas/StorageConfig"
+        storage_config_list:
+          $ref: "#/components/schemas/StorageConfigList"
 
     VersionConfig:
       type: object
       properties:
diff --git a/clients/java/api/openapi.yaml b/clients/java/api/openapi.yaml
index 458f93d2741..1e773c02ca4 100644
--- a/clients/java/api/openapi.yaml
+++ b/clients/java/api/openapi.yaml
@@ -8990,9 +8990,13 @@ components:
       type: object
     StorageConfig:
       example:
+        blockstore_description: blockstore_description
         pre_sign_multipart_upload: true
+        blockstore_extras:
+          key: blockstore_extras
         blockstore_namespace_example: blockstore_namespace_example
         blockstore_namespace_ValidityRegex: blockstore_namespace_ValidityRegex
+        blockstore_id: blockstore_id
         blockstore_type: blockstore_type
         pre_sign_support_ui: true
         import_support: true
         import_validity_regex: import_validity_regex
         default_namespace_prefix: default_namespace_prefix
         pre_sign_support: true
@@ -9018,6 +9022,15 @@ components:
           type: string
         pre_sign_multipart_upload:
           type: boolean
+        blockstore_id:
+          type: string
+        blockstore_description:
+          type: string
+        blockstore_extras:
+          additionalProperties:
+            type: string
+          description: blockstore specific properties
+          type: object
       required:
       - blockstore_namespace_ValidityRegex
       - blockstore_namespace_example
@@ -9027,12 +9040,47 @@ components:
      - blockstore_type
      - import_support
      - import_validity_regex
      - pre_sign_support
      - pre_sign_support_ui
      type: object
+    StorageConfigList:
+      items:
+        $ref: '#/components/schemas/StorageConfig'
+      type: array
     Config:
       example:
+        storage_config_list:
+        - blockstore_description: blockstore_description
+          pre_sign_multipart_upload: true
+          blockstore_extras:
+            key: blockstore_extras
+          blockstore_namespace_example: blockstore_namespace_example
+          blockstore_namespace_ValidityRegex: blockstore_namespace_ValidityRegex
+          blockstore_id: blockstore_id
+          blockstore_type: blockstore_type
+          pre_sign_support_ui: true
+          import_support: true
+          import_validity_regex: import_validity_regex
+          default_namespace_prefix: default_namespace_prefix
+          pre_sign_support: true
+        - blockstore_description: blockstore_description
+          pre_sign_multipart_upload: true
+          blockstore_extras:
+            key: blockstore_extras
+          blockstore_namespace_example: blockstore_namespace_example
+          blockstore_namespace_ValidityRegex: blockstore_namespace_ValidityRegex
+          blockstore_id: blockstore_id
+          blockstore_type: blockstore_type
+          pre_sign_support_ui: true
+          import_support: true
+          import_validity_regex: import_validity_regex
+          default_namespace_prefix: default_namespace_prefix
+          pre_sign_support: true
         storage_config:
+          blockstore_description: blockstore_description
           pre_sign_multipart_upload: true
+          blockstore_extras:
+            key: blockstore_extras
           blockstore_namespace_example: blockstore_namespace_example
           blockstore_namespace_ValidityRegex: blockstore_namespace_ValidityRegex
+          blockstore_id: blockstore_id
           blockstore_type: blockstore_type
           pre_sign_support_ui: true
           import_support: true
           import_validity_regex: import_validity_regex
           default_namespace_prefix: default_namespace_prefix
           pre_sign_support: true
@@ -9049,6 +9097,10 @@ components:
           $ref: '#/components/schemas/VersionConfig'
         storage_config:
           $ref: '#/components/schemas/StorageConfig'
+        storage_config_list:
+          items:
+            $ref: '#/components/schemas/StorageConfig'
+          type: array
       type: object
     VersionConfig:
       example:
diff --git a/clients/java/docs/Config.md b/clients/java/docs/Config.md
index 5729991b035..d02ca9fe711 100644
--- a/clients/java/docs/Config.md
+++ b/clients/java/docs/Config.md
@@ -9,6 +9,7 @@
 |------------ | ------------- | ------------- | -------------|
 |**versionConfig** | [**VersionConfig**](VersionConfig.md) |  | [optional] |
 |**storageConfig** | [**StorageConfig**](StorageConfig.md) |  | [optional] |
+|**storageConfigList** | [**List<StorageConfig>**](StorageConfig.md) |  | [optional] |
diff --git a/clients/java/docs/StorageConfig.md b/clients/java/docs/StorageConfig.md
index ff981182e0c..cb1c11f6fb7 100644
--- a/clients/java/docs/StorageConfig.md
+++ b/clients/java/docs/StorageConfig.md
@@ -16,6 +16,9 @@
 |**importSupport** | **Boolean** |  |  |
 |**importValidityRegex** | **String** |  |  |
 |**preSignMultipartUpload** | **Boolean** |  | [optional] |
+|**blockstoreId** | **String** |  | [optional] |
+|**blockstoreDescription** | **String** |  | [optional] |
+|**blockstoreExtras** | **Map<String, String>** | blockstore specific properties | [optional] |
diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/model/Config.java b/clients/java/src/main/java/io/lakefs/clients/sdk/model/Config.java
index 296d9134df1..5cef9444bc2 100644
--- a/clients/java/src/main/java/io/lakefs/clients/sdk/model/Config.java
+++ b/clients/java/src/main/java/io/lakefs/clients/sdk/model/Config.java
@@ -22,7 +22,9 @@
 import io.lakefs.clients.sdk.model.StorageConfig;
 import io.lakefs.clients.sdk.model.VersionConfig;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -62,6 +64,10 @@ public class Config {
   @SerializedName(SERIALIZED_NAME_STORAGE_CONFIG)
   private StorageConfig storageConfig;
 
+  public static final String SERIALIZED_NAME_STORAGE_CONFIG_LIST = "storage_config_list";
+  @SerializedName(SERIALIZED_NAME_STORAGE_CONFIG_LIST)
+  private List<StorageConfig> storageConfigList;
+
   public Config() {
   }
@@ -106,6 +112,35 @@ public void setStorageConfig(StorageConfig storageConfig) {
     this.storageConfig = storageConfig;
   }
 
+
+  public Config storageConfigList(List<StorageConfig> storageConfigList) {
+
+    this.storageConfigList = storageConfigList;
+    return this;
+  }
+
+  public Config addStorageConfigListItem(StorageConfig storageConfigListItem) {
+    if (this.storageConfigList == null) {
+      this.storageConfigList = new ArrayList<>();
+    }
+    this.storageConfigList.add(storageConfigListItem);
+    return this;
+  }
+
+  /**
+   * Get storageConfigList
+   * @return storageConfigList
+   **/
+  @javax.annotation.Nullable
+  public List<StorageConfig> getStorageConfigList() {
+    return storageConfigList;
+  }
+
+
+  public void setStorageConfigList(List<StorageConfig> storageConfigList) {
+    this.storageConfigList = storageConfigList;
+  }
+
   /**
    * A container for additional, undeclared properties.
   * This is a holder for any undeclared properties as specified with
@@ -162,13 +197,14 @@ public boolean equals(Object o) {
     }
     Config config = (Config) o;
     return Objects.equals(this.versionConfig, config.versionConfig) &&
-        Objects.equals(this.storageConfig, config.storageConfig)&&
+        Objects.equals(this.storageConfig, config.storageConfig) &&
+        Objects.equals(this.storageConfigList, config.storageConfigList)&&
         Objects.equals(this.additionalProperties, config.additionalProperties);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(versionConfig, storageConfig, additionalProperties);
+    return Objects.hash(versionConfig, storageConfig, storageConfigList, additionalProperties);
   }
 
   @Override
@@ -177,6 +213,7 @@ public String toString() {
     sb.append("class Config {\n");
     sb.append("    versionConfig: ").append(toIndentedString(versionConfig)).append("\n");
     sb.append("    storageConfig: ").append(toIndentedString(storageConfig)).append("\n");
+    sb.append("    storageConfigList: ").append(toIndentedString(storageConfigList)).append("\n");
     sb.append("    additionalProperties: ").append(toIndentedString(additionalProperties)).append("\n");
     sb.append("}");
     return sb.toString();
@@ -202,6 +239,7 @@ private String toIndentedString(Object o) {
     openapiFields = new HashSet<String>();
     openapiFields.add("version_config");
     openapiFields.add("storage_config");
+    openapiFields.add("storage_config_list");
 
     // a set of required properties/fields (JSON key names)
     openapiRequiredFields = new HashSet<String>();
@@ -228,6 +266,20 @@ public static void validateJsonElement(JsonElement jsonElement) throws IOExcepti
       if (jsonObj.get("storage_config") != null && !jsonObj.get("storage_config").isJsonNull()) {
         StorageConfig.validateJsonElement(jsonObj.get("storage_config"));
       }
+      if (jsonObj.get("storage_config_list") != null && !jsonObj.get("storage_config_list").isJsonNull()) {
+        JsonArray jsonArraystorageConfigList = jsonObj.getAsJsonArray("storage_config_list");
+        if (jsonArraystorageConfigList != null) {
+          // ensure the json data is an array
+          if (!jsonObj.get("storage_config_list").isJsonArray()) {
+            throw new IllegalArgumentException(String.format("Expected the field `storage_config_list` to be an array in the JSON string but got `%s`", jsonObj.get("storage_config_list").toString()));
+          }
+
+          // validate the optional field `storage_config_list` (array)
+          for (int i = 0; i < jsonArraystorageConfigList.size(); i++) {
+            StorageConfig.validateJsonElement(jsonArraystorageConfigList.get(i));
+          };
+        }
+      }
   }
 
   public static class CustomTypeAdapterFactory implements TypeAdapterFactory {
diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/model/StorageConfig.java b/clients/java/src/main/java/io/lakefs/clients/sdk/model/StorageConfig.java
index f4f6759e280..fe4f03bb82e 100644
--- a/clients/java/src/main/java/io/lakefs/clients/sdk/model/StorageConfig.java
+++ b/clients/java/src/main/java/io/lakefs/clients/sdk/model/StorageConfig.java
@@ -21,6 +21,8 @@
 import com.google.gson.stream.JsonWriter;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -88,6 +90,18 @@ public class StorageConfig {
   @SerializedName(SERIALIZED_NAME_PRE_SIGN_MULTIPART_UPLOAD)
   private Boolean preSignMultipartUpload;
 
+  public static final String SERIALIZED_NAME_BLOCKSTORE_ID = "blockstore_id";
+  @SerializedName(SERIALIZED_NAME_BLOCKSTORE_ID)
+  private String blockstoreId;
+
+  public static final String SERIALIZED_NAME_BLOCKSTORE_DESCRIPTION = "blockstore_description";
+  @SerializedName(SERIALIZED_NAME_BLOCKSTORE_DESCRIPTION)
+  private String blockstoreDescription;
+
+  public static final String SERIALIZED_NAME_BLOCKSTORE_EXTRAS = "blockstore_extras";
+  @SerializedName(SERIALIZED_NAME_BLOCKSTORE_EXTRAS)
+  private Map<String, String> blockstoreExtras = new HashMap<>();
+
   public StorageConfig() {
   }
@@ -279,6 +293,77 @@ public void setPreSignMultipartUpload(Boolean preSignMultipartUpload) {
     this.preSignMultipartUpload = preSignMultipartUpload;
   }
 
+
+  public StorageConfig blockstoreId(String blockstoreId) {
+
+    this.blockstoreId = blockstoreId;
+    return this;
+  }
+
+  /**
+   * Get blockstoreId
+   * @return blockstoreId
+   **/
+  @javax.annotation.Nullable
+  public String getBlockstoreId() {
+    return blockstoreId;
+  }
+
+
+  public void setBlockstoreId(String blockstoreId) {
+    this.blockstoreId = blockstoreId;
+  }
+
+
+  public StorageConfig blockstoreDescription(String blockstoreDescription) {
+
+    this.blockstoreDescription = blockstoreDescription;
+    return this;
+  }
+
+  /**
+   * Get blockstoreDescription
+   * @return blockstoreDescription
+   **/
+  @javax.annotation.Nullable
+  public String getBlockstoreDescription() {
+    return blockstoreDescription;
+  }
+
+
+  public void setBlockstoreDescription(String blockstoreDescription) {
+    this.blockstoreDescription = blockstoreDescription;
+  }
+
+
+  public StorageConfig blockstoreExtras(Map<String, String> blockstoreExtras) {
+
+    this.blockstoreExtras = blockstoreExtras;
+    return this;
+  }
+
+  public StorageConfig putBlockstoreExtrasItem(String key, String blockstoreExtrasItem) {
+    if (this.blockstoreExtras == null) {
+      this.blockstoreExtras = new HashMap<>();
+    }
+    this.blockstoreExtras.put(key, blockstoreExtrasItem);
+    return this;
+  }
+
+  /**
+   * blockstore specific properties
+   * @return blockstoreExtras
+   **/
+  @javax.annotation.Nullable
+  public Map<String, String> getBlockstoreExtras() {
+    return blockstoreExtras;
+  }
+
+
+  public void setBlockstoreExtras(Map<String, String> blockstoreExtras) {
+    this.blockstoreExtras = blockstoreExtras;
+  }
+
   /**
    * A container for additional, undeclared properties.
   * This is a holder for any undeclared properties as specified with
@@ -342,13 +427,16 @@ public boolean equals(Object o) {
         Objects.equals(this.preSignSupportUi, storageConfig.preSignSupportUi) &&
         Objects.equals(this.importSupport, storageConfig.importSupport) &&
         Objects.equals(this.importValidityRegex, storageConfig.importValidityRegex) &&
-        Objects.equals(this.preSignMultipartUpload, storageConfig.preSignMultipartUpload)&&
+        Objects.equals(this.preSignMultipartUpload, storageConfig.preSignMultipartUpload) &&
+        Objects.equals(this.blockstoreId, storageConfig.blockstoreId) &&
+        Objects.equals(this.blockstoreDescription, storageConfig.blockstoreDescription) &&
+        Objects.equals(this.blockstoreExtras, storageConfig.blockstoreExtras)&&
         Objects.equals(this.additionalProperties, storageConfig.additionalProperties);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(blockstoreType, blockstoreNamespaceExample, blockstoreNamespaceValidityRegex, defaultNamespacePrefix, preSignSupport, preSignSupportUi, importSupport, importValidityRegex, preSignMultipartUpload, additionalProperties);
+    return Objects.hash(blockstoreType, blockstoreNamespaceExample, blockstoreNamespaceValidityRegex, defaultNamespacePrefix, preSignSupport, preSignSupportUi, importSupport, importValidityRegex, preSignMultipartUpload, blockstoreId, blockstoreDescription, blockstoreExtras, additionalProperties);
   }
 
   @Override
@@ -364,6 +452,9 @@ public String toString() {
     sb.append("    importSupport: ").append(toIndentedString(importSupport)).append("\n");
     sb.append("    importValidityRegex: ").append(toIndentedString(importValidityRegex)).append("\n");
     sb.append("    preSignMultipartUpload: ").append(toIndentedString(preSignMultipartUpload)).append("\n");
+    sb.append("    blockstoreId: ").append(toIndentedString(blockstoreId)).append("\n");
+    sb.append("    blockstoreDescription: ").append(toIndentedString(blockstoreDescription)).append("\n");
+    sb.append("    blockstoreExtras: ").append(toIndentedString(blockstoreExtras)).append("\n");
     sb.append("    additionalProperties: ").append(toIndentedString(additionalProperties)).append("\n");
     sb.append("}");
     return sb.toString();
@@ -396,6 +487,9 @@ private String toIndentedString(Object o) {
     openapiFields.add("import_support");
     openapiFields.add("import_validity_regex");
     openapiFields.add("pre_sign_multipart_upload");
+    openapiFields.add("blockstore_id");
+    openapiFields.add("blockstore_description");
+    openapiFields.add("blockstore_extras");
 
     // a set of required properties/fields (JSON key names)
     openapiRequiredFields = new HashSet<String>();
@@ -443,6 +537,12 @@ public static void validateJsonElement(JsonElement jsonElement) throws IOExcepti
       if (!jsonObj.get("import_validity_regex").isJsonPrimitive()) {
         throw new IllegalArgumentException(String.format("Expected the field `import_validity_regex` to be a primitive type in the JSON string but got `%s`", jsonObj.get("import_validity_regex").toString()));
       }
+      if ((jsonObj.get("blockstore_id") != null && !jsonObj.get("blockstore_id").isJsonNull()) && !jsonObj.get("blockstore_id").isJsonPrimitive()) {
+        throw new IllegalArgumentException(String.format("Expected the field `blockstore_id` to be a primitive type in the JSON string but got `%s`", jsonObj.get("blockstore_id").toString()));
+      }
+      if ((jsonObj.get("blockstore_description") != null && !jsonObj.get("blockstore_description").isJsonNull()) && !jsonObj.get("blockstore_description").isJsonPrimitive()) {
+        throw new IllegalArgumentException(String.format("Expected the field `blockstore_description` to be a primitive type in the JSON string but got `%s`", jsonObj.get("blockstore_description").toString()));
+      }
   }
 
   public static class CustomTypeAdapterFactory implements TypeAdapterFactory {
diff --git a/clients/java/src/test/java/io/lakefs/clients/sdk/model/ConfigTest.java b/clients/java/src/test/java/io/lakefs/clients/sdk/model/ConfigTest.java
index e91fe70936e..af2ad0986b6 100644
--- a/clients/java/src/test/java/io/lakefs/clients/sdk/model/ConfigTest.java
+++ b/clients/java/src/test/java/io/lakefs/clients/sdk/model/ConfigTest.java
@@ -21,7 +21,9 @@
 import io.lakefs.clients.sdk.model.StorageConfig;
 import io.lakefs.clients.sdk.model.VersionConfig;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
@@ -55,4 +57,12 @@ public void storageConfigTest() {
         // TODO: test storageConfig
     }
 
+    /**
+     * Test the property 'storageConfigList'
+     */
+    @Test
+    public void storageConfigListTest() {
+        // TODO: test storageConfigList
+    }
+
 }
diff --git a/clients/java/src/test/java/io/lakefs/clients/sdk/model/StorageConfigTest.java b/clients/java/src/test/java/io/lakefs/clients/sdk/model/StorageConfigTest.java
index 7d9c0f88b49..51155667264 100644
--- a/clients/java/src/test/java/io/lakefs/clients/sdk/model/StorageConfigTest.java
+++ b/clients/java/src/test/java/io/lakefs/clients/sdk/model/StorageConfigTest.java
@@ -20,6 +20,8 @@
 import com.google.gson.stream.JsonWriter;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
 import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
@@ -109,4 +111,28 @@ public void preSignMultipartUploadTest() {
         // TODO: test preSignMultipartUpload
     }
 
+    /**
+     * Test the property 'blockstoreId'
+     */
+    @Test
+    public void blockstoreIdTest() {
+        // TODO: test blockstoreId
+    }
+
+    /**
+     * Test the property 'blockstoreDescription'
+     */
+    @Test
+    public void blockstoreDescriptionTest() {
+        // TODO: test blockstoreDescription
+    }
+
+    /**
+     * Test the property 'blockstoreExtras'
+     */
+    @Test
+    public void blockstoreExtrasTest() {
+        // TODO: test blockstoreExtras
+    }
+
 }
diff --git a/clients/python/docs/Config.md b/clients/python/docs/Config.md
index a082349a18b..e6ef30ea386 100644
--- a/clients/python/docs/Config.md
+++ b/clients/python/docs/Config.md
@@ -7,6 +7,7 @@
 Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
 **version_config** | [**VersionConfig**](VersionConfig.md) |  | [optional]
 **storage_config** | [**StorageConfig**](StorageConfig.md) |  | [optional]
+**storage_config_list** | [**List[StorageConfig]**](StorageConfig.md) |  | [optional]
 
 ## Example
diff --git a/clients/python/docs/StorageConfig.md b/clients/python/docs/StorageConfig.md
index 786ea65af02..9f7c5a0ceab 100644
--- a/clients/python/docs/StorageConfig.md
+++ b/clients/python/docs/StorageConfig.md
@@ -14,6 +14,9 @@
 **import_support** | **bool** |  |
 **import_validity_regex** | **str** |  |
 **pre_sign_multipart_upload** | **bool** |  | [optional]
+**blockstore_id** | **str** |  | [optional]
+**blockstore_description** | **str** |  | [optional]
+**blockstore_extras** | **Dict[str, str]** | blockstore specific properties | [optional]
 
 ## Example
diff --git a/clients/python/lakefs_sdk/models/config.py b/clients/python/lakefs_sdk/models/config.py
index 46f91c0b1b5..029d6988162 100644
--- a/clients/python/lakefs_sdk/models/config.py
+++ b/clients/python/lakefs_sdk/models/config.py
@@ -19,11 +19,11 @@
 import json
 
-from typing import Optional
+from typing import List, Optional
 try:
-    from pydantic.v1 import BaseModel
+    from pydantic.v1 import BaseModel, conlist
 except ImportError:
-    from pydantic import BaseModel
+    from pydantic import BaseModel, conlist
 from lakefs_sdk.models.storage_config import StorageConfig
 from lakefs_sdk.models.version_config import VersionConfig
@@ -33,7 +33,8 @@ class Config(BaseModel):
     """
     version_config: Optional[VersionConfig] = None
     storage_config: Optional[StorageConfig] = None
-    __properties = ["version_config", "storage_config"]
+    storage_config_list: Optional[conlist(StorageConfig)] = None
+    __properties = ["version_config", "storage_config", "storage_config_list"]
 
     class Config:
         """Pydantic configuration"""
@@ -65,6 +66,13 @@ def to_dict(self):
         # override the default output from pydantic by calling `to_dict()` of storage_config
         if self.storage_config:
             _dict['storage_config'] = self.storage_config.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of each item in storage_config_list (list)
+        _items = []
+        if self.storage_config_list:
+            for _item in self.storage_config_list:
+                if _item:
+                    _items.append(_item.to_dict())
+            _dict['storage_config_list'] = _items
         return _dict
 
     @classmethod
@@ -78,7 +86,8 @@ def from_dict(cls, obj: dict) -> Config:
 
         _obj = Config.parse_obj({
             "version_config": VersionConfig.from_dict(obj.get("version_config")) if obj.get("version_config") is not None else None,
-            "storage_config": StorageConfig.from_dict(obj.get("storage_config")) if obj.get("storage_config") is not None else None
+            "storage_config": StorageConfig.from_dict(obj.get("storage_config")) if obj.get("storage_config") is not None else None,
+            "storage_config_list": [StorageConfig.from_dict(_item) for _item in obj.get("storage_config_list")] if obj.get("storage_config_list") is not None else None
         })
         return _obj
diff --git a/clients/python/lakefs_sdk/models/storage_config.py b/clients/python/lakefs_sdk/models/storage_config.py
index eddeab29b3e..004ea1eb4c9 100644
--- a/clients/python/lakefs_sdk/models/storage_config.py
+++ b/clients/python/lakefs_sdk/models/storage_config.py
@@ -19,7 +19,7 @@
 import json
 
-from typing import Optional
+from typing import Dict, Optional
 try:
     from pydantic.v1 import BaseModel, Field, StrictBool, StrictStr
 except ImportError:
@@ -38,7 +38,10 @@ class StorageConfig(BaseModel):
     import_support: StrictBool = Field(...)
     import_validity_regex: StrictStr = Field(...)
     pre_sign_multipart_upload: Optional[StrictBool] = None
-    __properties = ["blockstore_type", "blockstore_namespace_example", "blockstore_namespace_ValidityRegex", "default_namespace_prefix", "pre_sign_support", "pre_sign_support_ui", "import_support", "import_validity_regex", "pre_sign_multipart_upload"]
+    blockstore_id: Optional[StrictStr] = None
+    blockstore_description: Optional[StrictStr] = None
+    blockstore_extras: Optional[Dict[str, StrictStr]] = Field(None, description="blockstore specific properties")
+    __properties = ["blockstore_type", "blockstore_namespace_example", "blockstore_namespace_ValidityRegex", "default_namespace_prefix", "pre_sign_support", "pre_sign_support_ui", "import_support", "import_validity_regex", "pre_sign_multipart_upload", "blockstore_id", "blockstore_description", "blockstore_extras"]
 
     class Config:
         """Pydantic configuration"""
@@ -84,7 +87,10 @@ def from_dict(cls, obj: dict) -> StorageConfig:
             "pre_sign_support_ui": obj.get("pre_sign_support_ui"),
             "import_support": obj.get("import_support"),
             "import_validity_regex": obj.get("import_validity_regex"),
-            "pre_sign_multipart_upload": obj.get("pre_sign_multipart_upload")
+            "pre_sign_multipart_upload": obj.get("pre_sign_multipart_upload"),
+            "blockstore_id": obj.get("blockstore_id"),
+            "blockstore_description": obj.get("blockstore_description"),
+            "blockstore_extras": obj.get("blockstore_extras")
         })
         return _obj
diff --git a/clients/python/test/test_config.py b/clients/python/test/test_config.py
index 8fb17bdadd6..ada1db75430 100644
--- a/clients/python/test/test_config.py
+++ b/clients/python/test/test_config.py
@@ -53,7 +53,29 @@ def make_instance(self, include_optional):
                     pre_sign_support_ui = True,
                     import_support = True,
                     import_validity_regex = '',
-                    pre_sign_multipart_upload = True, )
+                    pre_sign_multipart_upload = True,
+                    blockstore_id = '',
+                    blockstore_description = '',
+                    blockstore_extras = {
+                        'key' : ''
+                        }, ),
+                storage_config_list = [
+                    lakefs_sdk.models.storage_config.StorageConfig(
+                        blockstore_type = '',
+                        blockstore_namespace_example = '',
+                        blockstore_namespace_validity_regex = '',
+                        default_namespace_prefix = '',
+                        pre_sign_support = True,
+                        pre_sign_support_ui = True,
+                        import_support = True,
+                        import_validity_regex = '',
+                        pre_sign_multipart_upload = True,
+                        blockstore_id = '',
+                        blockstore_description = '',
+                        blockstore_extras = {
+                            'key' : ''
+                            }, )
+                    ]
             )
         else :
             return Config(
diff --git a/clients/python/test/test_storage_config.py b/clients/python/test/test_storage_config.py
index eb4230a0476..0e3e29f6f50 100644
--- a/clients/python/test/test_storage_config.py
+++ b/clients/python/test/test_storage_config.py
@@ -47,7 +47,12 @@ def make_instance(self, include_optional):
                 pre_sign_support_ui = True,
                 import_support = True,
                 import_validity_regex = '',
-                pre_sign_multipart_upload = True
+                pre_sign_multipart_upload = True,
+                blockstore_id = '',
+                blockstore_description = '',
+                blockstore_extras = {
+                    'key' : ''
+                    }
             )
         else :
             return StorageConfig(
diff --git a/clients/rust/docs/Config.md b/clients/rust/docs/Config.md
index 10a01b7a533..e66bf663b5e 100644
--- a/clients/rust/docs/Config.md
+++ b/clients/rust/docs/Config.md
@@ -6,6 +6,7 @@
 Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
 **version_config** | Option<[**models::VersionConfig**](VersionConfig.md)> |  | [optional]
 **storage_config** | Option<[**models::StorageConfig**](StorageConfig.md)> |  | [optional]
+**storage_config_list** | Option<[**Vec<models::StorageConfig>**](StorageConfig.md)> |  | [optional]
 
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/clients/rust/docs/StorageConfig.md b/clients/rust/docs/StorageConfig.md
index 1a56c66a73b..a2b01658611 100644
--- a/clients/rust/docs/StorageConfig.md
+++ b/clients/rust/docs/StorageConfig.md
@@ -13,6 +13,9 @@
 **import_support** | **bool** |  |
 **import_validity_regex** | **String** |  |
 **pre_sign_multipart_upload** | Option<**bool**> |  | [optional]
+**blockstore_id** | Option<**String**> |  | [optional]
+**blockstore_description** | Option<**String**> |  | [optional]
+**blockstore_extras** | Option<**std::collections::HashMap<String, String>**> | blockstore specific properties | [optional]
 
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/clients/rust/src/models/config.rs b/clients/rust/src/models/config.rs
index 324bdde728f..b53a888617e 100644
--- a/clients/rust/src/models/config.rs
+++ b/clients/rust/src/models/config.rs
@@ -16,6 +16,8 @@ pub struct Config {
     pub version_config: Option<Box<models::VersionConfig>>,
     #[serde(rename = "storage_config", skip_serializing_if = "Option::is_none")]
     pub storage_config: Option<Box<models::StorageConfig>>,
+    #[serde(rename = "storage_config_list", skip_serializing_if = "Option::is_none")]
+    pub storage_config_list: Option<Vec<models::StorageConfig>>,
 }
 
 impl Config {
@@ -23,6 +25,7 @@ impl Config {
         Config {
             version_config: None,
             storage_config: None,
+            storage_config_list: None,
         }
     }
 }
diff --git a/clients/rust/src/models/storage_config.rs b/clients/rust/src/models/storage_config.rs
index 5060d55c11c..a69449a7b06 100644
--- a/clients/rust/src/models/storage_config.rs
+++ b/clients/rust/src/models/storage_config.rs
@@ -30,6 +30,13 @@ pub struct StorageConfig {
     pub import_validity_regex: String,
     #[serde(rename = "pre_sign_multipart_upload", skip_serializing_if = "Option::is_none")]
     pub pre_sign_multipart_upload: Option<bool>,
+    #[serde(rename = "blockstore_id", skip_serializing_if = "Option::is_none")]
+    pub blockstore_id: Option<String>,
+    #[serde(rename = "blockstore_description", skip_serializing_if = "Option::is_none")]
+    pub blockstore_description: Option<String>,
+    /// blockstore specific properties
+    #[serde(rename = "blockstore_extras", skip_serializing_if = "Option::is_none")]
+    pub blockstore_extras: Option<std::collections::HashMap<String, String>>,
 }
 
 impl StorageConfig {
@@ -44,6 +51,9 @@ impl StorageConfig {
             import_support,
             import_validity_regex,
             pre_sign_multipart_upload: None,
+            blockstore_id: None,
+            blockstore_description: None,
+            blockstore_extras: None,
         }
     }
 }
diff --git a/cmd/lakefs/cmd/run.go b/cmd/lakefs/cmd/run.go
index 0acd03a2221..66264a94093 100644
--- a/cmd/lakefs/cmd/run.go
+++ b/cmd/lakefs/cmd/run.go
@@ -137,7 +137,8 @@ var runCmd = &cobra.Command{
 	Short: "Run lakeFS",
 	Run: func(cmd *cobra.Command, args []string) {
 		logger := logging.ContextUnavailable()
-		cfg := loadConfig().GetBaseConfig()
+		cfg := loadConfig()
+		baseCfg := cfg.GetBaseConfig()
 		viper.WatchConfig()
 		viper.OnConfigChange(func(in fsnotify.Event) {
 			var c config.BaseConfig
@@ -156,7 +157,7 @@ var runCmd = &cobra.Command{
 		logger.WithField("version", version.Version).Info("lakeFS run")
 
-		kvParams, err := kvparams.NewConfig(&cfg.Database)
+		kvParams, err := kvparams.NewConfig(&baseCfg.Database)
 		if err != nil {
 			logger.WithError(err).Fatal("Get KV params")
 		}
@@ -174,14 +175,14 @@ var runCmd = &cobra.Command{
 		migrator := kv.NewDatabaseMigrator(kvParams)
 		multipartTracker := multipart.NewTracker(kvStore)
 		actionsStore := actions.NewActionsKVStore(kvStore)
-		authMetadataManager := auth.NewKVMetadataManager(version.Version, cfg.Installation.FixedID, cfg.Database.Type, kvStore)
+		authMetadataManager := auth.NewKVMetadataManager(version.Version, baseCfg.Installation.FixedID, baseCfg.Database.Type, kvStore)
 		idGen := &actions.DecreasingIDGenerator{}
 
-		authService := NewAuthService(ctx, cfg, logger, kvStore, authMetadataManager)
+		authService := NewAuthService(ctx, baseCfg, logger, kvStore, authMetadataManager)
 
 		// initialize authentication service
 		var authenticationService authentication.Service
-		if cfg.IsAuthenticationTypeAPI() {
-			authenticationService, err = authentication.NewAPIService(cfg.Auth.AuthenticationAPI.Endpoint, cfg.Auth.CookieAuthVerification.ValidateIDTokenClaims, logger.WithField("service", "authentication_api"), cfg.Auth.AuthenticationAPI.ExternalPrincipalsEnabled)
+		if baseCfg.IsAuthenticationTypeAPI() {
+			authenticationService, err = authentication.NewAPIService(baseCfg.Auth.AuthenticationAPI.Endpoint, baseCfg.Auth.CookieAuthVerification.ValidateIDTokenClaims, logger.WithField("service", "authentication_api"), baseCfg.Auth.AuthenticationAPI.ExternalPrincipalsEnabled)
 			if err != nil {
 				logger.WithError(err).Fatal("failed to create authentication service")
 			}
@@ -189,19 +190,19 @@ var runCmd = &cobra.Command{
 			authenticationService = authentication.NewDummyService()
 		}
 
-		cloudMetadataProvider := stats.BuildMetadataProvider(logger, cfg)
-		blockstoreType := cfg.Blockstore.Type
+		cloudMetadataProvider := stats.BuildMetadataProvider(logger, baseCfg)
+		blockstoreType := baseCfg.Blockstore.Type
 		if blockstoreType == "mem" {
 			printLocalWarning(os.Stderr, fmt.Sprintf("blockstore type %s", blockstoreType))
 			logger.WithField("adapter_type", blockstoreType).Warn("Block adapter NOT SUPPORTED for production use")
 		}
 		metadata := stats.NewMetadata(ctx, logger, blockstoreType, authMetadataManager, cloudMetadataProvider)
-		bufferedCollector := stats.NewBufferedCollector(metadata.InstallationID, stats.Config(cfg.Stats),
+		bufferedCollector := stats.NewBufferedCollector(metadata.InstallationID, stats.Config(baseCfg.Stats),
 			stats.WithLogger(logger.WithField("service", "stats_collector")))
 
 		// init block store
-		blockStore, err := blockfactory.BuildBlockAdapter(ctx, bufferedCollector, cfg)
+		blockStore, err := blockfactory.BuildBlockAdapter(ctx, bufferedCollector, baseCfg)
 		if err != nil {
 			logger.WithError(err).Fatal("Failed to create block adapter")
 		}
@@ -211,7 +212,7 @@ var runCmd = &cobra.Command{
 		bufferedCollector.CollectMetadata(metadata)
 
 		c, err := catalog.New(ctx, catalog.Config{
-			Config:       cfg,
+			Config:       baseCfg,
 			KVStore:      kvStore,
 			PathProvider: upload.DefaultPathProvider,
 		})
@@ -222,9 +223,9 @@ var runCmd = &cobra.Command{
 		// usage report setup - default usage reporter is a no-op
 		usageReporter := stats.DefaultUsageReporter
-		if cfg.UsageReport.Enabled {
+		if baseCfg.UsageReport.Enabled {
 			ur := stats.NewUsageReporter(metadata.InstallationID, kvStore)
-			ur.Start(ctx, cfg.UsageReport.FlushInterval, logger.WithField("service", "usage_report"))
+			ur.Start(ctx, baseCfg.UsageReport.FlushInterval, logger.WithField("service", "usage_report"))
 			usageReporter = ur
 		}
@@ -238,14 +239,14 @@ var runCmd = &cobra.Command{
 		// initial setup - support only when a local database is configured.
 		// local database lock will make sure that only one instance will run the setup.
 		if (kvParams.Type == local.DriverName || kvParams.Type == mem.DriverName) &&
-			cfg.Installation.UserName != "" && cfg.Installation.AccessKeyID.SecureValue() != "" && cfg.Installation.SecretAccessKey.SecureValue() != "" {
-			setupCreds, err := setupLakeFS(ctx, cfg, authMetadataManager, authService, cfg.Installation.UserName,
-				cfg.Installation.AccessKeyID.SecureValue(), cfg.Installation.SecretAccessKey.SecureValue())
+			baseCfg.Installation.UserName != "" && baseCfg.Installation.AccessKeyID.SecureValue() != "" && baseCfg.Installation.SecretAccessKey.SecureValue() != "" {
+			setupCreds, err := setupLakeFS(ctx, baseCfg, authMetadataManager, authService, baseCfg.Installation.UserName,
+				baseCfg.Installation.AccessKeyID.SecureValue(), baseCfg.Installation.SecretAccessKey.SecureValue())
 			if err != nil {
-				logger.WithError(err).WithField("admin", cfg.Installation.UserName).Fatal("Failed to initial setup environment")
+				logger.WithError(err).WithField("admin", baseCfg.Installation.UserName).Fatal("Failed to initial setup environment")
 			}
 			if setupCreds != nil {
-				logger.WithField("admin", cfg.Installation.UserName).Info("Initial setup completed successfully")
+				logger.WithField("admin", baseCfg.Installation.UserName).Info("Initial setup completed successfully")
 			}
 		}
@@ -256,8 +257,8 @@ var runCmd = &cobra.Command{
 			catalog.NewActionsOutputWriter(c.BlockAdapter),
 			idGen,
 			bufferedCollector,
-			actions.Config(cfg.Actions),
-			cfg.ListenAddress,
+			actions.Config(baseCfg.Actions),
+			baseCfg.ListenAddress,
 		)
 
 		// wire actions into entry catalog
@@ -269,8 +270,8 @@ var runCmd = &cobra.Command{
 		}
 
 		// remote authenticator setup
-		if cfg.Auth.RemoteAuthenticator.Enabled {
-			remoteAuthenticator, err := authremote.NewAuthenticator(authremote.AuthenticatorConfig(cfg.Auth.RemoteAuthenticator), authService, logger)
+		if baseCfg.Auth.RemoteAuthenticator.Enabled {
+			remoteAuthenticator, err := authremote.NewAuthenticator(authremote.AuthenticatorConfig(baseCfg.Auth.RemoteAuthenticator), authService, logger)
 			if err != nil {
 				logger.WithError(err).Fatal("failed to create remote authenticator")
 			}
@@ -278,10 +279,10 @@ var runCmd = &cobra.Command{
 			middlewareAuthenticator = append(middlewareAuthenticator, remoteAuthenticator)
 		}
 
-		auditChecker := version.NewDefaultAuditChecker(cfg.Security.AuditCheckURL, metadata.InstallationID, version.NewDefaultVersionSource(cfg.Security.CheckLatestVersionCache))
+		auditChecker := version.NewDefaultAuditChecker(baseCfg.Security.AuditCheckURL, metadata.InstallationID, version.NewDefaultVersionSource(baseCfg.Security.CheckLatestVersionCache))
 		defer auditChecker.Close()
 		if !version.IsVersionUnreleased() {
-			auditChecker.StartPeriodicCheck(ctx, cfg.Security.AuditCheckInterval, logger)
+			auditChecker.StartPeriodicCheck(ctx, baseCfg.Security.AuditCheckInterval, logger)
 		}
 
 		allowForeign, err := cmd.Flags().GetBool(mismatchedReposFlagName)
@@ -310,24 +311,24 @@ var runCmd = &cobra.Command{
 			actionsService,
 			auditChecker,
 			logger.WithField("service", "api_gateway"),
-			cfg.Gateways.S3.DomainNames,
-			cfg.UISnippets(),
+			baseCfg.Gateways.S3.DomainNames,
+			baseCfg.UISnippets(),
 			upload.DefaultPathProvider,
 			usageReporter,
 		)
 
 		// init gateway server
 		var s3FallbackURL *url.URL
-		if cfg.Gateways.S3.FallbackURL != "" {
-			s3FallbackURL, err = url.Parse(cfg.Gateways.S3.FallbackURL)
+		if baseCfg.Gateways.S3.FallbackURL != "" {
+			s3FallbackURL, err = url.Parse(baseCfg.Gateways.S3.FallbackURL)
 			if err != nil {
 				logger.WithError(err).Fatal("Failed to parse s3 fallback URL")
 			}
 		}
 
 		// setup authenticator for s3 gateway to also support swagger auth
-		oidcConfig := api.OIDCConfig(cfg.Auth.OIDC)
-		cookieAuthConfig := api.CookieAuthConfig(cfg.Auth.CookieAuthVerification)
+		oidcConfig := api.OIDCConfig(baseCfg.Auth.OIDC)
+		cookieAuthConfig := api.CookieAuthConfig(baseCfg.Auth.CookieAuthVerification)
 		apiAuthenticator, err := api.GenericAuthMiddleware(
 			logger.WithField("service", "s3_gateway"),
 			middlewareAuthenticator,
@@ -340,19 +341,19 @@ var runCmd = &cobra.Command{
 		}
 
 		s3gatewayHandler := gateway.NewHandler(
-			cfg.Gateways.S3.Region,
+			baseCfg.Gateways.S3.Region,
 			c,
 			multipartTracker,
 			blockStore,
 			authService,
-			cfg.Gateways.S3.DomainNames,
+			baseCfg.Gateways.S3.DomainNames,
 			bufferedCollector,
 			upload.DefaultPathProvider,
 			s3FallbackURL,
-			cfg.Logging.AuditLogLevel,
-			cfg.Logging.TraceRequestHeaders,
-			cfg.Gateways.S3.VerifyUnsupported,
-			cfg.IsAdvancedAuth(),
+			baseCfg.Logging.AuditLogLevel,
+			baseCfg.Logging.TraceRequestHeaders,
+			baseCfg.Gateways.S3.VerifyUnsupported,
+			baseCfg.IsAdvancedAuth(),
 		)
 		s3gatewayHandler = apiAuthenticator(s3gatewayHandler)
@@ -361,14 +362,14 @@ var runCmd = &cobra.Command{
 		bufferedCollector.CollectEvent(stats.Event{Class: "global", Name: "run"})
 
-		logger.WithField("listen_address", cfg.ListenAddress).Info("starting HTTP server")
+		logger.WithField("listen_address", baseCfg.ListenAddress).Info("starting HTTP server")
 		server := &http.Server{
-			Addr:              cfg.ListenAddress,
+			Addr:              baseCfg.ListenAddress,
 			ReadHeaderTimeout: time.Minute,
 			Handler: http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
 				// If the request has the S3 GW domain (exact or subdomain) - or carries an AWS sig, serve S3GW
-				if httputil.HostMatches(request, cfg.Gateways.S3.DomainNames) ||
-					httputil.HostSubdomainOf(request, cfg.Gateways.S3.DomainNames) ||
+				if httputil.HostMatches(request, baseCfg.Gateways.S3.DomainNames) ||
+					httputil.HostSubdomainOf(request, baseCfg.Gateways.S3.DomainNames) ||
 					sig.IsAWSSignedRequest(request) {
 					s3gatewayHandler.ServeHTTP(writer, request)
 					return
@@ -383,13 +384,13 @@ var runCmd = &cobra.Command{
 		go func() {
 			var err error
-			if cfg.TLS.Enabled {
-				err = server.ListenAndServeTLS(cfg.TLS.CertFile, cfg.TLS.KeyFile)
+			if baseCfg.TLS.Enabled {
+				err = server.ListenAndServeTLS(baseCfg.TLS.CertFile, baseCfg.TLS.KeyFile)
 			} else {
 				err = server.ListenAndServe()
 			}
 			if err != nil && !errors.Is(err, http.ErrServerClosed) {
-				_, _ = fmt.Fprintf(os.Stderr, "Failed to listen on %s: %v\n", cfg.ListenAddress, err)
+				_, _ = fmt.Fprintf(os.Stderr, "Failed to listen on %s: %v\n", baseCfg.ListenAddress, err)
 				os.Exit(1)
 			}
 		}()
diff --git a/docs/assets/js/swagger.yml b/docs/assets/js/swagger.yml
index 039303747f9..0fd98e96f06 100644
--- a/docs/assets/js/swagger.yml
+++ b/docs/assets/js/swagger.yml
@@ -1253,6 +1253,20 @@ components:
           type: string
         pre_sign_multipart_upload:
           type: boolean
+        blockstore_id:
+          type: string
+        blockstore_description:
+          type: string
+        blockstore_extras:
+          type: object
+          description: blockstore specific properties
+          additionalProperties:
+            type: string
+
+    StorageConfigList:
+      type: array
+      items:
+        $ref: "#/components/schemas/StorageConfig"
 
     Config:
       type: object
@@ -1261,6 +1275,8 @@
           $ref: "#/components/schemas/VersionConfig"
         storage_config:
           $ref: "#/components/schemas/StorageConfig"
+        storage_config_list:
+          $ref: "#/components/schemas/StorageConfigList"
 
     VersionConfig:
       type: object
       properties:
diff --git a/pkg/api/controller.go b/pkg/api/controller.go
index 8c0ce67be80..6c074d2ea15 100644
--- a/pkg/api/controller.go
+++ b/pkg/api/controller.go
@@ -88,7 +88,7 @@ type Migrator interface {
 }
 
 type Controller struct {
-	Config                *config.BaseConfig
+	Config                config.Config
 	Catalog               *catalog.Catalog
 	Authenticator         auth.Authenticator
 	Auth                  auth.Service
@@ -108,7 +108,7 @@ type Controller struct {
 
 var usageCounter = stats.NewUsageCounter()
 
-func NewController(cfg *config.BaseConfig, catalog *catalog.Catalog, authenticator auth.Authenticator, authService auth.Service, authenticationService authentication.Service, blockAdapter block.Adapter, metadataManager auth.MetadataManager, migrator Migrator, collector stats.Collector, cloudMetadataProvider cloud.MetadataProvider, actions actionsHandler, auditChecker AuditChecker, logger logging.Logger, sessionStore sessions.Store, pathProvider upload.PathProvider, usageReporter stats.UsageReporterOperations) *Controller {
+func NewController(cfg config.Config, catalog *catalog.Catalog, authenticator auth.Authenticator, authService auth.Service, authenticationService authentication.Service, blockAdapter block.Adapter, metadataManager auth.MetadataManager, migrator Migrator, collector stats.Collector, cloudMetadataProvider cloud.MetadataProvider, actions actionsHandler, auditChecker AuditChecker, logger logging.Logger, sessionStore sessions.Store, pathProvider upload.PathProvider, usageReporter stats.UsageReporterOperations) *Controller {
 	return &Controller{
 		Config:                cfg,
 		Catalog:               catalog,
@@ -565,7 +565,7 @@ func (c *Controller) Login(w http.ResponseWriter, r *http.Request, body apigen.L
 	}
 
 	loginTime := time.Now()
-	duration := c.Config.Auth.LoginDuration
+	duration := c.Config.GetBaseConfig().Auth.LoginDuration
 	expires := loginTime.Add(duration)
 	secret := c.Auth.SecretStore().SharedSecret()
@@ -609,13 +609,13 @@ func (c *Controller) ExternalPrincipalLogin(w http.ResponseWriter, r *http.Reque
 		return
 	}
 	c.Logger.WithField("user_id", externalPrincipalIDInfo.UserID).Debug("got external principal ID info, generating a new JWT")
-	duration := c.Config.Auth.LoginDuration
+	duration := c.Config.GetBaseConfig().Auth.LoginDuration
 	if swag.IntValue(body.TokenExpirationDuration) > 0 {
 		duration = time.Second * time.Duration(*body.TokenExpirationDuration)
 	}
-	if duration > c.Config.Auth.LoginMaxDuration {
-		c.Logger.WithFields(logging.Fields{"duration": duration, "max_duration": c.Config.Auth.LoginMaxDuration}).Warn("Login duration exceeds maximum allowed, using maximum allowed")
-		duration = c.Config.Auth.LoginMaxDuration
+	if duration > c.Config.GetBaseConfig().Auth.LoginMaxDuration {
+		c.Logger.WithFields(logging.Fields{"duration": duration, "max_duration": c.Config.GetBaseConfig().Auth.LoginMaxDuration}).Warn("Login duration exceeds maximum allowed, using maximum allowed")
+		duration = c.Config.GetBaseConfig().Auth.LoginMaxDuration
 	}
 	loginTime := time.Now()
 	expires := loginTime.Add(duration)
@@ -1160,7 +1160,7 @@ func (c *Controller) AddGroupMembership(w http.ResponseWriter, r *http.Request,
 }
 
 func (c *Controller) ListGroupPolicies(w http.ResponseWriter, r *http.Request, groupID string, params apigen.ListGroupPoliciesParams) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1217,7 +1217,7 @@ func serializePolicy(p *model.Policy) apigen.Policy {
 }
 
 func (c *Controller) DetachPolicyFromGroup(w http.ResponseWriter, r *http.Request, groupID, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1239,7 +1239,7 @@ func (c *Controller) DetachPolicyFromGroup(w http.ResponseWriter, r *http.Reques
 }
 
 func (c *Controller) AttachPolicyToGroup(w http.ResponseWriter, r *http.Request, groupID, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1262,7 +1262,7 @@ func (c *Controller) AttachPolicyToGroup(w http.ResponseWriter, r *http.Request,
 }
 
 func (c *Controller) ListPolicies(w http.ResponseWriter, r *http.Request, params apigen.ListPoliciesParams) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1301,7 +1301,7 @@ func (c *Controller) ListPolicies(w http.ResponseWriter, r *http.Request, params
 }
 
 func (c *Controller) CreatePolicy(w http.ResponseWriter, r *http.Request, body apigen.CreatePolicyJSONRequestBody) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1347,7 +1347,7 @@ func (c *Controller) CreatePolicy(w http.ResponseWriter, r *http.Request, body a
 }
 
 func (c *Controller) DeletePolicy(w http.ResponseWriter, r *http.Request, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1373,7 +1373,7 @@ func (c *Controller) DeletePolicy(w http.ResponseWriter, r *http.Request, policy
 }
 
 func (c *Controller) GetPolicy(w http.ResponseWriter, r *http.Request, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1401,7 +1401,7 @@ func (c *Controller) GetPolicy(w http.ResponseWriter, r *http.Request, policyID
 }
 
 func (c *Controller) UpdatePolicy(w http.ResponseWriter, r *http.Request, body apigen.UpdatePolicyJSONRequestBody, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1728,7 +1728,7 @@ func (c *Controller) ListUserGroups(w http.ResponseWriter, r *http.Request, user
 }
 
 func (c *Controller) ListUserPolicies(w http.ResponseWriter, r *http.Request, userID string, params apigen.ListUserPoliciesParams) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1773,7 +1773,7 @@ func (c *Controller) ListUserPolicies(w http.ResponseWriter, r *http.Request, us
 }
 
 func (c *Controller) DetachPolicyFromUser(w http.ResponseWriter, r *http.Request, userID, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1795,7 +1795,7 @@ func (c *Controller) DetachPolicyFromUser(w http.ResponseWriter, r *http.Request
 }
 
 func (c *Controller) AttachPolicyToUser(w http.ResponseWriter, r *http.Request, userID, policyID string) {
-	if c.Config.IsAuthUISimplified() {
+	if c.Config.GetBaseConfig().IsAuthUISimplified() {
 		writeError(w, r, http.StatusNotImplemented, "Not implemented")
 		return
 	}
@@ -1823,7 +1823,6 @@ func (c *Controller) GetConfig(w http.ResponseWriter, r *http.Request) {
 		writeError(w, r, http.StatusUnauthorized, ErrAuthenticatingRequest)
 		return
 	}
-	var storageCfg apigen.StorageConfig
 	internalError := false
 	if !c.authorizeCallback(w, r, permissions.Node{
 		Permission: permissions.Permission{
@@ -1842,12 +1841,13 @@ func (c *Controller) GetConfig(w http.ResponseWriter, r *http.Request) {
 		if internalError {
 			return
 		}
-	} else {
-		storageCfg = c.getStorageConfig()
 	}
+	storageCfg := c.getStorageConfig()
+	//TODO (niro): Needs to be populated
+	storageListCfg := apigen.StorageConfigList{}
 	versionConfig := c.getVersionConfig()
-	writeResponse(w, r, http.StatusOK, apigen.Config{StorageConfig: &storageCfg, VersionConfig: &versionConfig})
+	writeResponse(w, r, http.StatusOK, apigen.Config{StorageConfig: &storageCfg, VersionConfig: &versionConfig, StorageConfigList: &storageListCfg})
 }
 
 func (c *Controller) GetStorageConfig(w http.ResponseWriter, r *http.Request) {
@@ -1866,11 +1866,11 @@ func (c *Controller) GetStorageConfig(w http.ResponseWriter, r *http.Request) {
 func (c *Controller) getStorageConfig() apigen.StorageConfig {
 	info := c.BlockAdapter.GetStorageNamespaceInfo()
 	defaultNamespacePrefix := swag.String(info.DefaultNamespacePrefix)
-	if c.Config.Blockstore.DefaultNamespacePrefix != nil {
-		defaultNamespacePrefix = c.Config.Blockstore.DefaultNamespacePrefix
+	if c.Config.GetBaseConfig().Blockstore.DefaultNamespacePrefix != nil {
+		defaultNamespacePrefix = c.Config.GetBaseConfig().Blockstore.DefaultNamespacePrefix
 	}
 	return apigen.StorageConfig{
-		BlockstoreType:                   c.Config.Blockstore.Type,
+		BlockstoreType:                   c.Config.GetBaseConfig().Blockstore.Type,
 		BlockstoreNamespaceValidityRegex: info.ValidityRegex,
 		BlockstoreNamespaceExample:       info.Example,
 		DefaultNamespacePrefix:           defaultNamespacePrefix,
@@ -1965,7 +1965,7 @@ func (c *Controller) CreateRepository(w http.ResponseWriter, r *http.Request, bo
 		return
 	}
 
-	if !c.Config.Installation.AllowInterRegionStorage {
+	if !c.Config.GetBaseConfig().Installation.AllowInterRegionStorage {
 		if err := block.ValidateInterRegionStorage(r.Context(), c.BlockAdapter, body.StorageNamespace); err != nil {
 			writeError(w, r, http.StatusBadRequest, err)
 			return
@@ -2083,14 +2083,14 @@ func (c *Controller) ensureStorageNamespace(ctx context.Context, storageNamespac
 		dummyData    = "this is dummy data - created by lakeFS to check accessibility"
 		dummyObjName = "dummy"
 	)
-	dummyKey := fmt.Sprintf("%s/%s", c.Config.Committed.BlockStoragePrefix, dummyObjName)
+	dummyKey := fmt.Sprintf("%s/%s", c.Config.GetBaseConfig().Committed.BlockStoragePrefix, dummyObjName)
 	objLen := int64(len(dummyData))
 
 	// check if the dummy file exist in the root of the storage namespace
 	// this serves two purposes, first, we maintain safety check for older lakeFS version.
 	// second, in scenarios where lakeFS shouldn't have access to the root namespace (i.e pre-sign URL only).
-	if c.Config.Graveler.EnsureReadableRootNamespace {
+	if c.Config.GetBaseConfig().Graveler.EnsureReadableRootNamespace {
 		rootObj := block.ObjectPointer{
 			StorageNamespace: storageNamespace,
 			IdentifierType:   block.IdentifierTypeRelative,
@@ -3857,7 +3857,7 @@ func (c *Controller) DumpRefs(w http.ResponseWriter, r *http.Request, repository
 	_, err = c.BlockAdapter.Put(ctx, block.ObjectPointer{
 		StorageNamespace: repo.StorageNamespace,
 		IdentifierType:   block.IdentifierTypeRelative,
-		Identifier:       fmt.Sprintf("%s/refs_manifest.json", c.Config.Committed.BlockStoragePrefix),
+		Identifier:       fmt.Sprintf("%s/refs_manifest.json", c.Config.GetBaseConfig().Committed.BlockStoragePrefix),
 	}, int64(len(manifestBytes)), bytes.NewReader(manifestBytes), block.PutOpts{})
 	if err != nil {
 		writeError(w, r, http.StatusInternalServerError, err)
@@ -4964,10 +4964,10 @@ func (c *Controller) GetSetupState(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 
 	// external auth reports as initialized to avoid triggering the setup wizard
-	if c.Config.Auth.UIConfig.RBAC == config.AuthRBACExternal {
+	if c.Config.GetBaseConfig().Auth.UIConfig.RBAC == config.AuthRBACExternal {
 		response := apigen.SetupState{
 			State:            swag.String(string(auth.SetupStateInitialized)),
-			LoginConfig:      newLoginConfig(c.Config),
+			LoginConfig:      newLoginConfig(c.Config.GetBaseConfig()),
 			CommPrefsMissing: swag.Bool(false),
 		}
 		writeResponse(w, r, http.StatusOK, response)
@@ -4985,13 +4985,13 @@ func (c *Controller) GetSetupState(w http.ResponseWriter, r *http.Request) {
 
 	response := apigen.SetupState{
 		State:       swag.String(string(savedState)),
-		LoginConfig: newLoginConfig(c.Config),
+		LoginConfig: newLoginConfig(c.Config.GetBaseConfig()),
 	}
 
 	// if email subscription is disabled in the config, set the missing flag to false.
 	// otherwise, check if the comm prefs are set.
 	// if they are, set the missing flag to false.
-	if !c.Config.EmailSubscription.Enabled {
+	if !c.Config.GetBaseConfig().EmailSubscription.Enabled {
 		response.CommPrefsMissing = swag.Bool(false)
 		writeResponse(w, r, http.StatusOK, response)
 		return
@@ -5038,7 +5038,7 @@ func (c *Controller) Setup(w http.ResponseWriter, r *http.Request, body apigen.S
 		return
 	}
 
-	if c.Config.Auth.UIConfig.RBAC == config.AuthRBACExternal {
+	if c.Config.GetBaseConfig().Auth.UIConfig.RBAC == config.AuthRBACExternal {
 		// nothing to do - users are managed elsewhere
 		writeResponse(w, r, http.StatusOK, apigen.CredentialsWithSecret{})
 		return
@@ -5046,9 +5046,9 @@ func (c *Controller) Setup(w http.ResponseWriter, r *http.Request, body apigen.S
 
 	var cred *model.Credential
 	if body.Key == nil {
-		cred, err = setup.CreateInitialAdminUser(ctx, c.Auth, c.Config, c.MetadataManager, body.Username)
+		cred, err = setup.CreateInitialAdminUser(ctx, c.Auth, c.Config.GetBaseConfig(), c.MetadataManager, body.Username)
 	} else {
-		cred, err = setup.CreateInitialAdminUserWithKeys(ctx, c.Auth, c.Config, c.MetadataManager, body.Username, &body.Key.AccessKeyId, &body.Key.SecretAccessKey)
+		cred, err = setup.CreateInitialAdminUserWithKeys(ctx, c.Auth, c.Config.GetBaseConfig(), c.MetadataManager, body.Username, &body.Key.AccessKeyId, &body.Key.SecretAccessKey)
 	}
 	if err != nil {
 		writeError(w, r, http.StatusInternalServerError, err)
@@ -5103,7 +5103,7 @@ func (c *Controller) SetupCommPrefs(w http.ResponseWriter, r *http.Request, body
 		InstallationID:  installationID,
 		FeatureUpdates:  commPrefs.FeatureUpdates,
 		SecurityUpdates: commPrefs.SecurityUpdates,
-		BlockstoreType:  c.Config.BlockstoreType(),
+		BlockstoreType:  c.Config.GetBaseConfig().BlockstoreType(),
 	}
 	// collect comm prefs
 	go c.Collector.CollectCommPrefs(commPrefsED)
@@ -5156,7 +5156,7 @@ func (c *Controller) getVersionConfig() apigen.VersionConfig {
 		}
 	}
 
-	if c.Config.Security.CheckLatestVersion {
+	if c.Config.GetBaseConfig().Security.CheckLatestVersion {
 		latest, err := c.AuditChecker.CheckLatestVersion()
 		// set upgrade recommended based on latest version
 		if err != nil {
@@ -5847,5 +5847,5 @@ func (c *Controller) ListUserExternalPrincipals(w http.ResponseWriter, r *http.R
 func (c *Controller) isExternalPrincipalNotSupported(ctx context.Context) bool {
 	// if IsAuthUISimplified true then it means the user not using RBAC model
-	return c.Config.IsAuthUISimplified() || !c.Auth.IsExternalPrincipalsEnabled(ctx)
+	return c.Config.GetBaseConfig().IsAuthUISimplified() || !c.Auth.IsExternalPrincipalsEnabled(ctx)
 }
diff --git a/pkg/api/serve.go b/pkg/api/serve.go
index b396e73d1d4..b87459303a0 100644
--- a/pkg/api/serve.go
+++ b/pkg/api/serve.go
@@ -33,15 +33,15 @@ const (
 	extensionValidationExcludeBody = "x-validation-exclude-body"
 )
 
-func Serve(cfg *config.BaseConfig, catalog *catalog.Catalog, middlewareAuthenticator auth.Authenticator, authService auth.Service, authenticationService authentication.Service, blockAdapter block.Adapter, metadataManager auth.MetadataManager, migrator Migrator, collector stats.Collector, cloudMetadataProvider cloud.MetadataProvider, actions actionsHandler, auditChecker AuditChecker, logger logging.Logger, gatewayDomains []string, snippets []params.CodeSnippet, pathProvider upload.PathProvider, usageReporter stats.UsageReporterOperations) http.Handler {
+func Serve(cfg config.Config, catalog *catalog.Catalog, middlewareAuthenticator auth.Authenticator, authService auth.Service, authenticationService authentication.Service, blockAdapter block.Adapter, metadataManager auth.MetadataManager, migrator Migrator, collector stats.Collector, cloudMetadataProvider cloud.MetadataProvider, actions actionsHandler, auditChecker AuditChecker, logger logging.Logger, gatewayDomains []string, snippets []params.CodeSnippet, pathProvider upload.PathProvider, usageReporter stats.UsageReporterOperations) http.Handler {
 	logger.Info("initialize OpenAPI server")
 	swagger, err := apigen.GetSwagger()
 	if err != nil {
 		panic(err)
 	}
 	sessionStore := sessions.NewCookieStore(authService.SecretStore().SharedSecret())
-	oidcConfig := OIDCConfig(cfg.Auth.OIDC)
-	cookieAuthConfig := CookieAuthConfig(cfg.Auth.CookieAuthVerification)
+	oidcConfig := OIDCConfig(cfg.GetBaseConfig().Auth.OIDC)
+	cookieAuthConfig := CookieAuthConfig(cfg.GetBaseConfig().Auth.CookieAuthVerification)
 	r := chi.NewRouter()
 	apiRouter := r.With(
 		OapiRequestValidatorWithOptions(swagger, &openapi3filter.Options{
@@ -50,9 +50,9 @@ func Serve(cfg *config.BaseConfig, catalog *catalog.Catalog, middlewareAuthentic
 		httputil.LoggingMiddleware(
 			httputil.RequestIDHeaderName,
 			logging.Fields{logging.ServiceNameFieldKey: LoggerServiceName},
-			cfg.Logging.AuditLogLevel,
-			cfg.Logging.TraceRequestHeaders,
-			cfg.IsAdvancedAuth()),
+			cfg.GetBaseConfig().Logging.AuditLogLevel,
+			cfg.GetBaseConfig().Logging.TraceRequestHeaders,
+			cfg.GetBaseConfig().IsAdvancedAuth()),
 		AuthMiddleware(logger, swagger, middlewareAuthenticator, authService, sessionStore, &oidcConfig, &cookieAuthConfig),
 		MetricsMiddleware(swagger),
 	)
@@ -64,12 +64,12 @@ func Serve(cfg *config.BaseConfig, catalog *catalog.Catalog, middlewareAuthentic
 	r.Mount("/_pprof/", httputil.ServePPROF("/_pprof/"))
 	r.Mount("/openapi.json", http.HandlerFunc(swaggerSpecHandler))
 	r.Mount(apiutil.BaseURL, http.HandlerFunc(InvalidAPIEndpointHandler))
-	r.Mount("/logout", NewLogoutHandler(sessionStore, logger, cfg.Auth.LogoutRedirectURL))
+	r.Mount("/logout", NewLogoutHandler(sessionStore, logger, cfg.GetBaseConfig().Auth.LogoutRedirectURL))
 
 	// Configuration flag to control if the embedded UI is served
 	// or not and assign the correct handler for each case.
 	var rootHandler http.Handler
-	if cfg.UI.Enabled {
+	if cfg.GetBaseConfig().UI.Enabled {
 		// Handler which serves the embedded UI
 		// as well as handles erroneous S3 gateway requests
 		// and returns a compatible response
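For reference, a minimal sketch of how the new fields surface through the regenerated Python SDK models in this patch. The field and helper names (`Config.from_dict`, `storage_config_list`, `blockstore_id`, `blockstore_description`, `blockstore_extras`) are taken from the diff above; the sample payload values are hypothetical, and the server currently returns an empty `storage_config_list` (see the TODO in `GetConfig`):

```python
# Sketch: parsing a /config-style payload with the regenerated lakefs_sdk models.
from lakefs_sdk.models.config import Config

payload = {
    "storage_config_list": [
        {
            # required StorageConfig fields per the schema
            "blockstore_type": "s3",
            "blockstore_namespace_example": "s3://example-bucket/",
            "blockstore_namespace_ValidityRegex": "^s3://.*$",
            "pre_sign_support": True,
            "pre_sign_support_ui": True,
            "import_support": True,
            "import_validity_regex": "^s3://.*$",
            # new optional fields added by this change
            "blockstore_id": "primary",
            "blockstore_description": "primary S3 blockstore",
            "blockstore_extras": {"region": "us-east-1"},
        }
    ]
}

cfg = Config.from_dict(payload)
for sc in cfg.storage_config_list or []:
    print(sc.blockstore_id, sc.blockstore_description, sc.blockstore_extras)
```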