diff --git a/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml b/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml
index fc8e168813..4a3d383277 100644
--- a/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml
+++ b/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml
@@ -1,7 +1,7 @@
 $schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json
 
 name: convert_model_to_mlflow
-version: 0.0.35
+version: 0.0.36
 type: command
 
 is_deterministic: True
diff --git a/assets/training/model_management/components/import_model/spec.yaml b/assets/training/model_management/components/import_model/spec.yaml
index 850714a8bc..dad916e20f 100644
--- a/assets/training/model_management/components/import_model/spec.yaml
+++ b/assets/training/model_management/components/import_model/spec.yaml
@@ -4,7 +4,7 @@ type: pipeline
 name: import_model
 display_name: Import model
 description: Import a model into a workspace or a registry
-version: 0.0.41
+version: 0.0.42
 
 # Pipeline inputs
 inputs:
@@ -252,6 +252,42 @@ jobs:
       validation_info:
         type: uri_file
 
+  model_framework_selector:
+    type: command
+    component: azureml:model_framework_selector:0.0.1
+    compute: ${{parent.inputs.compute}}
+    resources:
+      instance_type: '${{parent.inputs.instance_type}}'
+    identity:
+      type: user_identity
+    inputs:
+      validation_info: '${{parent.jobs.validation_trigger_import.outputs.validation_info}}'
+      model_framework: '${{parent.inputs.model_framework}}'
+    outputs:
+      is_mmd_framework:
+        type: boolean
+
+  is_mmd_model:
+    type: if_else
+    condition: ${{parent.jobs.model_framework_selector.outputs.is_mmd_framework}}
+    true_block: ${{parent.jobs.download_mmd_model}}
+    false_block: ${{parent.jobs.download_model}}
+
+  download_mmd_model:
+    component: azureml:mmdetection_image_objectdetection_instancesegmentation_model_import:0.0.19
+    compute: ${{parent.inputs.compute}}
+    resources:
+      instance_type: '${{parent.inputs.instance_type}}'
+    identity:
+      type: user_identity
+    inputs:
+      model_family: 'MmDetectionImage'
+      model_name: ${{parent.inputs.model_id}}
+      download_from_source: true
+    outputs:
+      output_dir:
+        type: uri_folder
+
   download_model:
     component: azureml:download_model:0.0.30
     compute: ${{parent.inputs.compute}}
@@ -262,7 +298,6 @@ jobs:
     inputs:
       model_source: ${{parent.inputs.model_source}}
       model_id: ${{parent.inputs.model_id}}
-      validation_info: ${{parent.jobs.validation_trigger_import.outputs.validation_info}}
       update_existing_model: ${{parent.inputs.update_existing_model}}
       token: ${{parent.inputs.token}}
     outputs:
@@ -272,7 +307,7 @@ jobs:
         type: uri_folder
 
   convert_model_to_mlflow:
-    component: azureml:convert_model_to_mlflow:0.0.35
+    component: azureml:convert_model_to_mlflow:0.0.36
     compute: ${{parent.inputs.compute}}
     resources:
       instance_type: '${{parent.inputs.instance_type}}'
@@ -286,6 +321,7 @@ jobs:
       model_framework: ${{parent.inputs.model_framework}}
       model_download_metadata: ${{parent.jobs.download_model.outputs.model_download_metadata}}
       model_path: ${{parent.jobs.download_model.outputs.model_output}}
+      model_path_mmd:  ${{parent.jobs.download_mmd_model.outputs.output_dir}}
       hf_config_args: ${{parent.inputs.hf_config_args}}
       hf_tokenizer_args:  ${{parent.inputs.hf_tokenizer_args}}
       hf_model_args: ${{parent.inputs.hf_model_args}}
diff --git a/assets/training/model_management/components/model_framework_selector/asset.yaml b/assets/training/model_management/components/model_framework_selector/asset.yaml
new file mode 100644
index 0000000000..ea30e0bd5b
--- /dev/null
+++ b/assets/training/model_management/components/model_framework_selector/asset.yaml
@@ -0,0 +1,3 @@
+type: component
+spec: spec.yaml
+categories: ["Models"]
diff --git a/assets/training/model_management/components/model_framework_selector/spec.yaml b/assets/training/model_management/components/model_framework_selector/spec.yaml
new file mode 100644
index 0000000000..74b2ab6ae6
--- /dev/null
+++ b/assets/training/model_management/components/model_framework_selector/spec.yaml
@@ -0,0 +1,42 @@
+$schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json
+
+type: command
+name: model_framework_selector
+display_name: Model Framework Selector
+description: Checks the framework of the model
+version: 0.0.1
+
+is_deterministic: True
+
+inputs:
+
+  model_framework:
+    type: string
+    enum:
+      - Huggingface
+      - MMLab
+      - llava
+      - AutoML
+    default: Huggingface
+    optional: false
+    description: Framework from which the model is imported.
+
+  validation_info:
+    type: uri_file
+    description: Path to the validation info file
+    optional: false
+
+# Pipeline outputs
+outputs:
+  is_mmd_framework:
+    type: boolean
+    mode: rw_mount
+    is_control: true
+
+environment: azureml://registries/azureml/environments/model-evaluation/versions/37
+code: ../../src
+command: mldesigner execute --source run_model_framework_selector.py --name validate --inputs model_framework='${{inputs.model_framework}}' --outputs output='${{outputs.is_mmd_framework}}'
+
+tags:
+    Preview: ""
+    Internal: ""
diff --git a/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/convertors.py b/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/convertors.py
index f4c45a3393..036ee6ec55 100644
--- a/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/convertors.py
+++ b/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/convertors.py
@@ -193,11 +193,11 @@ def save_as_mlflow(self):
         mlflow_model_wrapper = ImagesDetectionMLflowModelWrapper(task_type=self._task)
         artifacts_dict = self._prepare_artifacts_dict()
         if self._task == MMLabDetectionTasks.MM_OBJECT_DETECTION.value:
-            pip_requirements = os.path.join(self.MODEL_DIR, "mmdet-od-requirements.txt")
+            conda_env_file = os.path.join(self.MODEL_DIR, "conda_od.yaml")
         elif self._task == MMLabDetectionTasks.MM_INSTANCE_SEGMENTATION.value:
-            pip_requirements = os.path.join(self.MODEL_DIR, "mmdet-is-requirements.txt")
+            conda_env_file = os.path.join(self.MODEL_DIR, "conda_is.yaml")
         else:
-            pip_requirements = None
+            conda_env_file = None
         code_path = [
             os.path.join(self.MODEL_DIR, "detection_predict.py"),
             os.path.join(self.MODEL_DIR, "config.py"),
@@ -206,8 +206,8 @@ def save_as_mlflow(self):
         super()._save(
             mlflow_model_wrapper=mlflow_model_wrapper,
             artifacts_dict=artifacts_dict,
-            pip_requirements=pip_requirements,
             code_path=code_path,
+            conda_env=conda_env_file,
         )
 
     def _prepare_artifacts_dict(self) -> Dict:
diff --git a/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/vision/conda_is.yaml b/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/vision/conda_is.yaml
new file mode 100644
index 0000000000..96a48f8c67
--- /dev/null
+++ b/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/vision/conda_is.yaml
@@ -0,0 +1,19 @@
+channels:
+- conda-forge
+dependencies:
+- python=3.9.19
+- pip<=24.0
+- pip:
+  - mlflow==2.12.1
+  - cloudpickle==2.2.1
+  - datasets==2.15.0
+  - openmim==0.3.9
+  - torch==2.0.1
+  - torchvision==0.15.2
+  - transformers==4.38.2
+  - accelerate==0.27.2
+  - albumentations==1.3.0
+  - scikit-image==0.19.3
+  - simplification==0.7.10
+  - fairscale==0.4.13
+name: mlflow-env
diff --git a/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/vision/conda_od.yaml b/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/vision/conda_od.yaml
new file mode 100644
index 0000000000..738e6fd5f8
--- /dev/null
+++ b/assets/training/model_management/src/azureml/model/mgmt/processors/pyfunc/vision/conda_od.yaml
@@ -0,0 +1,17 @@
+channels:
+- conda-forge
+dependencies:
+- python=3.9.19
+- pip<=24.0
+- pip:
+  - mlflow==2.12.1
+  - cloudpickle==2.2.1
+  - datasets==2.15.0
+  - openmim==0.3.9
+  - torch==2.0.1
+  - torchvision==0.15.2
+  - transformers==4.38.2
+  - accelerate==0.27.2
+  - albumentations==1.3.0
+  - fairscale==0.4.13
+name: mlflow-env
diff --git a/assets/training/model_management/src/run_model_framework_selector.py b/assets/training/model_management/src/run_model_framework_selector.py
new file mode 100644
index 0000000000..ee6ff82240
--- /dev/null
+++ b/assets/training/model_management/src/run_model_framework_selector.py
@@ -0,0 +1,26 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+"""Select Model Framework Component."""
+from azureml.model.mgmt.config import ModelFramework
+from mldesigner import Input, Output, command_component
+from azureml.model.mgmt.utils.logging_utils import get_logger
+from azureml.model.mgmt.utils.exceptions import swallow_all_exceptions
+
+logger = get_logger(__name__)
+
+
+@command_component
+@swallow_all_exceptions(logger)
+def validate(
+        model_framework: Input(type="string", optional=False)  # noqa: F821
+) -> Output(type="boolean", is_control=True):  # noqa: F821
+    """Entry function of model validation script."""
+    if model_framework == ModelFramework.MMLAB.value:
+        result = True
+    else:
+        result = False
+
+    logger.info(f"Model framework: {model_framework}, result: {result}")
+
+    return result
diff --git a/assets/training/model_management/src/run_model_preprocess.py b/assets/training/model_management/src/run_model_preprocess.py
index 6dfebedfb2..5a6ab6e32d 100644
--- a/assets/training/model_management/src/run_model_preprocess.py
+++ b/assets/training/model_management/src/run_model_preprocess.py
@@ -73,7 +73,8 @@ def _get_parser():
         required=False,
         help="Model download details",
     )
-    parser.add_argument("--model-path", type=Path, required=True, help="Model input path")
+    parser.add_argument("--model-path", type=Path, required=False, help="Model input path")
+    parser.add_argument("--model-path-mmd", type=Path, required=False, help="MMD Model input path")
     parser.add_argument("--license-file-path", type=Path, required=False, help="License file path")
     parser.add_argument(
         "--mlflow-model-output-dir",
@@ -107,7 +108,11 @@ def run():
     inference_base_image = args.inference_base_image
 
     model_download_metadata_path = args.model_download_metadata
+
     model_path = args.model_path
+    if model_framework == ModelFramework.MMLAB.value:
+        model_path = args.model_path_mmd
+
     mlflow_model_output_dir = args.mlflow_model_output_dir
     license_file_path = args.license_file_path
     TRUST_CODE_KEY = "trust_remote_code=True"