diff --git a/.github/workflows/build_wheels.yml b/.github/workflows/build_wheels.yml index 14ca70bcfc..3971392411 100644 --- a/.github/workflows/build_wheels.yml +++ b/.github/workflows/build_wheels.yml @@ -5,8 +5,8 @@ name: build_wheels # Devs should check out their fork, add a tag to the last master commit on their fork, and run the release off of their fork on the added tag to ensure wheels will be built correctly. on: workflow_dispatch: - tags: - - 'v*.*.*' + tags: + - 'v*.*.*' workflow_call: inputs: release_version: @@ -20,58 +20,10 @@ on: jobs: get-version: - runs-on: ubuntu-latest - outputs: - release_version: ${{ steps.get_release_version.outputs.release_version }} - version_without_prefix: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - highest_semver_tag: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Get release version - id: get_release_version - run: | - if [[ -n "${{ inputs.release_version }}" ]]; then - echo "Using provided release version: ${{ inputs.release_version }}" - echo "::set-output name=release_version::${{ inputs.release_version }}" - else - echo "No release version provided. Falling back to GITHUB_REF." 
- echo "::set-output name=release_version::${GITHUB_REF#refs/tags/}" - fi - - name: Get release version without prefix - id: get_release_version_without_prefix - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - echo ::set-output name=version_without_prefix::${RELEASE_VERSION:1} - - name: Get highest semver - id: get_highest_semver - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - if [[ -n "${{ inputs.highest_semver_tag }}" ]]; then - echo "Using provided highest semver version: ${{ inputs.highest_semver_tag }}" - echo "::set-output name=highest_semver_tag::${{ inputs.highest_semver_tag }}" - else - echo "No release version provided. Falling back to infra/scripts/setup-common-functions.sh." - source infra/scripts/setup-common-functions.sh - SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' - if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then - echo ::set-output name=highest_semver_tag::$(get_tag_release -m) - fi - fi - - name: Check output - id: check_output - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - run: | - echo $RELEASE_VERSION - echo $VERSION_WITHOUT_PREFIX - echo $HIGHEST_SEMVER_TAG + uses: ./.github/workflows/get_semantic_release_version.yml + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} build-python-wheel: name: Build wheels @@ -141,7 +93,7 @@ jobs: needs: get-version strategy: matrix: - component: [feature-server, feature-server-java, feature-transformation-server] + component: [ feature-server, feature-server-java, feature-transformation-server ] env: REGISTRY: feastdev steps: @@ -158,11 +110,11 @@ jobs: verify-python-wheels: runs-on: 
${{ matrix.os }} - needs: [build-python-wheel, build-source-distribution, get-version] + needs: [ build-python-wheel, build-source-distribution, get-version ] strategy: matrix: - os: [ubuntu-latest, macos-13 ] - python-version: ["3.9", "3.10", "3.11"] + os: [ ubuntu-latest, macos-13 ] + python-version: [ "3.9", "3.10", "3.11" ] from-source: [ True, False ] env: # this script is for testing servers diff --git a/.github/workflows/get_semantic_release_version.yml b/.github/workflows/get_semantic_release_version.yml new file mode 100644 index 0000000000..c800810dce --- /dev/null +++ b/.github/workflows/get_semantic_release_version.yml @@ -0,0 +1,102 @@ +name: Get semantic release version + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + workflow_call: # Allows this workflow to be reused via "uses:" from other workflows + inputs: + custom_version: + description: 'Custom version to publish (e.g., v1.2.3)' + required: false + type: string + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + outputs: + release_version: + value: ${{ jobs.get-version.outputs.release_version }} + version_without_prefix: + value: ${{ jobs.get-version.outputs.version_without_prefix }} + highest_semver_tag: + value: ${{ jobs.get-version.outputs.highest_semver_tag }} + +jobs: + get-version: + if: github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ inputs.token }} + GIT_AUTHOR_NAME: feast-ci-bot + GIT_AUTHOR_EMAIL: feast-ci-bot@willem.co + GIT_COMMITTER_NAME: feast-ci-bot + GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co + outputs: + release_version: ${{ steps.get_release_version.outputs.release_version }} + version_without_prefix: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} + highest_semver_tag: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} + steps: + - uses: actions/checkout@v4 + - name: Get release version + id: get_release_version + run: | + if [[ -n "${{ inputs.custom_version }}" ]]; then + VERSION_REGEX="^v[0-9]+\.[0-9]+\.[0-9]+$" + echo "Using custom version: ${{ inputs.custom_version }}" + if [[ ! 
"${{ inputs.custom_version }}" =~ $VERSION_REGEX ]]; then + echo "Error: custom_version must match semantic versioning (e.g., v1.2.3)." + exit 1 + fi + echo "::set-output name=release_version::${{ inputs.custom_version }}" + elif [[ "${GITHUB_REF}" == refs/tags/* ]]; then + echo "Using tag reference: ${GITHUB_REF#refs/tags/}" + echo "::set-output name=release_version::${GITHUB_REF#refs/tags/}" + else + echo "Defaulting to branch name: ${GITHUB_REF#refs/heads/}" + echo "::set-output name=release_version::${GITHUB_REF#refs/heads/}" + fi + - name: Get release version without prefix + id: get_release_version_without_prefix + env: + RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} + run: | + if [[ "${RELEASE_VERSION}" == v* ]]; then + echo "::set-output name=version_without_prefix::${RELEASE_VERSION:1}" + else + echo "::set-output name=version_without_prefix::${RELEASE_VERSION}" + fi + - name: Get highest semver + id: get_highest_semver + env: + RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} + run: | + if [[ -n "${{ inputs.custom_version }}" ]]; then + HIGHEST_SEMVER_TAG="${{ inputs.custom_version }}" + echo "::set-output name=highest_semver_tag::$HIGHEST_SEMVER_TAG" + echo "Using custom version as highest semantic version: $HIGHEST_SEMVER_TAG" + else + source infra/scripts/setup-common-functions.sh + SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' + if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then + echo ::set-output name=highest_semver_tag::$(get_tag_release -m) + echo "Using infra/scripts/setup-common-functions.sh to generate highest semantic version: $HIGHEST_SEMVER_TAG" + fi + fi + - name: Check output + env: + RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} + VERSION_WITHOUT_PREFIX: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} + HIGHEST_SEMVER_TAG: 
${{ steps.get_highest_semver.outputs.highest_semver_tag }} + run: | + echo $RELEASE_VERSION + echo $VERSION_WITHOUT_PREFIX + echo $HIGHEST_SEMVER_TAG \ No newline at end of file diff --git a/.github/workflows/operator-e2e-integration-tests.yml b/.github/workflows/operator-e2e-integration-tests.yml index 23c250cc53..cbb505c3fe 100644 --- a/.github/workflows/operator-e2e-integration-tests.yml +++ b/.github/workflows/operator-e2e-integration-tests.yml @@ -13,6 +13,7 @@ on: jobs: operator-e2e-tests: + timeout-minutes: 40 if: ((github.event.action == 'labeled' && (github.event.label.name == 'approved' || github.event.label.name == 'lgtm' || github.event.label.name == 'ok-to-test')) || (github.event.action != 'labeled' && (contains(github.event.pull_request.labels.*.name, 'ok-to-test') || contains(github.event.pull_request.labels.*.name, 'approved') || contains(github.event.pull_request.labels.*.name, 'lgtm')))) && @@ -38,7 +39,7 @@ jobs: - name: Create KIND cluster run: | - kind create cluster --name $KIND_CLUSTER --wait 5m + kind create cluster --name $KIND_CLUSTER --wait 10m - name: Set up kubernetes context run: | @@ -51,8 +52,16 @@ jobs: cd infra/feast-operator/ make test-e2e + - name: Debug KIND Cluster when there is a failure + if: failure() + run: | + kubectl get pods --all-namespaces + kubectl describe nodes + - name: Clean up if: always() run: | # Delete the KIND cluster after tests kind delete cluster --name kind-$KIND_CLUSTER + + diff --git a/.github/workflows/pr_integration_tests.yml b/.github/workflows/pr_integration_tests.yml index 5a1b483b39..923c0b0335 100644 --- a/.github/workflows/pr_integration_tests.yml +++ b/.github/workflows/pr_integration_tests.yml @@ -6,6 +6,10 @@ on: - opened - synchronize - labeled + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' # concurrency is currently broken, see details https://github.com/actions/runner/issues/1532 #concurrency: diff --git a/.github/workflows/pr_local_integration_tests.yml 
b/.github/workflows/pr_local_integration_tests.yml index 8b2f8c13d2..e6a9e3e8bd 100644 --- a/.github/workflows/pr_local_integration_tests.yml +++ b/.github/workflows/pr_local_integration_tests.yml @@ -7,6 +7,10 @@ on: - opened - synchronize - labeled + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' jobs: integration-test-python-local: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index fcd4a1b720..eb81ca193c 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -17,212 +17,34 @@ on: jobs: get-version: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ github.event.inputs.token }} - GIT_AUTHOR_NAME: feast-ci-bot - GIT_AUTHOR_EMAIL: feast-ci-bot@willem.co - GIT_COMMITTER_NAME: feast-ci-bot - GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co - outputs: - release_version: ${{ steps.get_release_version.outputs.release_version }} - version_without_prefix: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - highest_semver_tag: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - steps: - - uses: actions/checkout@v4 - - name: Get release version - id: get_release_version - run: | - if [[ -n "${{ github.event.inputs.custom_version }}" ]]; then - VERSION_REGEX="^v[0-9]+\.[0-9]+\.[0-9]+$" - echo "Using custom version: ${{ github.event.inputs.custom_version }}" - if [[ ! "${{ github.event.inputs.custom_version }}" =~ $VERSION_REGEX ]]; then - echo "Error: custom_version must match semantic versioning (e.g., v1.2.3)." 
- exit 1 - fi - echo "::set-output name=release_version::${{ github.event.inputs.custom_version }}" - elif [[ "${GITHUB_REF}" == refs/tags/* ]]; then - echo "Using tag reference: ${GITHUB_REF#refs/tags/}" - echo "::set-output name=release_version::${GITHUB_REF#refs/tags/}" - else - echo "Defaulting to branch name: ${GITHUB_REF#refs/heads/}" - echo "::set-output name=release_version::${GITHUB_REF#refs/heads/}" - fi - - name: Get release version without prefix - id: get_release_version_without_prefix - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - if [[ "${RELEASE_VERSION}" == v* ]]; then - echo "::set-output name=version_without_prefix::${RELEASE_VERSION:1}" - else - echo "::set-output name=version_without_prefix::${RELEASE_VERSION}" - fi - - name: Get highest semver - id: get_highest_semver - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - if [[ -n "${{ github.event.inputs.custom_version }}" ]]; then - HIGHEST_SEMVER_TAG="${{ github.event.inputs.custom_version }}" - echo "::set-output name=highest_semver_tag::$HIGHEST_SEMVER_TAG" - echo "Using custom version as highest semantic version: $HIGHEST_SEMVER_TAG" - else - source infra/scripts/setup-common-functions.sh - SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' - if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then - echo ::set-output name=highest_semver_tag::$(get_tag_release -m) - echo "Using infra/scripts/setup-common-functions.sh to generate highest semantic version: $HIGHEST_SEMVER_TAG" - fi - fi - - name: Check output - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - run: | - echo $RELEASE_VERSION - echo $VERSION_WITHOUT_PREFIX - echo $HIGHEST_SEMVER_TAG 
+ uses: ./.github/workflows/get_semantic_release_version.yml + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} - build-publish-docker-images: - runs-on: ubuntu-latest - needs: [get-version, publish-python-sdk] - strategy: - matrix: - component: [feature-server, feature-server-java, feature-transformation-server, feast-helm-operator, feast-operator] - env: - MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar - REGISTRY: feastdev - steps: - - uses: actions/checkout@v4 - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - name: Login to DockerHub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCP_SA_KEY }}' - - name: Set up gcloud SDK - uses: google-github-actions/setup-gcloud@v1 - with: - project_id: ${{ secrets.GCP_PROJECT_ID }} - - name: Use gcloud CLI - run: gcloud info - - run: gcloud auth configure-docker --quiet - - name: Build image - run: | - make build-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} - env: - RELEASE_VERSION: ${{ needs.get-version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ needs.get-version.outputs.highest_semver_tag }} - - name: Push versioned images - env: - RELEASE_VERSION: ${{ needs.get-version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ needs.get-version.outputs.highest_semver_tag }} - run: | - make push-${{ matrix.component }}-docker REGISTRY=${REGISTRY} VERSION=${VERSION_WITHOUT_PREFIX} + publish-python-sdk: + uses: ./.github/workflows/publish_python_sdk.yml + with: + 
custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} - echo "Only push to latest tag if tag is the highest semver version $HIGHEST_SEMVER_TAG" - if [ "${VERSION_WITHOUT_PREFIX}" = "${HIGHEST_SEMVER_TAG:1}" ] - then - docker tag feastdev/${{ matrix.component }}:${VERSION_WITHOUT_PREFIX} feastdev/${{ matrix.component }}:latest - docker push feastdev/${{ matrix.component }}:latest - fi + build-publish-docker-images: + uses: ./.github/workflows/publish_images.yml + needs: [ get-version, publish-python-sdk ] + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} publish-helm-charts: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - needs: get-version - env: - HELM_VERSION: v3.8.0 - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - steps: - - uses: actions/checkout@v4 - - name: Authenticate to Google Cloud - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCP_SA_KEY }}' - - name: Set up gcloud SDK - uses: google-github-actions/setup-gcloud@v1 - with: - project_id: ${{ secrets.GCP_PROJECT_ID }} - - run: gcloud auth configure-docker --quiet - - name: Remove previous Helm - run: sudo rm -rf $(which helm) - - name: Install Helm - run: ./infra/scripts/helm/install-helm.sh - - name: Validate Helm chart prior to publishing - run: ./infra/scripts/helm/validate-helm-chart-publish.sh - - name: Validate all version consistency - run: ./infra/scripts/helm/validate-helm-chart-versions.sh $VERSION_WITHOUT_PREFIX - - name: Publish Helm charts - run: ./infra/scripts/helm/push-helm-charts.sh $VERSION_WITHOUT_PREFIX - - build_wheels: - uses: ./.github/workflows/build_wheels.yml - needs: get-version + uses: ./.github/workflows/publish_helm_charts.yml + needs: [ get-version, publish-python-sdk ] with: - release_version: ${{ needs.get-version.outputs.release_version }} - highest_semver_tag: ${{ 
needs.get-version.outputs.highest_semver_tag }} - - publish-python-sdk: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - needs: [build_wheels] - steps: - - uses: actions/download-artifact@v4.1.7 - with: - name: python-wheels - path: dist - - uses: pypa/gh-action-pypi-publish@v1.4.2 - with: - user: __token__ - password: ${{ secrets.PYPI_PASSWORD }} + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} publish-java-sdk: - if: github.repository == 'feast-dev/feast' - container: maven:3.6-jdk-11 - runs-on: ubuntu-latest - needs: get-version - steps: - - uses: actions/checkout@v4 - with: - submodules: 'true' - - name: Set up JDK 11 - uses: actions/setup-java@v1 - with: - java-version: '11' - java-package: jdk - architecture: x64 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - architecture: 'x64' - - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-it-maven- - - name: Publish java sdk - env: - VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} - GPG_PUBLIC_KEY: ${{ secrets.GPG_PUBLIC_KEY }} - GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} - MAVEN_SETTINGS: ${{ secrets.MAVEN_SETTINGS }} - run: | - echo -n "$GPG_PUBLIC_KEY" > /root/public-key - echo -n "$GPG_PRIVATE_KEY" > /root/private-key - mkdir -p /root/.m2/ - echo -n "$MAVEN_SETTINGS" > /root/.m2/settings.xml - infra/scripts/publish-java-sdk.sh --revision ${VERSION_WITHOUT_PREFIX} --gpg-key-import-dir /root + uses: ./.github/workflows/publish_java_sdk.yml + needs: [ get-version, publish-python-sdk ] + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} diff --git a/.github/workflows/publish_helm_charts.yml b/.github/workflows/publish_helm_charts.yml new file mode 100644 index 0000000000..060bdb05d4 --- /dev/null +++ 
b/.github/workflows/publish_helm_charts.yml @@ -0,0 +1,50 @@ +name: publish helm charts + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + +jobs: + get-version: + uses: ./.github/workflows/get_semantic_release_version.yml + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + + publish-helm-charts: + if: github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + needs: get-version + env: + HELM_VERSION: v3.8.0 + VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} + steps: + - uses: actions/checkout@v4 + - name: Authenticate to Google Cloud + uses: 'google-github-actions/auth@v1' + with: + credentials_json: '${{ secrets.GCP_SA_KEY }}' + - name: Set up gcloud SDK + uses: google-github-actions/setup-gcloud@v1 + with: + project_id: ${{ secrets.GCP_PROJECT_ID }} + - run: gcloud auth configure-docker --quiet + - name: Remove previous Helm + run: sudo rm -rf $(which helm) + - name: Install Helm + run: ./infra/scripts/helm/install-helm.sh + - name: Validate Helm chart prior to publishing + run: ./infra/scripts/helm/validate-helm-chart-publish.sh + - name: Validate all version consistency + run: ./infra/scripts/helm/validate-helm-chart-versions.sh $VERSION_WITHOUT_PREFIX + - name: Publish Helm charts + run: ./infra/scripts/helm/push-helm-charts.sh $VERSION_WITHOUT_PREFIX + diff --git a/.github/workflows/publish_images.yml b/.github/workflows/publish_images.yml index a80036cc9f..26201aaa5c 100644 --- a/.github/workflows/publish_images.yml +++ b/.github/workflows/publish_images.yml @@ -14,81 +14,18 @@ on: jobs: get-version: - if: github.repository == 'feast-dev/feast' - runs-on: ubuntu-latest - 
env: - GITHUB_TOKEN: ${{ github.event.inputs.token }} - GIT_AUTHOR_NAME: feast-ci-bot - GIT_AUTHOR_EMAIL: feast-ci-bot@willem.co - GIT_COMMITTER_NAME: feast-ci-bot - GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co - outputs: - release_version: ${{ steps.get_release_version.outputs.release_version }} - version_without_prefix: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - highest_semver_tag: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - steps: - - uses: actions/checkout@v4 - - name: Get release version - id: get_release_version - run: | - if [[ -n "${{ github.event.inputs.custom_version }}" ]]; then - VERSION_REGEX="^v[0-9]+\.[0-9]+\.[0-9]+$" - echo "Using custom version: ${{ github.event.inputs.custom_version }}" - if [[ ! "${{ github.event.inputs.custom_version }}" =~ $VERSION_REGEX ]]; then - echo "Error: custom_version must match semantic versioning (e.g., v1.2.3)." - exit 1 - fi - echo "::set-output name=release_version::${{ github.event.inputs.custom_version }}" - elif [[ "${GITHUB_REF}" == refs/tags/* ]]; then - echo "Using tag reference: ${GITHUB_REF#refs/tags/}" - echo "::set-output name=release_version::${GITHUB_REF#refs/tags/}" - else - echo "Defaulting to branch name: ${GITHUB_REF#refs/heads/}" - echo "::set-output name=release_version::${GITHUB_REF#refs/heads/}" - fi - - name: Get release version without prefix - id: get_release_version_without_prefix - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - if [[ "${RELEASE_VERSION}" == v* ]]; then - echo "::set-output name=version_without_prefix::${RELEASE_VERSION:1}" - else - echo "::set-output name=version_without_prefix::${RELEASE_VERSION}" - fi - - name: Get highest semver - id: get_highest_semver - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - run: | - if [[ -n "${{ github.event.inputs.custom_version }}" ]]; then - HIGHEST_SEMVER_TAG="${{ github.event.inputs.custom_version }}" - 
echo "::set-output name=highest_semver_tag::$HIGHEST_SEMVER_TAG" - echo "Using custom version as highest semantic version: $HIGHEST_SEMVER_TAG" - else - source infra/scripts/setup-common-functions.sh - SEMVER_REGEX='^v[0-9]+\.[0-9]+\.[0-9]+(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' - if echo "${RELEASE_VERSION}" | grep -P "$SEMVER_REGEX" &>/dev/null ; then - echo ::set-output name=highest_semver_tag::$(get_tag_release -m) - echo "Using infra/scripts/setup-common-functions.sh to generate highest semantic version: $HIGHEST_SEMVER_TAG" - fi - fi - - name: Check output - env: - RELEASE_VERSION: ${{ steps.get_release_version.outputs.release_version }} - VERSION_WITHOUT_PREFIX: ${{ steps.get_release_version_without_prefix.outputs.version_without_prefix }} - HIGHEST_SEMVER_TAG: ${{ steps.get_highest_semver.outputs.highest_semver_tag }} - run: | - echo $RELEASE_VERSION - echo $VERSION_WITHOUT_PREFIX - echo $HIGHEST_SEMVER_TAG + uses: ./.github/workflows/get_semantic_release_version.yml + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} build-publish-docker-images: + if: github.repository == 'feast-dev/feast' runs-on: ubuntu-latest - needs: [get-version] + needs: [ get-version ] strategy: matrix: - component: [feature-server, feature-server-java, feature-transformation-server, feast-helm-operator, feast-operator] + component: [ feature-server, feature-server-java, feature-transformation-server, feast-helm-operator, feast-operator ] env: MAVEN_CACHE: gs://feast-templocation-kf-feast/.m2.2020-08-19.tar REGISTRY: feastdev diff --git a/.github/workflows/publish_java_sdk.yml b/.github/workflows/publish_java_sdk.yml new file mode 100644 index 0000000000..c158010995 --- /dev/null +++ b/.github/workflows/publish_java_sdk.yml @@ -0,0 +1,58 @@ +name: publish java sdk + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom 
version to publish (e.g., v1.2.3) -- only edit if you know what you are doing' + required: false + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + +jobs: + get-version: + uses: ./.github/workflows/get_semantic_release_version.yml + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + + publish-java-sdk: + if: github.repository == 'feast-dev/feast' + container: maven:3.6-jdk-11 + runs-on: ubuntu-latest + needs: [ get-version ] + steps: + - uses: actions/checkout@v4 + with: + submodules: 'true' + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + java-package: jdk + architecture: x64 + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + architecture: 'x64' + - uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-it-maven-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-it-maven- + - name: Publish java sdk + env: + VERSION_WITHOUT_PREFIX: ${{ needs.get-version.outputs.version_without_prefix }} + GPG_PUBLIC_KEY: ${{ secrets.GPG_PUBLIC_KEY }} + GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} + MAVEN_SETTINGS: ${{ secrets.MAVEN_SETTINGS }} + run: | + echo -n "$GPG_PUBLIC_KEY" > /root/public-key + echo -n "$GPG_PRIVATE_KEY" > /root/private-key + mkdir -p /root/.m2/ + echo -n "$MAVEN_SETTINGS" > /root/.m2/settings.xml + infra/scripts/publish-java-sdk.sh --revision ${VERSION_WITHOUT_PREFIX} --gpg-key-import-dir /root diff --git a/.github/workflows/publish_python_sdk.yml b/.github/workflows/publish_python_sdk.yml new file mode 100644 index 0000000000..1a9c111de7 --- /dev/null +++ b/.github/workflows/publish_python_sdk.yml @@ -0,0 +1,41 @@ +name: publish python sdk + +on: + workflow_dispatch: # Allows manual trigger of the workflow + inputs: + custom_version: # Optional input for a custom version + description: 'Custom version to publish (e.g., v1.2.3) -- only 
edit if you know what you are doing' + required: false + token: + description: 'Personal Access Token' + required: true + default: "" + type: string + +jobs: + get-version: + uses: ./.github/workflows/get_semantic_release_version.yml + with: + custom_version: ${{ github.event.inputs.custom_version }} + token: ${{ github.event.inputs.token }} + + build_wheels: + uses: ./.github/workflows/build_wheels.yml + needs: get-version + with: + release_version: ${{ needs.get-version.outputs.release_version }} + highest_semver_tag: ${{ needs.get-version.outputs.highest_semver_tag }} + + publish-python-sdk: + if: github.repository == 'feast-dev/feast' + runs-on: ubuntu-latest + needs: [ get-version, build_wheels ] + steps: + - uses: actions/download-artifact@v4.1.7 + with: + name: python-wheels + path: dist + - uses: pypa/gh-action-pypi-publish@v1.4.2 + with: + user: __token__ + password: ${{ secrets.PYPI_PASSWORD }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ec7ffb29ed..79b845b101 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,8 +20,8 @@ on: type: boolean jobs: - get_dry_release_versions: + if: github.repository == 'feast-dev/feast' runs-on: ubuntu-latest env: GITHUB_TOKEN: ${{ github.event.inputs.token }} @@ -98,7 +98,7 @@ jobs: make build-installer bundle publish-web-ui-npm: - needs: [validate_version_bumps, get_dry_release_versions] + needs: [ validate_version_bumps, get_dry_release_versions ] runs-on: ubuntu-latest env: # This publish is working using an NPM automation token to bypass 2FA @@ -121,7 +121,7 @@ jobs: run: yarn build:lib - name: Publish UI package working-directory: ./ui - if: github.event.inputs.dry_run == 'false' && github.event.inputs.publish_ui == 'true' + if: github.event.inputs.dry_run == 'false' && github.event.inputs.publish_ui == 'true' run: npm publish env: # This publish is working using an NPM automation token to bypass 2FA @@ -138,25 +138,25 @@ jobs: GIT_COMMITTER_NAME: 
feast-ci-bot GIT_COMMITTER_EMAIL: feast-ci-bot@willem.co steps: - - name: Checkout - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Setup Node.js - uses: actions/setup-node@v3 - with: - node-version-file: './ui/.nvmrc' - - name: Set up Homebrew - id: set-up-homebrew - uses: Homebrew/actions/setup-homebrew@master - - name: Setup Helm-docs - run: | - brew install norwoodj/tap/helm-docs - - name: Release (Dry Run) - if: github.event.inputs.dry_run == 'true' - run: | - npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release --dry-run - - name: Release - if: github.event.inputs.dry_run == 'false' - run: | - npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release + - name: Checkout + uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version-file: './ui/.nvmrc' + - name: Set up Homebrew + id: set-up-homebrew + uses: Homebrew/actions/setup-homebrew@master + - name: Setup Helm-docs + run: | + brew install norwoodj/tap/helm-docs + - name: Release (Dry Run) + if: github.event.inputs.dry_run == 'true' + run: | + npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release --dry-run + - name: Release + if: github.event.inputs.dry_run == 'false' + run: | + npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release diff --git a/.github/workflows/show_semantic_release.yml b/.github/workflows/show_semantic_release.yml deleted file mode 100644 index f4aef6be54..0000000000 --- a/.github/workflows/show_semantic_release.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: show semantic release versions - -on: - workflow_dispatch: - inputs: - token: - description: 'Personal Access Token' - required: true - default: "" - type: string - 
-jobs: - - get_dry_release_versions: - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ github.event.inputs.token }} - outputs: - current_version: ${{ steps.get_versions.outputs.current_version }} - next_version: ${{ steps.get_versions.outputs.next_version }} - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - persist-credentials: false - - name: Setup Node.js - uses: actions/setup-node@v3 - with: - node-version: "lts/*" - - name: Release (Dry Run) - id: get_versions - run: | - CURRENT_VERSION=$(npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release --dry-run | grep "associated with version " | sed -E 's/.* version//' | sed -E 's/ on.*//') - NEXT_VERSION=$(npx -p @semantic-release/changelog -p @semantic-release/git -p @semantic-release/exec -p semantic-release semantic-release --dry-run | grep 'The next release version is' | sed -E 's/.* ([[:digit:].]+)$/\1/') - echo ::set-output name=current_version::$CURRENT_VERSION - echo ::set-output name=next_version::$NEXT_VERSION - echo "Current version is ${CURRENT_VERSION}" - echo "Next version is ${NEXT_VERSION}" diff --git a/.github/workflows/smoke_tests.yml b/.github/workflows/smoke_tests.yml index 774d58d22b..9a898dd4c5 100644 --- a/.github/workflows/smoke_tests.yml +++ b/.github/workflows/smoke_tests.yml @@ -1,6 +1,11 @@ name: smoke-tests -on: [pull_request] +on: + pull_request: + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' jobs: unit-test-python: runs-on: ${{ matrix.os }} diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index a8ddd397e3..6f46d12963 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -1,6 +1,11 @@ name: unit-tests -on: [pull_request] +on: + pull_request: + paths-ignore: + - 'community/**' + - 'docs/**' + - 'examples/**' jobs: unit-test-python: runs-on: ${{ matrix.os }} diff --git a/docs/reference/feature-servers/README.md 
b/docs/reference/feature-servers/README.md index 2ceaf5807f..156e60c743 100644 --- a/docs/reference/feature-servers/README.md +++ b/docs/reference/feature-servers/README.md @@ -1,4 +1,4 @@ -# Feature servers +# Feast servers Feast users can choose to retrieve features from a feature server, as opposed to through the Python SDK. @@ -12,4 +12,8 @@ Feast users can choose to retrieve features from a feature server, as opposed to {% content-ref url="offline-feature-server.md" %} [offline-feature-server.md](offline-feature-server.md) +{% endcontent-ref %} + +{% content-ref url="registry-server.md" %} +[registry-server.md](registry-server.md) {% endcontent-ref %} \ No newline at end of file diff --git a/docs/reference/feature-servers/registry-server.md b/docs/reference/feature-servers/registry-server.md new file mode 100644 index 0000000000..9707a59703 --- /dev/null +++ b/docs/reference/feature-servers/registry-server.md @@ -0,0 +1,26 @@ +# Registry server + +## Description + +The Registry server uses the gRPC communication protocol to exchange data. +This enables users to communicate with the server using any programming language that can make gRPC requests. + +## How to configure the server + +## CLI + +There is a CLI command that starts the Registry server: `feast serve_registry`. By default, remote Registry Server uses port 6570, the port can be overridden with a `--port` flag. +To start the Registry Server in TLS mode, you need to provide the private and public keys using the `--key` and `--cert` arguments. 
+More info about TLS mode can be found in [feast-client-connecting-to-remote-registry-sever-started-in-tls-mode](../../how-to-guides/starting-feast-servers-tls-mode.md#starting-feast-registry-server-in-tls-mode) + +## How to configure the client + +Please see the detail how to configure Remote Registry client [remote.md](../registries/remote.md) + +# Registry Server Permissions and Access Control + +Please refer the [page](./../registry/registry-permissions.md) for more details on API Endpoints and Permissions. + +## How to configure Authentication and Authorization ? + +Please refer the [page](./../../../docs/getting-started/concepts/permission.md) for more details on how to configure authentication and authorization. \ No newline at end of file diff --git a/docs/reference/registries/remote.md b/docs/reference/registries/remote.md new file mode 100644 index 0000000000..3651aeb71e --- /dev/null +++ b/docs/reference/registries/remote.md @@ -0,0 +1,28 @@ +# Remote Registry + +## Description + +The Remote Registry is a gRPC client for the registry that implements the `RemoteRegistry` class using the existing `BaseRegistry` interface. + +## How to configure the client + +User needs to create a client side `feature_store.yaml` file, set the `registry_type` to `remote` and provide the server connection configuration. +The `path` parameter is a URL with a port (default is 6570) used by the client to connect with the Remote Registry server. + +{% code title="feature_store.yaml" %} +```yaml +registry: + registry_type: remote + path: http://localhost:6570 +``` +{% endcode %} + +The optional `cert` parameter can be configured as well, it should point to the public certificate path when the Registry Server starts in SSL mode. This may be needed if the Registry Server is started with a self-signed certificate, typically this file ends with *.crt, *.cer, or *.pem. 
+More info about the `cert` parameter can be found in [feast-client-connecting-to-remote-registry-sever-started-in-tls-mode](../../how-to-guides/starting-feast-servers-tls-mode.md#feast-client-connecting-to-remote-registry-sever-started-in-tls-mode) + +## How to configure the server + +Please see the detail how to configure registry server [registry-server.md](../feature-servers/registry-server.md) + +## How to configure Authentication and Authorization +Please refer the [page](./../../../docs/getting-started/concepts/permission.md) for more details on how to configure authentication and authorization. diff --git a/infra/feast-operator/Makefile b/infra/feast-operator/Makefile index 310d64afaa..6984ac66e7 100644 --- a/infra/feast-operator/Makefile +++ b/infra/feast-operator/Makefile @@ -117,7 +117,7 @@ test: build-installer fmt vet lint envtest ## Run tests. # Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. 
test-e2e: - go test ./test/e2e/ -v -ginkgo.v + go test -timeout 30m ./test/e2e/ -v -ginkgo.v .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint diff --git a/infra/feast-operator/api/v1alpha1/featurestore_types.go b/infra/feast-operator/api/v1alpha1/featurestore_types.go index 0b516630cd..84b4d8e841 100644 --- a/infra/feast-operator/api/v1alpha1/featurestore_types.go +++ b/infra/feast-operator/api/v1alpha1/featurestore_types.go @@ -74,22 +74,15 @@ type FeatureStoreServices struct { // OfflineStore configures the deployed offline store service type OfflineStore struct { - ServiceConfigs `json:",inline"` - Persistence *OfflineStorePersistence `json:"persistence,omitempty"` - TLS *OfflineTlsConfigs `json:"tls,omitempty"` + StoreServiceConfigs `json:",inline"` + Persistence *OfflineStorePersistence `json:"persistence,omitempty"` + TLS *TlsConfigs `json:"tls,omitempty"` // LogLevel sets the logging level for the offline store service // Allowed values: "debug", "info", "warning", "error", "critical". // +kubebuilder:validation:Enum=debug;info;warning;error;critical LogLevel string `json:"logLevel,omitempty"` } -// OfflineTlsConfigs configures server TLS for the offline feast service. in an openshift cluster, this is configured by default using service serving certificates. -type OfflineTlsConfigs struct { - TlsConfigs `json:",inline"` - // verify the client TLS certificate. - VerifyClient *bool `json:"verifyClient,omitempty"` -} - // OfflineStorePersistence configures the persistence settings for the offline store service // +kubebuilder:validation:XValidation:rule="[has(self.file), has(self.store)].exists_one(c, c)",message="One selection required between file or store." 
type OfflineStorePersistence struct { @@ -99,7 +92,7 @@ type OfflineStorePersistence struct { // OfflineStoreFilePersistence configures the file-based persistence for the offline store service type OfflineStoreFilePersistence struct { - // +kubebuilder:validation:Enum=dask;duckdb + // +kubebuilder:validation:Enum=file;dask;duckdb Type string `json:"type,omitempty"` PvcConfig *PvcConfig `json:"pvc,omitempty"` } @@ -107,11 +100,12 @@ type OfflineStoreFilePersistence struct { var ValidOfflineStoreFilePersistenceTypes = []string{ "dask", "duckdb", + "file", } // OfflineStoreDBStorePersistence configures the DB store persistence for the offline store service type OfflineStoreDBStorePersistence struct { - // +kubebuilder:validation:Enum=snowflake.offline;bigquery;redshift;spark;postgres;feast_trino.trino.TrinoOfflineStore;redis + // +kubebuilder:validation:Enum=snowflake.offline;bigquery;redshift;spark;postgres;trino;redis;athena;mssql Type string `json:"type"` // Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. SecretRef corev1.LocalObjectReference `json:"secretRef"` @@ -125,15 +119,17 @@ var ValidOfflineStoreDBStorePersistenceTypes = []string{ "redshift", "spark", "postgres", - "feast_trino.trino.TrinoOfflineStore", + "trino", "redis", + "athena", + "mssql", } // OnlineStore configures the deployed online store service type OnlineStore struct { - ServiceConfigs `json:",inline"` - Persistence *OnlineStorePersistence `json:"persistence,omitempty"` - TLS *TlsConfigs `json:"tls,omitempty"` + StoreServiceConfigs `json:",inline"` + Persistence *OnlineStorePersistence `json:"persistence,omitempty"` + TLS *TlsConfigs `json:"tls,omitempty"` // LogLevel sets the logging level for the online store service // Allowed values: "debug", "info", "warning", "error", "critical". 
// +kubebuilder:validation:Enum=debug;info;warning;error;critical @@ -158,7 +154,7 @@ type OnlineStoreFilePersistence struct { // OnlineStoreDBStorePersistence configures the DB store persistence for the offline store service type OnlineStoreDBStorePersistence struct { - // +kubebuilder:validation:Enum=snowflake.online;redis;ikv;datastore;dynamodb;bigtable;postgres;cassandra;mysql;hazelcast;singlestore + // +kubebuilder:validation:Enum=snowflake.online;redis;ikv;datastore;dynamodb;bigtable;postgres;cassandra;mysql;hazelcast;singlestore;hbase;elasticsearch;qdrant;couchbase Type string `json:"type"` // Data store parameters should be placed as-is from the "feature_store.yaml" under the secret key. "registry_type" & "type" fields should be removed. SecretRef corev1.LocalObjectReference `json:"secretRef"` @@ -178,6 +174,10 @@ var ValidOnlineStoreDBStorePersistenceTypes = []string{ "mysql", "hazelcast", "singlestore", + "hbase", + "elasticsearch", + "qdrant", + "couchbase", } // LocalRegistryConfig configures the deployed registry service @@ -290,6 +290,14 @@ type DefaultConfigs struct { Image *string `json:"image,omitempty"` } +// StoreServiceConfigs k8s deployment settings +type StoreServiceConfigs struct { + // Replicas determines the number of pods for the feast service. + // When Replicas > 1, persistence is recommended. 
+ Replicas *int32 `json:"replicas,omitempty"` + ServiceConfigs `json:",inline"` +} + // OptionalConfigs k8s container settings that are optional type OptionalConfigs struct { Env *[]corev1.EnvVar `json:"env,omitempty"` diff --git a/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go b/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go index 3f317c650e..6cba8e5923 100644 --- a/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/infra/feast-operator/api/v1alpha1/zz_generated.deepcopy.go @@ -273,7 +273,7 @@ func (in *LocalRegistryConfig) DeepCopy() *LocalRegistryConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OfflineStore) DeepCopyInto(out *OfflineStore) { *out = *in - in.ServiceConfigs.DeepCopyInto(&out.ServiceConfigs) + in.StoreServiceConfigs.DeepCopyInto(&out.StoreServiceConfigs) if in.Persistence != nil { in, out := &in.Persistence, &out.Persistence *out = new(OfflineStorePersistence) @@ -281,7 +281,7 @@ func (in *OfflineStore) DeepCopyInto(out *OfflineStore) { } if in.TLS != nil { in, out := &in.TLS, &out.TLS - *out = new(OfflineTlsConfigs) + *out = new(TlsConfigs) (*in).DeepCopyInto(*out) } } @@ -357,27 +357,6 @@ func (in *OfflineStorePersistence) DeepCopy() *OfflineStorePersistence { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OfflineTlsConfigs) DeepCopyInto(out *OfflineTlsConfigs) { - *out = *in - in.TlsConfigs.DeepCopyInto(&out.TlsConfigs) - if in.VerifyClient != nil { - in, out := &in.VerifyClient, &out.VerifyClient - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OfflineTlsConfigs. 
-func (in *OfflineTlsConfigs) DeepCopy() *OfflineTlsConfigs { - if in == nil { - return nil - } - out := new(OfflineTlsConfigs) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OidcAuthz) DeepCopyInto(out *OidcAuthz) { *out = *in @@ -397,7 +376,7 @@ func (in *OidcAuthz) DeepCopy() *OidcAuthz { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OnlineStore) DeepCopyInto(out *OnlineStore) { *out = *in - in.ServiceConfigs.DeepCopyInto(&out.ServiceConfigs) + in.StoreServiceConfigs.DeepCopyInto(&out.StoreServiceConfigs) if in.Persistence != nil { in, out := &in.Persistence, &out.Persistence *out = new(OnlineStorePersistence) @@ -737,6 +716,27 @@ func (in *ServiceHostnames) DeepCopy() *ServiceHostnames { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreServiceConfigs) DeepCopyInto(out *StoreServiceConfigs) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.ServiceConfigs.DeepCopyInto(&out.ServiceConfigs) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreServiceConfigs. +func (in *StoreServiceConfigs) DeepCopy() *StoreServiceConfigs { + if in == nil { + return nil + } + out := new(StoreServiceConfigs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TlsConfigs) DeepCopyInto(out *TlsConfigs) { *out = *in diff --git a/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml b/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml index 6929796d34..7fbd38ed31 100644 --- a/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml +++ b/infra/feast-operator/config/crd/bases/feast.dev_featurestores.yaml @@ -323,6 +323,7 @@ spec: rule: self.mountPath.matches('^/[^:]*$') type: enum: + - file - dask - duckdb type: string @@ -355,8 +356,10 @@ spec: - redshift - spark - postgres - - feast_trino.trino.TrinoOfflineStore + - trino - redis + - athena + - mssql type: string required: - secretRef @@ -366,6 +369,12 @@ spec: x-kubernetes-validations: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. @@ -423,9 +432,9 @@ spec: type: object type: object tls: - description: OfflineTlsConfigs configures server TLS for the - offline feast service. in an openshift cluster, this is - configured by default using service serving certificates. + description: TlsConfigs configures server TLS for a feast + service. in an openshift cluster, this is configured by + default using service serving certificates. properties: disable: description: will disable TLS for the feast service. useful @@ -455,9 +464,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - verifyClient: - description: verify the client TLS certificate. - type: boolean type: object x-kubernetes-validations: - message: '`secretRef` required if `disable` is false.' 
@@ -729,6 +735,10 @@ spec: - mysql - hazelcast - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase type: string required: - secretRef @@ -738,6 +748,12 @@ spec: x-kubernetes-validations: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. @@ -1559,6 +1575,7 @@ spec: rule: self.mountPath.matches('^/[^:]*$') type: enum: + - file - dask - duckdb type: string @@ -1592,8 +1609,10 @@ spec: - redshift - spark - postgres - - feast_trino.trino.TrinoOfflineStore + - trino - redis + - athena + - mssql type: string required: - secretRef @@ -1604,6 +1623,12 @@ spec: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. @@ -1662,10 +1687,9 @@ spec: type: object type: object tls: - description: OfflineTlsConfigs configures server TLS for - the offline feast service. in an openshift cluster, - this is configured by default using service serving - certificates. + description: TlsConfigs configures server TLS for a feast + service. in an openshift cluster, this is configured + by default using service serving certificates. properties: disable: description: will disable TLS for the feast service. @@ -1695,9 +1719,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - verifyClient: - description: verify the client TLS certificate. 
- type: boolean type: object x-kubernetes-validations: - message: '`secretRef` required if `disable` is false.' @@ -1973,6 +1994,10 @@ spec: - mysql - hazelcast - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase type: string required: - secretRef @@ -1983,6 +2008,12 @@ spec: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. diff --git a/infra/feast-operator/dist/install.yaml b/infra/feast-operator/dist/install.yaml index 9e213994eb..73abc3717b 100644 --- a/infra/feast-operator/dist/install.yaml +++ b/infra/feast-operator/dist/install.yaml @@ -331,6 +331,7 @@ spec: rule: self.mountPath.matches('^/[^:]*$') type: enum: + - file - dask - duckdb type: string @@ -363,8 +364,10 @@ spec: - redshift - spark - postgres - - feast_trino.trino.TrinoOfflineStore + - trino - redis + - athena + - mssql type: string required: - secretRef @@ -374,6 +377,12 @@ spec: x-kubernetes-validations: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. @@ -431,9 +440,9 @@ spec: type: object type: object tls: - description: OfflineTlsConfigs configures server TLS for the - offline feast service. in an openshift cluster, this is - configured by default using service serving certificates. + description: TlsConfigs configures server TLS for a feast + service. 
in an openshift cluster, this is configured by + default using service serving certificates. properties: disable: description: will disable TLS for the feast service. useful @@ -463,9 +472,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - verifyClient: - description: verify the client TLS certificate. - type: boolean type: object x-kubernetes-validations: - message: '`secretRef` required if `disable` is false.' @@ -737,6 +743,10 @@ spec: - mysql - hazelcast - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase type: string required: - secretRef @@ -746,6 +756,12 @@ spec: x-kubernetes-validations: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. @@ -1567,6 +1583,7 @@ spec: rule: self.mountPath.matches('^/[^:]*$') type: enum: + - file - dask - duckdb type: string @@ -1600,8 +1617,10 @@ spec: - redshift - spark - postgres - - feast_trino.trino.TrinoOfflineStore + - trino - redis + - athena + - mssql type: string required: - secretRef @@ -1612,6 +1631,12 @@ spec: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. @@ -1670,10 +1695,9 @@ spec: type: object type: object tls: - description: OfflineTlsConfigs configures server TLS for - the offline feast service. in an openshift cluster, - this is configured by default using service serving - certificates. 
+ description: TlsConfigs configures server TLS for a feast + service. in an openshift cluster, this is configured + by default using service serving certificates. properties: disable: description: will disable TLS for the feast service. @@ -1703,9 +1727,6 @@ spec: type: string type: object x-kubernetes-map-type: atomic - verifyClient: - description: verify the client TLS certificate. - type: boolean type: object x-kubernetes-validations: - message: '`secretRef` required if `disable` is false.' @@ -1981,6 +2002,10 @@ spec: - mysql - hazelcast - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase type: string required: - secretRef @@ -1991,6 +2016,12 @@ spec: - message: One selection required between file or store. rule: '[has(self.file), has(self.store)].exists_one(c, c)' + replicas: + description: |- + Replicas determines the number of pods for the feast service. + When Replicas > 1, persistence is recommended. + format: int32 + type: integer resources: description: ResourceRequirements describes the compute resource requirements. 
diff --git a/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go b/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go index 60235fe687..0ee269bda1 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go @@ -78,7 +78,7 @@ sqlalchemy_config_kwargs: pool_pre_ping: true ` -var invalidSecretContainingTypeYamlString = ` +var secretContainingValidTypeYamlString = ` type: cassandra hosts: - 192.168.1.1 @@ -127,6 +127,7 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { Context("When deploying a resource with all db storage services", func() { const resourceName = "cr-name" var pullPolicy = corev1.PullAlways + var replicas = int32(1) ctx := context.Background() @@ -205,7 +206,7 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { By("creating the custom resource for the Kind FeatureStore") err = k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{}) + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{}) resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ DBPersistence: &feastdevv1alpha1.OfflineStoreDBStorePersistence{ Type: string(offlineType), @@ -305,37 +306,12 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { Expect(err.Error()).To(Equal("secret key invalid.secret.key doesn't exist in secret online-store-secret")) - By("Referring to a secret that contains parameter named type") - resource = &feastdevv1alpha1.FeatureStore{} - err = k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - secret := &corev1.Secret{} - err = k8sClient.Get(ctx, 
onlineSecretNamespacedName, secret) - Expect(err).NotTo(HaveOccurred()) - secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(invalidSecretContainingTypeYamlString) - Expect(k8sClient.Update(ctx, secret)).To(Succeed()) - - resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "online-store-secret"} - resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretKeyName = "" - Expect(k8sClient.Update(ctx, resource)).To(Succeed()) - resource = &feastdevv1alpha1.FeatureStore{} - err = k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).To(HaveOccurred()) - - Expect(err.Error()).To(Equal("secret key cassandra in secret online-store-secret contains invalid tag named type")) - By("Referring to a secret that contains parameter named type with invalid value") resource = &feastdevv1alpha1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - secret = &corev1.Secret{} + secret := &corev1.Secret{} err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) Expect(err).NotTo(HaveOccurred()) secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(invalidSecretTypeYamlString) @@ -353,39 +329,7 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { }) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("secret key cassandra in secret online-store-secret contains invalid tag named type")) - - By("Referring to a secret that contains parameter named registry_type") - resource = &feastdevv1alpha1.FeatureStore{} - err = k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - secret = &corev1.Secret{} - err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) - Expect(err).NotTo(HaveOccurred()) - 
secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(cassandraYamlString) - Expect(k8sClient.Update(ctx, secret)).To(Succeed()) - - secret = &corev1.Secret{} - err = k8sClient.Get(ctx, registrySecretNamespacedName, secret) - Expect(err).NotTo(HaveOccurred()) - secret.Data["sql_custom_registry_key"] = nil - secret.Data[string(services.RegistryDBPersistenceSQLConfigType)] = []byte(invalidSecretRegistryTypeYamlString) - Expect(k8sClient.Update(ctx, secret)).To(Succeed()) - - resource.Spec.Services.Registry.Local.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "registry-store-secret"} - resource.Spec.Services.Registry.Local.Persistence.DBPersistence.SecretKeyName = "" - Expect(k8sClient.Update(ctx, resource)).To(Succeed()) - resource = &feastdevv1alpha1.FeatureStore{} - err = k8sClient.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) - - _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - Expect(err).To(HaveOccurred()) - - Expect(err.Error()).To(Equal("secret key sql in secret registry-store-secret contains invalid tag named registry_type")) + Expect(err.Error()).To(Equal("secret key cassandra in secret online-store-secret contains tag named type with value wrong")) }) It("should successfully reconcile the resource", func() { @@ -506,6 +450,60 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { Expect(err).NotTo(HaveOccurred()) Expect(controllerutil.HasControllerReference(svc)).To(BeTrue()) Expect(svc.Spec.Ports[0].TargetPort).To(Equal(intstr.FromInt(int(services.FeastServiceConstants[services.RegistryFeastType].TargetHttpPort)))) + + By("Referring to a secret that contains parameter named type") + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{} + err = k8sClient.Get(ctx, 
onlineSecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(secretContainingValidTypeYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "online-store-secret"} + resource.Spec.Services.OnlineStore.Persistence.DBPersistence.SecretKeyName = "" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + + Expect(err).To(Not(HaveOccurred())) + + By("Referring to a secret that contains parameter named registry_type") + resource = &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + secret = &corev1.Secret{} + err = k8sClient.Get(ctx, onlineSecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data[string(services.OnlineDBPersistenceCassandraConfigType)] = []byte(cassandraYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + secret = &corev1.Secret{} + err = k8sClient.Get(ctx, registrySecretNamespacedName, secret) + Expect(err).NotTo(HaveOccurred()) + secret.Data["sql_custom_registry_key"] = nil + secret.Data[string(services.RegistryDBPersistenceSQLConfigType)] = []byte(invalidSecretRegistryTypeYamlString) + Expect(k8sClient.Update(ctx, secret)).To(Succeed()) + + resource.Spec.Services.Registry.Local.Persistence.DBPersistence.SecretRef = corev1.LocalObjectReference{Name: "registry-store-secret"} + resource.Spec.Services.Registry.Local.Persistence.DBPersistence.SecretKeyName = "" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + resource = &feastdevv1alpha1.FeatureStore{} + err = 
k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).To(Not(HaveOccurred())) }) It("should properly encode a feature_store.yaml config", func() { diff --git a/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go b/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go index 796de8e526..a762faa5a2 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go @@ -48,6 +48,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { const resourceName = "services-ephemeral" const offlineType = "duckdb" var pullPolicy = corev1.PullAlways + var replicas = int32(1) var testEnvVarName = "testEnvVarName" var testEnvVarValue = "testEnvVarValue" @@ -65,7 +66,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { By("creating the custom resource for the Kind FeatureStore") err := k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}) resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ diff --git a/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go 
b/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go index 4930f3fc59..57dd3a290d 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go @@ -48,6 +48,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { Context("When deploying a resource with all ephemeral services and Kubernetes authorization", func() { const resourceName = "kubernetes-authorization" var pullPolicy = corev1.PullAlways + var replicas = int32(1) ctx := context.Background() @@ -62,7 +63,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { By("creating the custom resource for the Kind FeatureStore") err := k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{}) + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{}) resource.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{KubernetesAuthz: &feastdevv1alpha1.KubernetesAuthz{ Roles: roles, }} diff --git a/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go b/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go index db07418c92..f4a21a28f1 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go @@ -46,6 +46,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Context("When deploying a resource with all ephemeral services", func() { const resourceName = "services-object-store" var pullPolicy = corev1.PullAlways + var replicas = int32(1) var testEnvVarName = "testEnvVarName" var testEnvVarValue = "testEnvVarValue" @@ -67,7 
+68,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { By("creating the custom resource for the Kind FeatureStore") err := k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}) resource.Spec.Services.OnlineStore = nil resource.Spec.Services.OfflineStore = nil diff --git a/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go b/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go index c062a573df..eb320c5bb3 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go @@ -49,6 +49,7 @@ var _ = Describe("FeatureStore Controller-OIDC authorization", func() { const resourceName = "oidc-authorization" const oidcSecretName = "oidc-secret" var pullPolicy = corev1.PullAlways + var replicas = int32(1) ctx := context.Background() @@ -73,7 +74,7 @@ var _ = Describe("FeatureStore Controller-OIDC authorization", func() { By("creating the custom resource for the Kind FeatureStore") err = k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{}) + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{}) resource.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{OidcAuthz: &feastdevv1alpha1.OidcAuthz{ SecretRef: 
corev1.LocalObjectReference{ Name: oidcSecretName, diff --git a/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go b/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go index d0adc62c7c..fe0caa38e6 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go @@ -50,6 +50,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Context("When deploying a resource with all ephemeral services", func() { const resourceName = "services-pvc" var pullPolicy = corev1.PullAlways + var replicas = int32(1) var testEnvVarName = "testEnvVarName" var testEnvVarValue = "testEnvVarValue" @@ -77,7 +78,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { By("creating the custom resource for the Kind FeatureStore") err := k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}) resource.Spec.Services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{ FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ diff --git a/infra/feast-operator/internal/controller/featurestore_controller_test.go b/infra/feast-operator/internal/controller/featurestore_controller_test.go index 44c81eca59..debd63300b 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_test.go @@ -404,6 +404,7 @@ var _ = 
Describe("FeatureStore Controller", func() { Context("When reconciling a resource with all services enabled", func() { const resourceName = "services" var pullPolicy = corev1.PullAlways + var replicas = int32(1) var testEnvVarName = "testEnvVarName" var testEnvVarValue = "testEnvVarValue" @@ -419,7 +420,7 @@ var _ = Describe("FeatureStore Controller", func() { By("creating the custom resource for the Kind FeatureStore") err := k8sClient.Get(ctx, typeNamespacedName, featurestore) if err != nil && errors.IsNotFound(err) { - resource := createFeatureStoreResource(resourceName, image, pullPolicy, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, + resource := createFeatureStoreResource(resourceName, image, pullPolicy, replicas, &[]corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}}) Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } @@ -870,6 +871,114 @@ var _ = Describe("FeatureStore Controller", func() { Expect(areEnvVarArraysEqual(deploy.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue + "1"}, {Name: services.FeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}}})).To(BeTrue()) }) + It("Should scale online/offline store service", func() { + By("Reconciling the created resource") + controllerReconciler := &FeatureStoreReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + resource := &feastdevv1alpha1.FeatureStore{} + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + req, err := 
labels.NewRequirement(services.NameLabelKey, selection.Equals, []string{resource.Name}) + Expect(err).NotTo(HaveOccurred()) + labelSelector := labels.NewSelector().Add(*req) + listOpts := &client.ListOptions{Namespace: resource.Namespace, LabelSelector: labelSelector} + deployList := appsv1.DeploymentList{} + err = k8sClient.List(ctx, &deployList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(deployList.Items).To(HaveLen(3)) + + svcList := corev1.ServiceList{} + err = k8sClient.List(ctx, &svcList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(svcList.Items).To(HaveLen(3)) + + cmList := corev1.ConfigMapList{} + err = k8sClient.List(ctx, &cmList, listOpts) + Expect(err).NotTo(HaveOccurred()) + Expect(cmList.Items).To(HaveLen(1)) + + feast := services.FeastServices{ + Handler: handler.FeastHandler{ + Client: controllerReconciler.Client, + Context: ctx, + Scheme: controllerReconciler.Scheme, + FeatureStore: resource, + }, + } + + fsYamlStr := "" + fsYamlStr, err = feast.GetServiceFeatureStoreYamlBase64(services.OnlineFeastType) + Expect(err).NotTo(HaveOccurred()) + + // check online config + deploy_online := &appsv1.Deployment{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.OnlineFeastType), + Namespace: resource.Namespace, + }, + deploy_online) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy_online.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy_online.Name)) + Expect(deploy_online.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(deploy_online.Spec.Template.Spec.Containers[0].Env).To(HaveLen(3)) + Expect(areEnvVarArraysEqual(deploy_online.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: testEnvVarName, Value: testEnvVarValue}, {Name: services.FeatureStoreYamlEnvVar, Value: fsYamlStr}, {Name: "fieldRefName", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.namespace"}}}})).To(BeTrue()) + 
Expect(deploy_online.Spec.Template.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways)) + + // check offline config + deploy_offline := &appsv1.Deployment{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.OfflineFeastType), + Namespace: resource.Namespace, + }, + deploy_offline) + Expect(err).NotTo(HaveOccurred()) + Expect(deploy_offline.Spec.Template.Spec.ServiceAccountName).To(Equal(deploy_offline.Name)) + Expect(deploy_offline.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(deploy_offline.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) + Expect(deploy_offline.Spec.Template.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent)) + + // change feast project and reconcile + // scale online replicas to 2 + resourceNew := resource.DeepCopy() + new_replicas := int32(2) + resourceNew.Spec.Services.OnlineStore.Replicas = &new_replicas + resourceNew.Spec.Services.OfflineStore.Replicas = &new_replicas + + err = k8sClient.Update(ctx, resourceNew) + Expect(err).NotTo(HaveOccurred()) + _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + + err = k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.OnlineFeastType), + Namespace: resource.Namespace, + }, + deploy_online) + Expect(err).NotTo(HaveOccurred()) + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: feast.GetFeastServiceName(services.OfflineFeastType), + Namespace: resource.Namespace, + }, + deploy_offline) + Expect(err).NotTo(HaveOccurred()) + + Expect(deploy_online.Spec.Replicas).To(Equal(&new_replicas)) + Expect(deploy_offline.Spec.Replicas).To(Equal(&new_replicas)) + }) + It("Should delete k8s objects owned by the FeatureStore CR", func() { By("changing which feast services are configured in the CR") 
controllerReconciler := &FeatureStoreReconciler{ @@ -1253,7 +1362,7 @@ var _ = Describe("FeatureStore Controller", func() { }) }) -func createFeatureStoreResource(resourceName string, image string, pullPolicy corev1.PullPolicy, envVars *[]corev1.EnvVar) *feastdevv1alpha1.FeatureStore { +func createFeatureStoreResource(resourceName string, image string, pullPolicy corev1.PullPolicy, replicas int32, envVars *[]corev1.EnvVar) *feastdevv1alpha1.FeatureStore { return &feastdevv1alpha1.FeatureStore{ ObjectMeta: metav1.ObjectMeta{ Name: resourceName, @@ -1264,14 +1373,17 @@ func createFeatureStoreResource(resourceName string, image string, pullPolicy co Services: &feastdevv1alpha1.FeatureStoreServices{ OfflineStore: &feastdevv1alpha1.OfflineStore{}, OnlineStore: &feastdevv1alpha1.OnlineStore{ - ServiceConfigs: feastdevv1alpha1.ServiceConfigs{ - DefaultConfigs: feastdevv1alpha1.DefaultConfigs{ - Image: &image, - }, - OptionalConfigs: feastdevv1alpha1.OptionalConfigs{ - Env: envVars, - ImagePullPolicy: &pullPolicy, - Resources: &corev1.ResourceRequirements{}, + StoreServiceConfigs: feastdevv1alpha1.StoreServiceConfigs{ + Replicas: &replicas, + ServiceConfigs: feastdevv1alpha1.ServiceConfigs{ + DefaultConfigs: feastdevv1alpha1.DefaultConfigs{ + Image: &image, + }, + OptionalConfigs: feastdevv1alpha1.OptionalConfigs{ + Env: envVars, + ImagePullPolicy: &pullPolicy, + Resources: &corev1.ResourceRequirements{}, + }, }, }, }, diff --git a/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go b/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go index 45cda31740..c191dae332 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go @@ -56,7 +56,7 @@ var _ = Describe("FeatureStore Controller - Feast service TLS", func() { } featurestore := &feastdevv1alpha1.FeatureStore{} localRef := corev1.LocalObjectReference{Name: 
"test"} - tlsConfigs := feastdevv1alpha1.TlsConfigs{ + tlsConfigs := &feastdevv1alpha1.TlsConfigs{ SecretRef: &localRef, } BeforeEach(func() { @@ -72,16 +72,14 @@ var _ = Describe("FeatureStore Controller - Feast service TLS", func() { FeastProject: feastProject, Services: &feastdevv1alpha1.FeatureStoreServices{ OnlineStore: &feastdevv1alpha1.OnlineStore{ - TLS: &tlsConfigs, + TLS: tlsConfigs, }, OfflineStore: &feastdevv1alpha1.OfflineStore{ - TLS: &feastdevv1alpha1.OfflineTlsConfigs{ - TlsConfigs: tlsConfigs, - }, + TLS: tlsConfigs, }, Registry: &feastdevv1alpha1.Registry{ Local: &feastdevv1alpha1.LocalRegistryConfig{ - TLS: &tlsConfigs, + TLS: tlsConfigs, }, }, }, @@ -396,9 +394,7 @@ var _ = Describe("FeatureStore Controller - Feast service TLS", func() { }, }, OfflineStore: &feastdevv1alpha1.OfflineStore{ - TLS: &feastdevv1alpha1.OfflineTlsConfigs{ - TlsConfigs: tlsConfigs, - }, + TLS: tlsConfigs, }, Registry: &feastdevv1alpha1.Registry{ Remote: &feastdevv1alpha1.RemoteRegistryConfig{ diff --git a/infra/feast-operator/internal/controller/services/repo_config.go b/infra/feast-operator/internal/controller/services/repo_config.go index 22052aa724..5433e99acf 100644 --- a/infra/feast-operator/internal/controller/services/repo_config.go +++ b/infra/feast-operator/internal/controller/services/repo_config.go @@ -50,7 +50,7 @@ func (feast *FeastServices) getServiceRepoConfig(feastType FeastServiceType) (Re func getServiceRepoConfig( feastType FeastServiceType, featureStore *feastdevv1alpha1.FeatureStore, - secretExtractionFunc func(secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { + secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { appliedSpec := featureStore.Status.Applied repoConfig, err := getClientRepoConfig(featureStore, secretExtractionFunc) @@ -59,9 +59,9 @@ func getServiceRepoConfig( } if appliedSpec.AuthzConfig != nil && 
appliedSpec.AuthzConfig.OidcAuthz != nil { - propertiesMap, err := secretExtractionFunc(appliedSpec.AuthzConfig.OidcAuthz.SecretRef.Name, "") - if err != nil { - return repoConfig, err + propertiesMap, authSecretErr := secretExtractionFunc("", appliedSpec.AuthzConfig.OidcAuthz.SecretRef.Name, "") + if authSecretErr != nil { + return repoConfig, authSecretErr } oidcServerProperties := map[string]interface{}{} @@ -109,7 +109,7 @@ func getServiceRepoConfig( return repoConfig, nil } -func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { +func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { repoConfig.Registry = RegistryConfig{} repoConfig.Registry.Path = DefaultRegistryEphemeralPath registryPersistence := services.Registry.Local.Persistence @@ -129,7 +129,7 @@ func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secr if len(secretKeyName) == 0 { secretKeyName = string(repoConfig.Registry.RegistryType) } - parametersMap, err := secretExtractionFunc(dbPersistence.SecretRef.Name, secretKeyName) + parametersMap, err := secretExtractionFunc(dbPersistence.Type, dbPersistence.SecretRef.Name, secretKeyName) if err != nil { return err } @@ -149,7 +149,7 @@ func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secr return nil } -func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { +func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, 
error), repoConfig *RepoConfig) error { repoConfig.OnlineStore = OnlineStoreConfig{} repoConfig.OnlineStore.Path = DefaultOnlineStoreEphemeralPath @@ -170,7 +170,7 @@ func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secret secretKeyName = string(repoConfig.OnlineStore.Type) } - parametersMap, err := secretExtractionFunc(dbPersistence.SecretRef.Name, secretKeyName) + parametersMap, err := secretExtractionFunc(dbPersistence.Type, dbPersistence.SecretRef.Name, secretKeyName) if err != nil { return err } @@ -187,7 +187,7 @@ func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secret return nil } -func setRepoConfigOffline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { +func setRepoConfigOffline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { repoConfig.OfflineStore = OfflineStoreConfig{} repoConfig.OfflineStore.Type = OfflineFilePersistenceDaskConfigType offlineStorePersistence := services.OfflineStore.Persistence @@ -205,7 +205,7 @@ func setRepoConfigOffline(services *feastdevv1alpha1.FeatureStoreServices, secre secretKeyName = string(repoConfig.OfflineStore.Type) } - parametersMap, err := secretExtractionFunc(dbPersistence.SecretRef.Name, secretKeyName) + parametersMap, err := secretExtractionFunc(dbPersistence.Type, dbPersistence.SecretRef.Name, secretKeyName) if err != nil { return err } @@ -224,7 +224,7 @@ func setRepoConfigOffline(services *feastdevv1alpha1.FeatureStoreServices, secre return nil } -func (feast *FeastServices) getClientFeatureStoreYaml(secretExtractionFunc func(secretRef string, secretKeyName string) (map[string]interface{}, error)) ([]byte, error) { +func (feast *FeastServices) getClientFeatureStoreYaml(secretExtractionFunc 
func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) ([]byte, error) { clientRepo, err := getClientRepoConfig(feast.Handler.FeatureStore, secretExtractionFunc) if err != nil { return []byte{}, err @@ -234,7 +234,7 @@ func (feast *FeastServices) getClientFeatureStoreYaml(secretExtractionFunc func( func getClientRepoConfig( featureStore *feastdevv1alpha1.FeatureStore, - secretExtractionFunc func(secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { + secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { status := featureStore.Status appliedServices := status.Applied.Services clientRepoConfig := RepoConfig{ @@ -248,9 +248,8 @@ func getClientRepoConfig( Host: strings.Split(status.ServiceHostnames.OfflineStore, ":")[0], Port: HttpPort, } - if appliedServices.OfflineStore != nil && appliedServices.OfflineStore.TLS != nil && - (&appliedServices.OfflineStore.TLS.TlsConfigs).IsTLS() { - clientRepoConfig.OfflineStore.Cert = GetTlsPath(OfflineFeastType) + appliedServices.OfflineStore.TLS.TlsConfigs.SecretKeyNames.TlsCrt + if appliedServices.OfflineStore != nil && appliedServices.OfflineStore.TLS.IsTLS() { + clientRepoConfig.OfflineStore.Cert = GetTlsPath(OfflineFeastType) + appliedServices.OfflineStore.TLS.SecretKeyNames.TlsCrt clientRepoConfig.OfflineStore.Port = HttpsPort clientRepoConfig.OfflineStore.Scheme = HttpsScheme } @@ -292,7 +291,7 @@ func getClientRepoConfig( Type: OidcAuthType, } - propertiesMap, err := secretExtractionFunc(status.Applied.AuthzConfig.OidcAuthz.SecretRef.Name, "") + propertiesMap, err := secretExtractionFunc("", status.Applied.AuthzConfig.OidcAuthz.SecretRef.Name, "") if err != nil { return clientRepoConfig, err } @@ -318,7 +317,7 @@ func getActualPath(filePath string, pvcConfig *feastdevv1alpha1.PvcConfig) strin return path.Join(pvcConfig.MountPath, filePath) } -func (feast 
*FeastServices) extractConfigFromSecret(secretRef string, secretKeyName string) (map[string]interface{}, error) { +func (feast *FeastServices) extractConfigFromSecret(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { secret, err := feast.getSecret(secretRef) if err != nil { return nil, err @@ -330,18 +329,20 @@ func (feast *FeastServices) extractConfigFromSecret(secretRef string, secretKeyN if !exists { return nil, fmt.Errorf("secret key %s doesn't exist in secret %s", secretKeyName, secretRef) } + err = yaml.Unmarshal(val, ¶meters) if err != nil { return nil, fmt.Errorf("secret %s contains invalid value", secretKeyName) } - _, exists = parameters["type"] - if exists { - return nil, fmt.Errorf("secret key %s in secret %s contains invalid tag named type", secretKeyName, secretRef) + + typeVal, typeExists := parameters["type"] + if typeExists && storeType != typeVal { + return nil, fmt.Errorf("secret key %s in secret %s contains tag named type with value %s", secretKeyName, secretRef, typeVal) } - _, exists = parameters["registry_type"] - if exists { - return nil, fmt.Errorf("secret key %s in secret %s contains invalid tag named registry_type", secretKeyName, secretRef) + typeVal, typeExists = parameters["registry_type"] + if typeExists && storeType != typeVal { + return nil, fmt.Errorf("secret key %s in secret %s contains tag named registry_type with value %s", secretKeyName, secretRef, typeVal) } } else { for k, v := range secret.Data { diff --git a/infra/feast-operator/internal/controller/services/repo_config_test.go b/infra/feast-operator/internal/controller/services/repo_config_test.go index b148f90470..7f017f4d10 100644 --- a/infra/feast-operator/internal/controller/services/repo_config_test.go +++ b/infra/feast-operator/internal/controller/services/repo_config_test.go @@ -486,17 +486,17 @@ func minimalFeatureStoreWithAllServices() *feastdevv1alpha1.FeatureStore { return feast } -func 
emptyMockExtractConfigFromSecret(secretRef string, secretKeyName string) (map[string]interface{}, error) { +func emptyMockExtractConfigFromSecret(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { return map[string]interface{}{}, nil } -func mockExtractConfigFromSecret(secretRef string, secretKeyName string) (map[string]interface{}, error) { +func mockExtractConfigFromSecret(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { return createParameterMap(), nil } func mockOidcConfigFromSecret( - oidcProperties map[string]interface{}) func(secretRef string, secretKeyName string) (map[string]interface{}, error) { - return func(secretRef string, secretKeyName string) (map[string]interface{}, error) { + oidcProperties map[string]interface{}) func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { + return func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error) { return oidcProperties, nil } } diff --git a/infra/feast-operator/internal/controller/services/services.go b/infra/feast-operator/internal/controller/services/services.go index 60aabebe02..f85597e648 100644 --- a/infra/feast-operator/internal/controller/services/services.go +++ b/infra/feast-operator/internal/controller/services/services.go @@ -293,7 +293,7 @@ func (feast *FeastServices) setDeployment(deploy *appsv1.Deployment, feastType F probeHandler := getProbeHandler(feastType, tls) deploy.Spec = appsv1.DeploymentSpec{ - Replicas: &DefaultReplicas, + Replicas: feast.getServiceReplicas(feastType), Selector: metav1.SetAsLabelSelector(deploy.GetLabels()), Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -373,13 +373,6 @@ func (feast *FeastServices) getContainerCommand(feastType FeastServiceType) []st } deploySettings.Args = append(deploySettings.Args, []string{"-p", strconv.Itoa(int(targetPort))}...) 
- if feastType == OfflineFeastType { - if tls.IsTLS() && feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.TLS.VerifyClient != nil { - deploySettings.Args = append(deploySettings.Args, - []string{"--verify_client", strconv.FormatBool(*feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.TLS.VerifyClient)}...) - } - } - // Combine base command, options, and arguments feastCommand := append([]string{baseCommand}, options...) feastCommand = append(feastCommand, deploySettings.Args...) @@ -485,6 +478,21 @@ func (feast *FeastServices) getServiceConfigs(feastType FeastServiceType) feastd return feastdevv1alpha1.ServiceConfigs{} } +func (feast *FeastServices) getServiceReplicas(feastType FeastServiceType) *int32 { + appliedServices := feast.Handler.FeatureStore.Status.Applied.Services + switch feastType { + case OfflineFeastType: + if feast.isOfflinStore() { + return appliedServices.OfflineStore.Replicas + } + case OnlineFeastType: + if feast.isOnlinStore() { + return appliedServices.OnlineStore.Replicas + } + } + return &DefaultReplicas +} + func (feast *FeastServices) getLogLevelForType(feastType FeastServiceType) *string { services := feast.Handler.FeatureStore.Status.Applied.Services switch feastType { @@ -534,11 +542,8 @@ func (feast *FeastServices) setServiceHostnames() error { domain := svcDomain + ":" if feast.isOfflinStore() { objMeta := feast.GetObjectMeta(OfflineFeastType) - port := strconv.Itoa(HttpPort) - if feast.offlineTls() { - port = strconv.Itoa(HttpsPort) - } - feast.Handler.FeatureStore.Status.ServiceHostnames.OfflineStore = objMeta.Name + "." + objMeta.Namespace + domain + port + feast.Handler.FeatureStore.Status.ServiceHostnames.OfflineStore = objMeta.Name + "." 
+ objMeta.Namespace + domain + + getPortStr(feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.TLS) } if feast.isOnlinStore() { objMeta := feast.GetObjectMeta(OnlineFeastType) diff --git a/infra/feast-operator/internal/controller/services/tls.go b/infra/feast-operator/internal/controller/services/tls.go index c92c4d8de2..a52cc707eb 100644 --- a/infra/feast-operator/internal/controller/services/tls.go +++ b/infra/feast-operator/internal/controller/services/tls.go @@ -29,7 +29,7 @@ func (feast *FeastServices) setTlsDefaults() error { } appliedServices := feast.Handler.FeatureStore.Status.Applied.Services if feast.isOfflinStore() && appliedServices.OfflineStore.TLS != nil { - tlsDefaults(&appliedServices.OfflineStore.TLS.TlsConfigs) + tlsDefaults(appliedServices.OfflineStore.TLS) } if feast.isOnlinStore() { tlsDefaults(appliedServices.OnlineStore.TLS) @@ -43,11 +43,9 @@ func (feast *FeastServices) setTlsDefaults() error { func (feast *FeastServices) setOpenshiftTls() error { appliedServices := feast.Handler.FeatureStore.Status.Applied.Services if feast.offlineOpenshiftTls() { - appliedServices.OfflineStore.TLS = &feastdevv1alpha1.OfflineTlsConfigs{ - TlsConfigs: feastdevv1alpha1.TlsConfigs{ - SecretRef: &corev1.LocalObjectReference{ - Name: feast.initFeastSvc(OfflineFeastType).Name + tlsNameSuffix, - }, + appliedServices.OfflineStore.TLS = &feastdevv1alpha1.TlsConfigs{ + SecretRef: &corev1.LocalObjectReference{ + Name: feast.initFeastSvc(OfflineFeastType).Name + tlsNameSuffix, }, } } @@ -103,8 +101,8 @@ func (feast *FeastServices) getTlsConfigs(feastType FeastServiceType) (tls *feas appliedServices := feast.Handler.FeatureStore.Status.Applied.Services switch feastType { case OfflineFeastType: - if feast.isOfflinStore() && appliedServices.OfflineStore.TLS != nil { - tls = &appliedServices.OfflineStore.TLS.TlsConfigs + if feast.isOfflinStore() { + tls = appliedServices.OfflineStore.TLS } case OnlineFeastType: if feast.isOnlinStore() { @@ -154,12 +152,6 @@ 
func (feast *FeastServices) remoteRegistryOpenshiftTls() (bool, error) { return false, nil } -func (feast *FeastServices) offlineTls() bool { - return feast.isOfflinStore() && - feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.TLS != nil && - (&feast.Handler.FeatureStore.Status.Applied.Services.OfflineStore.TLS.TlsConfigs).IsTLS() -} - func (feast *FeastServices) localRegistryTls() bool { return localRegistryTls(feast.Handler.FeatureStore) } diff --git a/infra/feast-operator/internal/controller/services/tls_test.go b/infra/feast-operator/internal/controller/services/tls_test.go index 2a66d8a4fd..17d23dcf72 100644 --- a/infra/feast-operator/internal/controller/services/tls_test.go +++ b/infra/feast-operator/internal/controller/services/tls_test.go @@ -58,7 +58,6 @@ var _ = Describe("TLS Config", func() { Expect(tls.IsTLS()).To(BeFalse()) Expect(getPortStr(tls)).To(Equal("80")) - Expect(feast.offlineTls()).To(BeFalse()) Expect(feast.remoteRegistryTls()).To(BeFalse()) Expect(feast.localRegistryTls()).To(BeFalse()) Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeFalse()) @@ -87,7 +86,6 @@ var _ = Describe("TLS Config", func() { Expect(getPortStr(tls)).To(Equal("443")) Expect(GetTlsPath(RegistryFeastType)).To(Equal("/tls/registry/")) - Expect(feast.offlineTls()).To(BeFalse()) Expect(feast.remoteRegistryTls()).To(BeFalse()) Expect(feast.localRegistryTls()).To(BeTrue()) Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeFalse()) @@ -127,7 +125,6 @@ var _ = Describe("TLS Config", func() { Expect(tls.SecretKeyNames).To(Equal(secretKeyNames)) Expect(tls.IsTLS()).To(BeTrue()) - Expect(feast.offlineTls()).To(BeTrue()) Expect(feast.remoteRegistryTls()).To(BeFalse()) Expect(feast.localRegistryTls()).To(BeTrue()) Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeTrue()) @@ -189,7 +186,6 @@ var _ = Describe("TLS Config", func() { Expect(getPortStr(tls)).To(Equal("443")) Expect(GetTlsPath(RegistryFeastType)).To(Equal("/tls/registry/")) - 
Expect(feast.offlineTls()).To(BeFalse()) Expect(feast.remoteRegistryTls()).To(BeFalse()) Expect(feast.localRegistryTls()).To(BeTrue()) Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeFalse()) @@ -238,7 +234,6 @@ var _ = Describe("TLS Config", func() { Expect(getPortStr(tls)).To(Equal("80")) Expect(GetTlsPath(RegistryFeastType)).To(Equal("/tls/registry/")) - Expect(feast.offlineTls()).To(BeTrue()) Expect(feast.remoteRegistryTls()).To(BeFalse()) Expect(feast.localRegistryTls()).To(BeFalse()) Expect(feast.isOpenShiftTls(OfflineFeastType)).To(BeTrue()) diff --git a/infra/feast-operator/internal/controller/services/util.go b/infra/feast-operator/internal/controller/services/util.go index 85bd02e653..631709d6ba 100644 --- a/infra/feast-operator/internal/controller/services/util.go +++ b/infra/feast-operator/internal/controller/services/util.go @@ -122,7 +122,7 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { } } - setServiceDefaultConfigs(&services.OfflineStore.ServiceConfigs.DefaultConfigs) + setStoreServiceDefaultConfigs(&services.OfflineStore.StoreServiceConfigs) } if services.OnlineStore != nil { @@ -147,7 +147,7 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { } } - setServiceDefaultConfigs(&services.OnlineStore.ServiceConfigs.DefaultConfigs) + setStoreServiceDefaultConfigs(&services.OnlineStore.StoreServiceConfigs) } // overwrite status.applied with every reconcile applied.DeepCopyInto(&cr.Status.Applied) @@ -159,6 +159,13 @@ func setServiceDefaultConfigs(defaultConfigs *feastdevv1alpha1.DefaultConfigs) { } } +func setStoreServiceDefaultConfigs(storeServiceConfigs *feastdevv1alpha1.StoreServiceConfigs) { + if storeServiceConfigs.Replicas == nil { + storeServiceConfigs.Replicas = &DefaultReplicas + } + setServiceDefaultConfigs(&storeServiceConfigs.ServiceConfigs.DefaultConfigs) +} + func checkOfflineStoreFilePersistenceType(value string) error { if slices.Contains(feastdevv1alpha1.ValidOfflineStoreFilePersistenceTypes, value) 
{ return nil diff --git a/infra/feast-operator/test/api/featurestore_types_test.go b/infra/feast-operator/test/api/featurestore_types_test.go index 302abef938..126991266d 100644 --- a/infra/feast-operator/test/api/featurestore_types_test.go +++ b/infra/feast-operator/test/api/featurestore_types_test.go @@ -377,7 +377,7 @@ var _ = Describe("FeatureStore API", func() { }) It("should fail when db persistence type is invalid", func() { - attemptInvalidCreationAndAsserts(ctx, onlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: \"snowflake.online\", \"redis\", \"ikv\", \"datastore\", \"dynamodb\", \"bigtable\", \"postgres\", \"cassandra\", \"mysql\", \"hazelcast\", \"singlestore\"") + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: \"snowflake.online\", \"redis\", \"ikv\", \"datastore\", \"dynamodb\", \"bigtable\", \"postgres\", \"cassandra\", \"mysql\", \"hazelcast\", \"singlestore\", \"hbase\", \"elasticsearch\", \"qdrant\", \"couchbase\"") }) }) @@ -388,7 +388,7 @@ var _ = Describe("FeatureStore API", func() { attemptInvalidCreationAndAsserts(ctx, offlineStoreWithUnmanagedFileType(featurestore), "Unsupported value") }) It("should fail when db persistence type is invalid", func() { - attemptInvalidCreationAndAsserts(ctx, offlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: \"snowflake.offline\", \"bigquery\", \"redshift\", \"spark\", \"postgres\", \"feast_trino.trino.TrinoOfflineStore\", \"redis\"") + attemptInvalidCreationAndAsserts(ctx, offlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: \"snowflake.offline\", \"bigquery\", \"redshift\", \"spark\", \"postgres\", \"trino\", \"redis\", \"athena\", \"mssql\"") }) }) diff --git a/infra/feast-operator/test/e2e/e2e_test.go 
b/infra/feast-operator/test/e2e/e2e_test.go index 7d9fb9af05..fdf58d8f3b 100644 --- a/infra/feast-operator/test/e2e/e2e_test.go +++ b/infra/feast-operator/test/e2e/e2e_test.go @@ -28,145 +28,189 @@ import ( ) const feastControllerNamespace = "feast-operator-system" +const timeout = 2 * time.Minute +const controllerDeploymentName = "feast-operator-controller-manager" var _ = Describe("controller", Ordered, func() { BeforeAll(func() { By("creating manager namespace") cmd := exec.Command("kubectl", "create", "ns", feastControllerNamespace) _, _ = utils.Run(cmd) + var err error + // projectimage stores the name of the image used in the example + var projectimage = "localhost/feast-operator:v0.0.1" + + By("building the manager(Operator) image") + cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the manager(Operator) image on Kind") + err = utils.LoadImageToKindClusterWithName(projectimage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("building the feast image") + cmd = exec.Command("make", "feast-ci-dev-docker-img") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + // this image will be built in above make target. 
+ var feastImage = "feastdev/feature-server:dev" + var feastLocalImage = "localhost/feastdev/feature-server:dev" + + By("Tag the local feast image for the integration tests") + cmd = exec.Command("docker", "image", "tag", feastImage, feastLocalImage) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("loading the the feast image on Kind cluster") + err = utils.LoadImageToKindClusterWithName(feastLocalImage) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("installing CRDs") + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("deploying the controller-manager") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Validating that the controller-manager deployment is in available state") + err = checkIfDeploymentExistsAndAvailable(feastControllerNamespace, controllerDeploymentName, timeout) + Expect(err).To(BeNil(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. \nError: %v\n", + controllerDeploymentName, err, + )) + fmt.Printf("Feast Control Manager Deployment %s is available\n", controllerDeploymentName) }) AfterAll(func() { //Add any post clean up code here. 
+ By("Uninstalling the feast CRD") + cmd := exec.Command("kubectl", "delete", "deployment", controllerDeploymentName, "-n", feastControllerNamespace) + _, err := utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) }) - Context("Operator", func() { + Context("Operator E2E Tests", func() { It("Should be able to deploy and run a default feature store CR successfully", func() { - //var controllerPodName string - var err error - - // projectimage stores the name of the image used in the example - var projectimage = "localhost/feast-operator:v0.0.1" - - By("building the manager(Operator) image") - cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("loading the the manager(Operator) image on Kind") - err = utils.LoadImageToKindClusterWithName(projectimage) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("building the feast image") - cmd = exec.Command("make", "feast-ci-dev-docker-img") - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - // this image will be built in above make target. 
- var feastImage = "feastdev/feature-server:dev" - var feastLocalImage = "localhost/feastdev/feature-server:dev" - - By("Tag the local feast image for the integration tests") - cmd = exec.Command("docker", "image", "tag", feastImage, feastLocalImage) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("loading the the feast image on Kind cluster") - err = utils.LoadImageToKindClusterWithName(feastLocalImage) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("installing CRDs") - cmd = exec.Command("make", "install") - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - By("deploying the controller-manager") - cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage)) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - - timeout := 2 * time.Minute - - controllerDeploymentName := "feast-operator-controller-manager" - By("Validating that the controller-manager deployment is in available state") - err = checkIfDeploymentExistsAndAvailable(feastControllerNamespace, controllerDeploymentName, timeout) - Expect(err).To(BeNil(), fmt.Sprintf( - "Deployment %s is not available but expected to be available. 
\nError: %v\n", - controllerDeploymentName, err, - )) - fmt.Printf("Feast Control Manager Deployment %s is available\n", controllerDeploymentName) - By("deploying the Simple Feast Custom Resource to Kubernetes") - cmd = exec.Command("kubectl", "apply", "-f", - "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml") + namespace := "default" + cmd := exec.Command("kubectl", "apply", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", "-n", namespace) _, cmdOutputerr := utils.Run(cmd) ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) - namespace := "default" - - deploymentNames := [3]string{"feast-simple-feast-setup-registry", "feast-simple-feast-setup-online", - "feast-simple-feast-setup-offline"} - for _, deploymentName := range deploymentNames { - By(fmt.Sprintf("validate the feast deployment: %s is up and in availability state.", deploymentName)) - err = checkIfDeploymentExistsAndAvailable(namespace, deploymentName, timeout) - Expect(err).To(BeNil(), fmt.Sprintf( - "Deployment %s is not available but expected to be available. \nError: %v\n", - deploymentName, err, - )) - fmt.Printf("Feast Deployment %s is available\n", deploymentName) - } - - By("Check if the feast client - kubernetes config map exists.") - configMapName := "feast-simple-feast-setup-client" - err = checkIfConfigMapExists(namespace, configMapName) - Expect(err).To(BeNil(), fmt.Sprintf( - "config map %s is not available but expected to be available. 
\nError: %v\n", - configMapName, err, - )) - fmt.Printf("Feast Deployment %s is available\n", configMapName) - - serviceAccountNames := [3]string{"feast-simple-feast-setup-registry", "feast-simple-feast-setup-online", - "feast-simple-feast-setup-offline"} - for _, serviceAccountName := range serviceAccountNames { - By(fmt.Sprintf("validate the feast service account: %s is available.", serviceAccountName)) - err = checkIfServiceAccountExists(namespace, serviceAccountName) - Expect(err).To(BeNil(), fmt.Sprintf( - "Service account %s does not exist in namespace %s. Error: %v", - serviceAccountName, namespace, err, - )) - fmt.Printf("Service account %s exists in namespace %s\n", serviceAccountName, namespace) - } - - serviceNames := [3]string{"feast-simple-feast-setup-registry", "feast-simple-feast-setup-online", - "feast-simple-feast-setup-offline"} - for _, serviceName := range serviceNames { - By(fmt.Sprintf("validate the kubernetes service name: %s is available.", serviceName)) - err = checkIfKubernetesServiceExists(namespace, serviceName) - Expect(err).To(BeNil(), fmt.Sprintf( - "kubernetes service %s is not available but expected to be available. \nError: %v\n", - serviceName, err, - )) - fmt.Printf("kubernetes service %s is available\n", serviceName) - } - - By(fmt.Sprintf("Checking FeatureStore customer resource: %s is in Ready Status.", "simple-feast-setup")) - err = checkIfFeatureStoreCustomResourceConditionsInReady("simple-feast-setup", namespace) - Expect(err).To(BeNil(), fmt.Sprintf( - "FeatureStore custom resource %s all conditions are not in ready state. 
\nError: %v\n", - "simple-feast-setup", err, - )) - fmt.Printf("FeatureStore customer resource %s conditions are in Ready State\n", "simple-feast-setup") + featureStoreName := "simple-feast-setup" + validateTheFeatureStoreCustomResource(namespace, featureStoreName, timeout) By("deleting the feast deployment") cmd = exec.Command("kubectl", "delete", "-f", "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml") _, cmdOutputerr = utils.Run(cmd) ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + }) + + It("Should be able to deploy and run a feature store with remote registry CR successfully", func() { + By("deploying the Simple Feast Custom Resource to Kubernetes") + namespace := "default" + cmd := exec.Command("kubectl", "apply", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", "-n", namespace) + _, cmdOutputerr := utils.Run(cmd) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + + featureStoreName := "simple-feast-setup" + validateTheFeatureStoreCustomResource(namespace, featureStoreName, timeout) + + var remoteRegistryNs = "remote-registry" + By(fmt.Sprintf("Creating the remote registry namespace=%s", remoteRegistryNs)) + cmd = exec.Command("kubectl", "create", "ns", remoteRegistryNs) + _, _ = utils.Run(cmd) + + By("deploying the Simple Feast remote registry Custom Resource on Kubernetes") + cmd = exec.Command("kubectl", "apply", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml", "-n", remoteRegistryNs) + _, cmdOutputerr = utils.Run(cmd) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) - By("Uninstalling the feast CRD") - cmd = exec.Command("kubectl", "delete", "deployment", controllerDeploymentName, "-n", feastControllerNamespace) - _, err = utils.Run(cmd) - ExpectWithOffset(1, err).NotTo(HaveOccurred()) + remoteFeatureStoreName := "simple-feast-remote-setup" + + validateTheFeatureStoreCustomResource(remoteRegistryNs, 
remoteFeatureStoreName, timeout) + + By("deleting the feast remote registry deployment") + cmd = exec.Command("kubectl", "delete", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml", "-n", remoteRegistryNs) + _, cmdOutputerr = utils.Run(cmd) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + By("deleting the feast deployment") + cmd = exec.Command("kubectl", "delete", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", "-n", namespace) + _, cmdOutputerr = utils.Run(cmd) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) }) }) }) + +func validateTheFeatureStoreCustomResource(namespace string, featureStoreName string, timeout time.Duration) { + hasRemoteRegistry, err := isFeatureStoreHavingRemoteRegistry(namespace, featureStoreName) + Expect(err).To(BeNil(), fmt.Sprintf( + "Error occurred while checking FeatureStore %s is having remote registry or not. \nError: %v\n", + featureStoreName, err)) + + k8ResourceNames := []string{fmt.Sprintf("feast-%s-online", featureStoreName), + fmt.Sprintf("feast-%s-offline", featureStoreName), + } + + if !hasRemoteRegistry { + k8ResourceNames = append(k8ResourceNames, fmt.Sprintf("feast-%s-registry", featureStoreName)) + } + + for _, deploymentName := range k8ResourceNames { + By(fmt.Sprintf("validate the feast deployment: %s is up and in availability state.", deploymentName)) + err = checkIfDeploymentExistsAndAvailable(namespace, deploymentName, timeout) + Expect(err).To(BeNil(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. 
\nError: %v\n", + deploymentName, err, + )) + fmt.Printf("Feast Deployment %s is available\n", deploymentName) + } + + By("Check if the feast client - kubernetes config map exists.") + configMapName := fmt.Sprintf("feast-%s-client", featureStoreName) + err = checkIfConfigMapExists(namespace, configMapName) + Expect(err).To(BeNil(), fmt.Sprintf( + "config map %s is not available but expected to be available. \nError: %v\n", + configMapName, err, + )) + fmt.Printf("Feast Deployment client config map %s is available\n", configMapName) + + for _, serviceAccountName := range k8ResourceNames { + By(fmt.Sprintf("validate the feast service account: %s is available.", serviceAccountName)) + err = checkIfServiceAccountExists(namespace, serviceAccountName) + Expect(err).To(BeNil(), fmt.Sprintf( + "Service account %s does not exist in namespace %s. Error: %v", + serviceAccountName, namespace, err, + )) + fmt.Printf("Service account %s exists in namespace %s\n", serviceAccountName, namespace) + } + + for _, serviceName := range k8ResourceNames { + By(fmt.Sprintf("validate the kubernetes service name: %s is available.", serviceName)) + err = checkIfKubernetesServiceExists(namespace, serviceName) + Expect(err).To(BeNil(), fmt.Sprintf( + "kubernetes service %s is not available but expected to be available. \nError: %v\n", + serviceName, err, + )) + fmt.Printf("kubernetes service %s is available\n", serviceName) + } + + By(fmt.Sprintf("Checking FeatureStore customer resource: %s is in Ready Status.", featureStoreName)) + err = checkIfFeatureStoreCustomResourceConditionsInReady(featureStoreName, namespace) + Expect(err).To(BeNil(), fmt.Sprintf( + "FeatureStore custom resource %s all conditions are not in ready state. 
\nError: %v\n", + featureStoreName, err, + )) + fmt.Printf("FeatureStore custom resource %s conditions are in Ready State\n", featureStoreName) +} diff --git a/infra/feast-operator/test/e2e/test_util.go b/infra/feast-operator/test/e2e/test_util.go index f30d8cbebf..d92f719fb9 100644 --- a/infra/feast-operator/test/e2e/test_util.go +++ b/infra/feast-operator/test/e2e/test_util.go @@ -3,10 +3,15 @@ package e2e import ( "bytes" "encoding/json" + "errors" "fmt" "os/exec" "strings" "time" + + appsv1 "k8s.io/api/apps/v1" + + "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" ) // dynamically checks if all conditions of custom resource featurestore are in "Ready" state. @@ -23,36 +28,17 @@ func checkIfFeatureStoreCustomResourceConditionsInReady(featureStoreName, namesp featureStoreName, namespace, err, stderr.String()) } - // Parse the JSON into a generic map - var resource map[string]interface{} + // Parse the JSON into FeatureStore + var resource v1alpha1.FeatureStore if err := json.Unmarshal(out.Bytes(), &resource); err != nil { return fmt.Errorf("failed to parse the resource JSON. Error: %v", err) } - // Traverse the JSON structure to extract conditions - status, ok := resource["status"].(map[string]interface{}) - if !ok { - return fmt.Errorf("status field is missing or invalid in the resource JSON") - } - - conditions, ok := status["conditions"].([]interface{}) - if !ok { - return fmt.Errorf("conditions field is missing or invalid in the status section") - } - // Validate all conditions - for _, condition := range conditions { - conditionMap, ok := condition.(map[string]interface{}) - if !ok { - return fmt.Errorf("invalid condition format") - } - - conditionType := conditionMap["type"].(string) - conditionStatus := conditionMap["status"].(string) - - if conditionStatus != "True" { + for _, condition := range resource.Status.Conditions { + if condition.Status != "True" { return fmt.Errorf(" FeatureStore=%s condition '%s' is not in 'Ready' state. 
Status: %s", - featureStoreName, conditionType, conditionStatus) + featureStoreName, condition.Type, condition.Status) } } @@ -84,30 +70,15 @@ func checkIfDeploymentExistsAndAvailable(namespace string, deploymentName string continue } - // Parse the JSON output into a map - var result map[string]interface{} + // Parse the JSON output into Deployment + var result appsv1.Deployment if err := json.Unmarshal(output.Bytes(), &result); err != nil { return fmt.Errorf("failed to parse deployment JSON: %v", err) } - // Navigate to status.conditions - status, ok := result["status"].(map[string]interface{}) - if !ok { - return fmt.Errorf("failed to get status field from deployment JSON") - } - - conditions, ok := status["conditions"].([]interface{}) - if !ok { - return fmt.Errorf("failed to get conditions field from deployment JSON") - } - // Check for Available condition - for _, condition := range conditions { - cond, ok := condition.(map[string]interface{}) - if !ok { - continue - } - if cond["type"] == "Available" && cond["status"] == "True" { + for _, condition := range result.Status.Conditions { + if condition.Type == "Available" && condition.Status == "True" { return nil // Deployment is available } } @@ -184,3 +155,46 @@ func checkIfKubernetesServiceExists(namespace, serviceName string) error { return nil } + +func isFeatureStoreHavingRemoteRegistry(namespace, featureStoreName string) (bool, error) { + cmd := exec.Command("kubectl", "get", "featurestore", featureStoreName, "-n", namespace, + "-o=jsonpath='{.spec.services.registry}'") + + // Capture the output + output, err := cmd.Output() + if err != nil { + return false, err // Return false on command execution failure + } + + // Convert output to string and trim any extra spaces + result := strings.TrimSpace(string(output)) + + // Remove single quotes if present + if strings.HasPrefix(result, "'") && strings.HasSuffix(result, "'") { + result = strings.Trim(result, "'") + } + + if result == "" { + return false, 
errors.New("kubectl get featurestore command returned empty output") + } + + // Parse the JSON into a map + var registryConfig v1alpha1.Registry + if err := json.Unmarshal([]byte(result), ®istryConfig); err != nil { + return false, err // Return false on JSON parsing failure + } + + if registryConfig.Remote == nil { + return false, nil + } + + hasHostname := registryConfig.Remote.Hostname != nil + hasValidFeastRef := registryConfig.Remote.FeastRef != nil && + registryConfig.Remote.FeastRef.Name != "" + + if hasHostname || hasValidFeastRef { + return true, nil + } + + return false, nil +} diff --git a/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml new file mode 100644 index 0000000000..61c010f057 --- /dev/null +++ b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml @@ -0,0 +1,16 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: simple-feast-remote-setup +spec: + feastProject: my_project + services: + onlineStore: + image: 'localhost/feastdev/feature-server:dev' + offlineStore: + image: 'localhost/feastdev/feature-server:dev' + registry: + remote: + feastRef: + name: simple-feast-setup + namespace: default \ No newline at end of file diff --git a/sdk/python/feast/cli.py b/sdk/python/feast/cli.py index a02013b11f..ccfcd1471c 100644 --- a/sdk/python/feast/cli.py +++ b/sdk/python/feast/cli.py @@ -865,6 +865,7 @@ def materialize_incremental_command(ctx: click.Context, end_ts: str, views: List "cassandra", "hazelcast", "ikv", + "couchbase", ], case_sensitive=False, ), @@ -1132,15 +1133,6 @@ def serve_registry_command( show_default=False, help="path to TLS certificate public key. 
You need to pass --key as well to start server in TLS mode", ) -@click.option( - "--verify_client", - "-v", - "tls_verify_client", - type=click.BOOL, - default="True", - show_default=True, - help="Verify the client or not for the TLS client certificate.", -) @click.pass_context def serve_offline_command( ctx: click.Context, @@ -1148,7 +1140,6 @@ def serve_offline_command( port: int, tls_key_path: str, tls_cert_path: str, - tls_verify_client: bool, ): """Start a remote server locally on a given host, port.""" if (tls_key_path and not tls_cert_path) or (not tls_key_path and tls_cert_path): @@ -1157,7 +1148,7 @@ def serve_offline_command( ) store = create_feature_store(ctx) - store.serve_offline(host, port, tls_key_path, tls_cert_path, tls_verify_client) + store.serve_offline(host, port, tls_key_path, tls_cert_path) @cli.command("validate") diff --git a/sdk/python/feast/entity.py b/sdk/python/feast/entity.py index 290e6307a4..9c529115c8 100644 --- a/sdk/python/feast/entity.py +++ b/sdk/python/feast/entity.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings from datetime import datetime from typing import Dict, List, Optional @@ -79,6 +80,13 @@ def __init__( ValueError: Parameters are specified incorrectly. """ self.name = name + if value_type is None: + warnings.warn( + "Entity value_type will be mandatory in the next release. " + "Please specify a value_type for entity '%s'." 
% name, + DeprecationWarning, + stacklevel=2, + ) self.value_type = value_type or ValueType.UNKNOWN if join_keys and len(join_keys) > 1: diff --git a/sdk/python/feast/feature_store.py b/sdk/python/feast/feature_store.py index 79a0d752ef..4497590201 100644 --- a/sdk/python/feast/feature_store.py +++ b/sdk/python/feast/feature_store.py @@ -1964,14 +1964,11 @@ def serve_offline( port: int, tls_key_path: str = "", tls_cert_path: str = "", - tls_verify_client: bool = True, ) -> None: """Start offline server locally on a given port.""" from feast import offline_server - offline_server.start_server( - self, host, port, tls_key_path, tls_cert_path, tls_verify_client - ) + offline_server.start_server(self, host, port, tls_key_path, tls_cert_path) def serve_transformations(self, port: int) -> None: """Start the feature transformation server locally on a given port.""" diff --git a/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile b/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile index f6bcbae8cd..c1da48f55d 100644 --- a/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile +++ b/sdk/python/feast/infra/feature_servers/multicloud/Dockerfile @@ -1,7 +1,7 @@ FROM python:3.11-slim-bullseye RUN pip install --no-cache-dir pip --upgrade -RUN pip install --no-cache-dir "feast[aws,gcp,snowflake,redis,go,mysql,postgres,opentelemetry,grpcio]" +RUN pip install --no-cache-dir "feast[aws,gcp,snowflake,redis,go,mysql,postgres,opentelemetry,grpcio,k8s]" RUN apt update && apt install -y -V ca-certificates lsb-release wget && \ diff --git a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py index aeb9e3cd68..4b50188632 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py +++ b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark.py @@ -99,6 +99,8 @@ def pull_latest_from_table_or_query( fields_as_string = ", 
".join(fields_with_aliases) aliases_as_string = ", ".join(aliases) + date_partition_column = data_source.date_partition_column + start_date_str = _format_datetime(start_date) end_date_str = _format_datetime(end_date) query = f""" @@ -109,7 +111,7 @@ def pull_latest_from_table_or_query( SELECT {fields_as_string}, ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS feast_row_ FROM {from_expression} t1 - WHERE {timestamp_field} BETWEEN TIMESTAMP('{start_date_str}') AND TIMESTAMP('{end_date_str}') + WHERE {timestamp_field} BETWEEN TIMESTAMP('{start_date_str}') AND TIMESTAMP('{end_date_str}'){" AND "+date_partition_column+" >= '"+start_date.strftime('%Y-%m-%d')+"' AND "+date_partition_column+" <= '"+end_date.strftime('%Y-%m-%d')+"' " if date_partition_column != "" and date_partition_column is not None else ''} ) t2 WHERE feast_row_ = 1 """ @@ -641,8 +643,15 @@ def _cast_data_frame( {% endfor %} FROM {{ featureview.table_subquery }} WHERE {{ featureview.timestamp_field }} <= '{{ featureview.max_event_timestamp }}' + {% if featureview.date_partition_column != "" and featureview.date_partition_column is not none %} + AND {{ featureview.date_partition_column }} <= '{{ featureview.max_event_timestamp[:10] }}' + {% endif %} + {% if featureview.ttl == 0 %}{% else %} AND {{ featureview.timestamp_field }} >= '{{ featureview.min_event_timestamp }}' + {% if featureview.date_partition_column != "" and featureview.date_partition_column is not none %} + AND {{ featureview.date_partition_column }} >= '{{ featureview.min_event_timestamp[:10] }}' + {% endif %} {% endif %} ), diff --git a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py index 209e3b87e8..7ad331239f 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py +++ 
b/sdk/python/feast/infra/offline_stores/contrib/spark_offline_store/spark_source.py @@ -45,6 +45,7 @@ def __init__( tags: Optional[Dict[str, str]] = None, owner: Optional[str] = "", timestamp_field: Optional[str] = None, + date_partition_column: Optional[str] = None, ): """Creates a SparkSource object. @@ -64,6 +65,8 @@ def __init__( maintainer. timestamp_field: Event timestamp field used for point-in-time joins of feature values. + date_partition_column: The column to partition the data on for faster + retrieval. This is useful for large tables and will limit the number ofi """ # If no name, use the table as the default name. if name is None and table is None: @@ -77,6 +80,7 @@ def __init__( created_timestamp_column=created_timestamp_column, field_mapping=field_mapping, description=description, + date_partition_column=date_partition_column, tags=tags, owner=owner, ) @@ -135,6 +139,7 @@ def from_proto(data_source: DataSourceProto) -> Any: query=spark_options.query, path=spark_options.path, file_format=spark_options.file_format, + date_partition_column=data_source.date_partition_column, timestamp_field=data_source.timestamp_field, created_timestamp_column=data_source.created_timestamp_column, description=data_source.description, @@ -148,6 +153,7 @@ def to_proto(self) -> DataSourceProto: type=DataSourceProto.BATCH_SPARK, data_source_class_type="feast.infra.offline_stores.contrib.spark_offline_store.spark_source.SparkSource", field_mapping=self.field_mapping, + date_partition_column=self.date_partition_column, spark_options=self.spark_options.to_proto(), description=self.description, tags=self.tags, diff --git a/sdk/python/feast/offline_server.py b/sdk/python/feast/offline_server.py index 8774dea8ae..1b714a45c7 100644 --- a/sdk/python/feast/offline_server.py +++ b/sdk/python/feast/offline_server.py @@ -45,7 +45,6 @@ def __init__( location: str, host: str = "localhost", tls_certificates: List = [], - verify_client=False, **kwargs, ): super(OfflineServer, 
self).__init__( @@ -54,7 +53,7 @@ def __init__( str_to_auth_manager_type(store.config.auth_config.type) ), tls_certificates=tls_certificates, - verify_client=verify_client, + verify_client=False, # this is needed for when we don't need mTLS **kwargs, ) self._location = location @@ -568,7 +567,6 @@ def start_server( port: int, tls_key_path: str = "", tls_cert_path: str = "", - tls_verify_client: bool = True, ): _init_auth_manager(store) @@ -591,7 +589,6 @@ def start_server( location=location, host=host, tls_certificates=tls_certificates, - verify_client=tls_verify_client, ) try: logger.info(f"Offline store server serving at: {location}") diff --git a/sdk/python/feast/templates/couchbase/__init__.py b/sdk/python/feast/templates/couchbase/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sdk/python/feast/templates/couchbase/feature_repo/__init__.py b/sdk/python/feast/templates/couchbase/feature_repo/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sdk/python/feast/templates/couchbase/feature_repo/feature_store.yaml b/sdk/python/feast/templates/couchbase/feature_repo/feature_store.yaml new file mode 100644 index 0000000000..bc21e44def --- /dev/null +++ b/sdk/python/feast/templates/couchbase/feature_repo/feature_store.yaml @@ -0,0 +1,11 @@ +project: my_project +registry: /path/to/registry.db +provider: local +online_store: + type: couchbase + connection_string: COUCHBASE_CONNECTION_STRING # Couchbase connection string, copied from 'Connect' page in Couchbase Capella console + user: COUCHBASE_USER # Couchbase username from database access credentials + password: COUCHBASE_PASSWORD # Couchbase password from database access credentials + bucket_name: COUCHBASE_BUCKET_NAME # Couchbase bucket name, defaults to feast + kv_port: COUCHBASE_KV_PORT # Couchbase key-value port, defaults to 11210. Required if custom ports are used. 
+entity_key_serialization_version: 2 diff --git a/sdk/python/feast/templates/couchbase/gitignore b/sdk/python/feast/templates/couchbase/gitignore new file mode 100644 index 0000000000..e86277f60f --- /dev/null +++ b/sdk/python/feast/templates/couchbase/gitignore @@ -0,0 +1,45 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*.pyo +*.pyd + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +*.egg-info/ +dist/ +build/ +.venv + +# Pytest +.cache +*.cover +*.log +.coverage +nosetests.xml +coverage.xml +*.hypothesis/ +*.pytest_cache/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IDEs and Editors +.vscode/ +.idea/ +*.swp +*.swo +*.sublime-workspace +*.sublime-project + +# OS generated files +.DS_Store +Thumbs.db diff --git a/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py b/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py index dc716f45e1..fbfb418278 100644 --- a/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py +++ b/sdk/python/tests/integration/feature_repos/universal/data_sources/file.py @@ -452,9 +452,6 @@ def setup(self, registry: RegistryConfig): str(tls_key_path), "--cert", str(self.tls_cert_path), - # This is needed for the self-signed certificate, disabled verify_client for integration tests. 
- "--verify_client", - str(False), ] self.proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL diff --git a/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py b/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py index 7de60db2da..8ffee04c12 100644 --- a/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py +++ b/sdk/python/tests/integration/feature_repos/universal/online_store/milvus.py @@ -32,5 +32,4 @@ def create_online_store(self) -> Dict[str, Any]: } def teardown(self): - # assert 1 == 4 self.container.stop() diff --git a/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py b/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py index b8f8cc4247..307ba4058c 100644 --- a/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py +++ b/sdk/python/tests/unit/infra/offline_stores/contrib/spark_offline_store/test_spark.py @@ -71,6 +71,68 @@ def test_pull_latest_from_table_with_nested_timestamp_or_query(mock_get_spark_se assert retrieval_job.query.strip() == expected_query.strip() +@patch( + "feast.infra.offline_stores.contrib.spark_offline_store.spark.get_spark_session_or_start_new_with_repoconfig" +) +def test_pull_latest_from_table_with_nested_timestamp_or_query_and_date_partition_column_set( + mock_get_spark_session, +): + mock_spark_session = MagicMock() + mock_get_spark_session.return_value = mock_spark_session + + test_repo_config = RepoConfig( + project="test_project", + registry="test_registry", + provider="local", + offline_store=SparkOfflineStoreConfig(type="spark"), + ) + + test_data_source = SparkSource( + name="test_nested_batch_source", + description="test_nested_batch_source", + table="offline_store_database_name.offline_store_table_name", + timestamp_field="nested_timestamp", + field_mapping={ + "event_header.event_published_datetime_utc": "nested_timestamp", + }, + 
date_partition_column="effective_date", + ) + + # Define the parameters for the method + join_key_columns = ["key1", "key2"] + feature_name_columns = ["feature1", "feature2"] + timestamp_field = "event_header.event_published_datetime_utc" + created_timestamp_column = "created_timestamp" + start_date = datetime(2021, 1, 1) + end_date = datetime(2021, 1, 2) + + # Call the method + retrieval_job = SparkOfflineStore.pull_latest_from_table_or_query( + config=test_repo_config, + data_source=test_data_source, + join_key_columns=join_key_columns, + feature_name_columns=feature_name_columns, + timestamp_field=timestamp_field, + created_timestamp_column=created_timestamp_column, + start_date=start_date, + end_date=end_date, + ) + + expected_query = """SELECT + key1, key2, feature1, feature2, nested_timestamp, created_timestamp + + FROM ( + SELECT key1, key2, feature1, feature2, event_header.event_published_datetime_utc AS nested_timestamp, created_timestamp, + ROW_NUMBER() OVER(PARTITION BY key1, key2 ORDER BY event_header.event_published_datetime_utc DESC, created_timestamp DESC) AS feast_row_ + FROM `offline_store_database_name`.`offline_store_table_name` t1 + WHERE event_header.event_published_datetime_utc BETWEEN TIMESTAMP('2021-01-01 00:00:00.000000') AND TIMESTAMP('2021-01-02 00:00:00.000000') AND effective_date >= '2021-01-01' AND effective_date <= '2021-01-02' + ) t2 + WHERE feast_row_ = 1""" # noqa: W293, W291 + + assert isinstance(retrieval_job, RetrievalJob) + assert retrieval_job.query.strip() == expected_query.strip() + + @patch( "feast.infra.offline_stores.contrib.spark_offline_store.spark.get_spark_session_or_start_new_with_repoconfig" ) @@ -127,3 +189,62 @@ def test_pull_latest_from_table_without_nested_timestamp_or_query( assert isinstance(retrieval_job, RetrievalJob) assert retrieval_job.query.strip() == expected_query.strip() + + +@patch( + "feast.infra.offline_stores.contrib.spark_offline_store.spark.get_spark_session_or_start_new_with_repoconfig" +) +def 
test_pull_latest_from_table_without_nested_timestamp_or_query_and_date_partition_column_set( + mock_get_spark_session, +): + mock_spark_session = MagicMock() + mock_get_spark_session.return_value = mock_spark_session + + test_repo_config = RepoConfig( + project="test_project", + registry="test_registry", + provider="local", + offline_store=SparkOfflineStoreConfig(type="spark"), + ) + + test_data_source = SparkSource( + name="test_batch_source", + description="test_nested_batch_source", + table="offline_store_database_name.offline_store_table_name", + timestamp_field="event_published_datetime_utc", + date_partition_column="effective_date", + ) + + # Define the parameters for the method + join_key_columns = ["key1", "key2"] + feature_name_columns = ["feature1", "feature2"] + timestamp_field = "event_published_datetime_utc" + created_timestamp_column = "created_timestamp" + start_date = datetime(2021, 1, 1) + end_date = datetime(2021, 1, 2) + + # Call the method + retrieval_job = SparkOfflineStore.pull_latest_from_table_or_query( + config=test_repo_config, + data_source=test_data_source, + join_key_columns=join_key_columns, + feature_name_columns=feature_name_columns, + timestamp_field=timestamp_field, + created_timestamp_column=created_timestamp_column, + start_date=start_date, + end_date=end_date, + ) + + expected_query = """SELECT + key1, key2, feature1, feature2, event_published_datetime_utc, created_timestamp + + FROM ( + SELECT key1, key2, feature1, feature2, event_published_datetime_utc, created_timestamp, + ROW_NUMBER() OVER(PARTITION BY key1, key2 ORDER BY event_published_datetime_utc DESC, created_timestamp DESC) AS feast_row_ + FROM `offline_store_database_name`.`offline_store_table_name` t1 + WHERE event_published_datetime_utc BETWEEN TIMESTAMP('2021-01-01 00:00:00.000000') AND TIMESTAMP('2021-01-02 00:00:00.000000') AND effective_date >= '2021-01-01' AND effective_date <= '2021-01-02' + ) t2 + WHERE feast_row_ = 1""" # noqa: W293, W291 + + assert 
isinstance(retrieval_job, RetrievalJob) + assert retrieval_job.query.strip() == expected_query.strip() diff --git a/sdk/python/tests/unit/test_entity.py b/sdk/python/tests/unit/test_entity.py index 78f7123104..b36f363a6f 100644 --- a/sdk/python/tests/unit/test_entity.py +++ b/sdk/python/tests/unit/test_entity.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings + import assertpy import pytest @@ -73,3 +75,16 @@ def test_hash(): s4 = {entity1, entity2, entity3, entity4} assert len(s4) == 3 + + +def test_entity_without_value_type_warns(): + with pytest.warns(DeprecationWarning, match="Entity value_type will be mandatory"): + entity = Entity(name="my-entity") + assert entity.value_type == ValueType.UNKNOWN + + +def test_entity_with_value_type_no_warning(): + with warnings.catch_warnings(): + warnings.simplefilter("error") + entity = Entity(name="my-entity", value_type=ValueType.STRING) + assert entity.value_type == ValueType.STRING