From af3ae29aa04183fac756f227f627f3c52d9be0a9 Mon Sep 17 00:00:00 2001 From: JeffreyDallas <39912573+JeffreyDallas@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:06:35 -0600 Subject: [PATCH 1/4] chore: account init before relay deployment (#1154) Signed-off-by: Jeffrey Tang --- .github/workflows/flow-task-test.yaml | 15 +++++-- .github/workflows/script/helper.sh | 4 +- .github/workflows/script/solo_smoke_test.sh | 46 +++++++++++++++++++++ docs/content/User/StepByStepGuide.md | 8 +++- examples/address-book/README.md | 14 +++++-- src/commands/flags.ts | 4 +- src/commands/mirror_node.ts | 37 ++++++++++++++--- src/commands/relay.ts | 25 +++++++++-- 8 files changed, 133 insertions(+), 20 deletions(-) diff --git a/.github/workflows/flow-task-test.yaml b/.github/workflows/flow-task-test.yaml index 32b0fc903..5f3d17286 100644 --- a/.github/workflows/flow-task-test.yaml +++ b/.github/workflows/flow-task-test.yaml @@ -38,6 +38,9 @@ jobs: example-task-file-test: timeout-minutes: 20 runs-on: solo-linux-large + strategy: + matrix: + type: ["NO_ACCOUNT_INIT", "ACCOUNT_INIT"] steps: - name: Harden Runner uses: step-security/harden-runner@c95a14d0e5bab51a9f56296a4eb0e416910cd350 # v2.10.3 @@ -53,6 +56,11 @@ jobs: node-version: 20 cache: npm + - name: Setup Helm + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 + with: + version: "v3.12.3" # helm version + - name: Setup Kind uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 with: @@ -63,9 +71,8 @@ jobs: verbosity: 3 wait: 120s - - name: Run Example Task File Test + - name: Run Example Task File Test with type ${{ matrix.type }} run: | - task default-with-relay - sleep 10 - .github/workflows/script/solo_smoke_test.sh + task default + .github/workflows/script/solo_smoke_test.sh ${{ matrix.type }} task clean diff --git a/.github/workflows/script/helper.sh b/.github/workflows/script/helper.sh index ffed2745d..768d34c1a 100644 --- a/.github/workflows/script/helper.sh +++ b/.github/workflows/script/helper.sh @@ -14,7 +14,9 @@ function create_test_account () # get private key of the account npm run solo-test -- account get -n solo-e2e --account-id ${OPERATOR_ID} --private-key > test.log - export OPERATOR_KEY=$(grep "privateKey" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') + + # retrieve the field privateKey but not privateKeyRaw + export OPERATOR_KEY=$(grep "privateKey" test.log | grep -v "privateKeyRaw" | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') export CONTRACT_TEST_KEY_ONE=0x$(grep "privateKeyRaw" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') echo "CONTRACT_TEST_KEY_ONE=${CONTRACT_TEST_KEY_ONE}" rm test.log diff --git a/.github/workflows/script/solo_smoke_test.sh b/.github/workflows/script/solo_smoke_test.sh index 401e39e60..da89cde9b 100755 --- a/.github/workflows/script/solo_smoke_test.sh +++ b/.github/workflows/script/solo_smoke_test.sh @@ -86,7 +86,48 @@ function start_sdk_test () return $result } +function check_monitor_log() +{ + # get the logs of mirror-monitor + kubectl get pods -n solo-e2e | grep mirror-monitor | awk '{print $1}' | xargs kubectl logs -n solo-e2e > mirror-monitor.log + + if grep -q "ERROR" mirror-monitor.log; then + echo "mirror-monitor.log contains ERROR" + exit 1 + fi + + # any line contains "Scenario pinger published" should contain the string "Errors: {}" + if grep -q "Scenario pinger published" mirror-monitor.log; then + if grep -q "Errors: {}" mirror-monitor.log; then + echo "mirror-monitor.log contains Scenario pinger published and Errors: {}" + 
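+      # note: grep -q matches anywhere in the whole log, not necessarily on the
+      # same line as the "Scenario pinger published" message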
else + echo "mirror-monitor.log contains Scenario pinger published but not Errors: {}" + exit 1 + fi + fi +} + +function check_importer_log() +{ + kubectl get pods -n solo-e2e | grep mirror-importer | awk '{print $1}' | xargs kubectl logs -n solo-e2e > mirror-importer.log + if grep -q "ERROR" mirror-importer.log; then + echo "mirror-importer.log contains ERROR" + exit 1 + fi +} + +# if first parameter equals to ACCOUNT_INIT, +# then call solo account init before deploy mirror and relay node +if [ "$1" == "ACCOUNT_INIT" ]; then + echo "Call solo account init" + npm run solo-test -- account init -n solo-e2e +fi + +task solo:mirror-node +task solo:relay + echo "Change to parent directory" + cd ../ create_test_account clone_smart_contract_repo @@ -97,3 +138,8 @@ start_contract_test start_sdk_test echo "Sleep a while to wait background transactions to finish" sleep 30 + +echo "Run mirror node acceptance test" +helm test mirror -n solo-e2e --timeout 10m +check_monitor_log +check_importer_log diff --git a/docs/content/User/StepByStepGuide.md b/docs/content/User/StepByStepGuide.md index b84b9bbc9..43c791667 100644 --- a/docs/content/User/StepByStepGuide.md +++ b/docs/content/User/StepByStepGuide.md @@ -1,5 +1,7 @@ ## Advanced User Guide + For those who would like to have more control or need some customized setups, here are some step by step instructions of how to setup and deploy a solo network. + ### Setup Kubernetes cluster #### Remote cluster @@ -28,6 +30,7 @@ Then run the following command to set the kubectl context to the new cluster: ```bash kind create cluster -n "${SOLO_CLUSTER_NAME}" ``` + Example output ``` @@ -48,7 +51,6 @@ Thanks for using kind! 😊 You may now view pods in your cluster using `k9s -A` as below: - ``` Context: kind-solo <0> all Attach Delete | |/ _/ __ \______ @@ -75,7 +77,6 @@ You may now view pods in your cluster using `k9s -A` as below: └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` - ### Step by Step Instructions * Initialize `solo` directories: @@ -136,13 +137,16 @@ Kubernetes Cluster : kind-solo ✔ Generate gRPC TLS Keys ✔ Finalize ``` + PEM key files are generated in `~/.solo/keys` directory. + ``` hedera-node1.crt hedera-node3.crt s-private-node1.pem s-public-node1.pem unused-gossip-pem hedera-node1.key hedera-node3.key s-private-node2.pem s-public-node2.pem unused-tls hedera-node2.crt hedera-node4.crt s-private-node3.pem s-public-node3.pem hedera-node2.key hedera-node4.key s-private-node4.pem s-public-node4.pem ``` + * Setup cluster with shared components ``` diff --git a/examples/address-book/README.md b/examples/address-book/README.md index 96b35b073..392b3517b 100644 --- a/examples/address-book/README.md +++ b/examples/address-book/README.md @@ -1,12 +1,13 @@ # Yahcli Address Book Example -This is an example of how to use Yahcli to pull the ledger and mirror node address book. And to update the ledger address book. It updates File 101 (the ledger address book file) and File 102 (the ledger node details file). +This is an example of how to use Yahcli to pull the ledger and mirror node address book. And to update the ledger address book. It updates File 101 (the ledger address book file) and File 102 (the ledger node details file). NOTE: Mirror Node refers to File 102 as its address book. ## Usage -To get the address book from the ledger, this requires a port forward to be setup on port 50211 to consensus node with node ID = 0. 
+To get the address book from the ledger, this requires a port forward to be setup on port 50211 to consensus node with node ID = 0. + ```bash # try and detect if the port forward is already setup netstat -na | grep 50211 @@ -17,28 +18,35 @@ kubectl port-forward -n "${SOLO_NAMESPACE}" pod/network-node1-0 50211:50211 ``` To get the address book from the ledger, run the following command: + ```bash cd /examples/address-book task get:ledger:addressbook ``` -It will output the address book in JSON format to: + +It will output the address book in JSON format to: + * `examples/address-book/localhost/sysfiles/addressBook.json` * `examples/address-book/localhost/sysfiles/nodeDetails.json` You can update the address book files with your favorite text editor. Once the files are ready, you can upload them to the ledger by running the following command: + ```bash cd /examples/address-book task update:ledger:addressbook ``` To get the address book from the mirror node, run the following command: + ```bash cd /examples/address-book task get:mirror:addressbook ``` + NOTE: Mirror Node may not pick up the changes automatically, it might require running some transactions through, example: + ```bash cd npm run solo -- account create diff --git a/src/commands/flags.ts b/src/commands/flags.ts index 7eba8887a..1af812d92 100644 --- a/src/commands/flags.ts +++ b/src/commands/flags.ts @@ -619,7 +619,7 @@ export class Flags { name: 'operator-id', definition: { describe: 'Operator ID', - defaultValue: constants.OPERATOR_ID, + defaultValue: undefined, type: 'string', }, prompt: async function promptOperatorId(task: ListrTaskWrapper, input: any) { @@ -640,7 +640,7 @@ export class Flags { name: 'operator-key', definition: { describe: 'Operator Key', - defaultValue: constants.OPERATOR_KEY, + defaultValue: undefined, type: 'string', }, prompt: async function promptOperatorKey(task: ListrTaskWrapper, input: any) { diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index 51dbf9a87..6e2f63f4f 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -33,6 +33,7 @@ import * as fs from 'node:fs'; import * as path from 'node:path'; import type {Optional, SoloListrTask} from '../types/index.js'; import type {Namespace} from '../core/config/remote/types.js'; +import * as Base64 from 'js-base64'; interface MirrorNodeDeployConfigClass { chartDirectory: string; @@ -53,6 +54,8 @@ interface MirrorNodeDeployConfigClass { clusterSetupNamespace: string; soloChartVersion: string; pinger: boolean; + operatorId: string; + operatorKey: string; customMirrorNodeDatabaseValuePath: Optional; storageType: constants.StorageType; storageAccessKey: string; @@ -104,6 +107,8 @@ export class MirrorNodeCommand extends BaseCommand { flags.clusterSetupNamespace, flags.soloChartVersion, flags.customMirrorNodeDatabaseValuePath, + flags.operatorId, + flags.operatorKey, flags.storageType, flags.storageAccessKey, flags.storageSecrets, @@ -234,6 +239,8 @@ export class MirrorNodeCommand extends BaseCommand { flags.valuesFile, flags.mirrorNodeVersion, flags.pinger, + flags.operatorId, + flags.operatorKey, flags.soloChartVersion, ]); @@ -255,6 +262,8 @@ export class MirrorNodeCommand extends BaseCommand { // user defined values later to override predefined values ctx.config.valuesArg += await self.prepareValuesArg(ctx.config); + await self.accountManager.loadNodeClient(ctx.config.namespace); + if (ctx.config.pinger) { const startAccId = constants.HEDERA_NODE_ACCOUNT_ID_START; const networkPods = await 
this.k8.getPodsByLabel(['solo.hedera.com/type=network-node']); @@ -263,9 +272,29 @@ export class MirrorNodeCommand extends BaseCommand { const pod = networkPods[0]; ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.nodes.0.accountId=${startAccId}`; ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.nodes.0.host=${pod.status.podIP}`; - - ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.operator.accountId=${constants.OPERATOR_ID}`; - ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.operator.privateKey=${constants.OPERATOR_KEY}`; + ctx.config.valuesArg += ' --set monitor.config.hedera.mirror.monitor.nodes.0.nodeId=0'; + + const operatorId = ctx.config.operatorId || constants.OPERATOR_ID; + ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.operator.accountId=${operatorId}`; + + if (ctx.config.operatorKey) { + this.logger.info('Using provided operator key'); + ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.operator.privateKey=${ctx.config.operatorKey}`; + } else { + try { + const secrets = await this.k8.getSecretsByLabel([`solo.hedera.com/account-id=${operatorId}`]); + if (secrets.length === 0) { + this.logger.info(`No k8s secret found for operator account id ${operatorId}, use default one`); + ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.operator.privateKey=${constants.OPERATOR_KEY}`; + } else { + this.logger.info('Using operator key from k8s secret'); + const operatorKeyFromK8 = Base64.decode(secrets[0].data.privateKey); + ctx.config.valuesArg += ` --set monitor.config.hedera.mirror.monitor.operator.privateKey=${operatorKeyFromK8}`; + } + } catch (e: Error | any) { + throw new SoloError(`Error getting operator key: ${e.message}`, e); + } + } } } @@ -273,8 +302,6 @@ export class MirrorNodeCommand extends BaseCommand { throw new SoloError(`namespace ${ctx.config.namespace} does not exist`); } - await self.accountManager.loadNodeClient(ctx.config.namespace); - return ListrLease.newAcquireLeaseTask(lease, task); }, }, diff --git a/src/commands/relay.ts b/src/commands/relay.ts index 5754984f4..1be3c97c1 100644 --- a/src/commands/relay.ts +++ b/src/commands/relay.ts @@ -29,6 +29,7 @@ import {type Opts} from '../types/command_types.js'; import {ListrLease} from '../core/lease/listr_lease.js'; import {RelayComponent} from '../core/config/remote/components/relay_component.js'; import {ComponentType} from '../core/config/remote/enumerations.js'; +import * as Base64 from 'js-base64'; export class RelayCommand extends BaseCommand { private readonly profileManager: ProfileManager; @@ -105,12 +106,26 @@ export class RelayCommand extends BaseCommand { valuesArg += ` --set replicaCount=${replicaCount}`; } - if (operatorID) { - valuesArg += ` --set config.OPERATOR_ID_MAIN=${operatorID}`; - } + const operatorIdUsing = operatorID || constants.OPERATOR_ID; + valuesArg += ` --set config.OPERATOR_ID_MAIN=${operatorIdUsing}`; if (operatorKey) { + // use user provided operatorKey if available valuesArg += ` --set config.OPERATOR_KEY_MAIN=${operatorKey}`; + } else { + try { + const secrets = await this.k8.getSecretsByLabel([`solo.hedera.com/account-id=${operatorIdUsing}`]); + if (secrets.length === 0) { + this.logger.info(`No k8s secret found for operator account id ${operatorIdUsing}, use default one`); + valuesArg += ` --set config.OPERATOR_KEY_MAIN=${constants.OPERATOR_KEY}`; + } else { + this.logger.info('Using operator key from k8s secret'); + const operatorKeyFromK8 = 
Base64.decode(secrets[0].data.privateKey); + valuesArg += ` --set config.OPERATOR_KEY_MAIN=${operatorKeyFromK8}`; + } + } catch (e: Error | any) { + throw new SoloError(`Error getting operator key: ${e.message}`, e); + } } if (!nodeAliases) { @@ -202,6 +217,8 @@ export class RelayCommand extends BaseCommand { self.configManager.update(argv); + flags.disablePrompts([flags.operatorId, flags.operatorKey]); + await self.configManager.executePrompt(task, RelayCommand.DEPLOY_FLAGS_LIST); // prompt if inputs are empty and set it in the context @@ -231,6 +248,7 @@ export class RelayCommand extends BaseCommand { constants.JSON_RPC_RELAY_CHART, constants.JSON_RPC_RELAY_CHART, ); + await self.accountManager.loadNodeClient(ctx.config.namespace); config.valuesArg = await self.prepareValuesArg( config.valuesFile, config.nodeAliases, @@ -298,6 +316,7 @@ export class RelayCommand extends BaseCommand { throw new SoloError('Error installing relays', e); } finally { await lease.release(); + await self.accountManager.close(); } return true; From 4ca488beb14cbc0967ac6b0ebd60f962b9f889bc Mon Sep 17 00:00:00 2001 From: Jan Milenkov Date: Sat, 18 Jan 2025 10:17:02 +0200 Subject: [PATCH 2/4] feat: for v0.59.x or greater set the internal IP address to 127.0.0.1 to avoid an ISS, in config.txt (#1162) Signed-off-by: instamenta Signed-off-by: Jeromy Cannon Signed-off-by: Alex Kuzmin Co-authored-by: Jeromy Cannon Co-authored-by: Alex Kuzmin --- .github/workflows/flow-task-test.yaml | 1 + .github/workflows/zxc-e2e-test.yaml | 2 +- Taskfile.helper.yml | 2 +- .../performance-tuning/latitude/Taskfile.yml | 2 +- .../latitude/application.properties | 2 + resources/templates/application.env | 0 resources/templates/application.properties | 2 + src/commands/flags.ts | 7 ++- src/core/profile_manager.ts | 43 ++++++++++++++----- test/e2e/commands/node_local_hedera.test.ts | 2 + test/e2e/commands/node_local_ptt.test.ts | 2 + test/test_add.ts | 4 +- test/unit/core/profile_manager.test.ts | 14 +++++- version.ts | 5 ++- 14 files changed, 69 insertions(+), 19 deletions(-) create mode 100644 resources/templates/application.env diff --git a/.github/workflows/flow-task-test.yaml b/.github/workflows/flow-task-test.yaml index 5f3d17286..22d553e15 100644 --- a/.github/workflows/flow-task-test.yaml +++ b/.github/workflows/flow-task-test.yaml @@ -73,6 +73,7 @@ jobs: - name: Run Example Task File Test with type ${{ matrix.type }} run: | + export CONSENSUS_NODE_VERSION=v0.58.3 task default .github/workflows/script/solo_smoke_test.sh ${{ matrix.type }} task clean diff --git a/.github/workflows/zxc-e2e-test.yaml b/.github/workflows/zxc-e2e-test.yaml index 5a9ddc0c8..bbde0a076 100644 --- a/.github/workflows/zxc-e2e-test.yaml +++ b/.github/workflows/zxc-e2e-test.yaml @@ -167,7 +167,7 @@ jobs: if: ${{ runner.os == 'linux' && (inputs.npm-test-script == 'test-e2e-node-local-hedera' || inputs.npm-test-script == 'test-e2e-node-local-ptt' || inputs.npm-test-script == 'test-e2e-node-add-local') && !cancelled() && !failure() }} run: | cd .. 
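+          # v0.58.3 matches LOCAL_HEDERA_PLATFORM_VERSION in version.ts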
- git clone https://github.com/hashgraph/hedera-services.git --depth 1 --branch v0.58.1 + git clone https://github.com/hashgraph/hedera-services.git --depth 1 --branch v0.58.3 cd hedera-services ls -ltr ${{ env.CG_EXEC }} ./gradlew assemble --stacktrace --info diff --git a/Taskfile.helper.yml b/Taskfile.helper.yml index 8b7dcdccd..2953abee8 100644 --- a/Taskfile.helper.yml +++ b/Taskfile.helper.yml @@ -196,7 +196,7 @@ tasks: cmds: - | if [[ "${CONSENSUS_NODE_VERSION}" != "" ]]; then - export CONSENSUS_NODE_FLAG='--release-tag ${CONSENSUS_NODE_VERSION}' + export CONSENSUS_NODE_FLAG='--release-tag {{.CONSENSUS_NODE_VERSION}}' fi SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${CONSENSUS_NODE_FLAG} ${LOCAL_BUILD_FLAG} -q --dev diff --git a/examples/performance-tuning/latitude/Taskfile.yml b/examples/performance-tuning/latitude/Taskfile.yml index ba356d4e5..147efbd11 100644 --- a/examples/performance-tuning/latitude/Taskfile.yml +++ b/examples/performance-tuning/latitude/Taskfile.yml @@ -8,7 +8,7 @@ vars: env: SOLO_NETWORK_SIZE: 10 SOLO_NAMESPACE: %SOLO_NAMESPACE% - SOLO_CHART_VERSION: 0.42.3 + SOLO_CHART_VERSION: 0.42.10 #CONSENSUS_NODE_VERSION: 0.0.0 VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml" SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt" diff --git a/examples/performance-tuning/latitude/application.properties b/examples/performance-tuning/latitude/application.properties index 03e4cc076..f8e26acb9 100644 --- a/examples/performance-tuning/latitude/application.properties +++ b/examples/performance-tuning/latitude/application.properties @@ -1,3 +1,5 @@ contracts.chainId=298 entities.unlimitedAutoAssociationsEnabled=true bootstrap.throttleJsonDef.resource=genesis/throttles-dev.json +networkAdmin.exportCandidateRoster=true +addressBook.useRosterLifecycle=true diff --git a/resources/templates/application.env b/resources/templates/application.env new file mode 100644 index 000000000..e69de29bb diff --git a/resources/templates/application.properties b/resources/templates/application.properties index 0b4fdccdf..31d655cf6 100644 --- a/resources/templates/application.properties +++ b/resources/templates/application.properties @@ -18,3 +18,5 @@ blockStream.streamMode=RECORDS scheduling.longTermEnabled=false # TODO: uncomment this when we are ready to use genesis-network.json #addressBook.useRosterLifecycle=true +# TODO: we can remove this after we no longer need less than v0.59.x +networkAdmin.exportCandidateRoster=true diff --git a/src/commands/flags.ts b/src/commands/flags.ts index 1af812d92..82e2fb637 100644 --- a/src/commands/flags.ts +++ b/src/commands/flags.ts @@ -884,8 +884,10 @@ export class Flags { constName: 'applicationEnv', name: 'application-env', definition: { - describe: 'application.env file for node', - defaultValue: '', + describe: + 'the application.env file for the node provides environment variables to the solo-container' + + ' to be used when the hedera platform is started', + defaultValue: path.join(constants.SOLO_CACHE_DIR, 'templates', 'application.env'), type: 'string', }, prompt: undefined, @@ -1838,6 +1840,7 @@ export class Flags { static readonly nodeConfigFileFlags = new Map( [ Flags.apiPermissionProperties, + Flags.applicationEnv, Flags.applicationProperties, Flags.bootstrapProperties, Flags.log4j2Xml, diff --git a/src/core/profile_manager.ts b/src/core/profile_manager.ts index 29ce21e84..d323ae1c9 100644 --- 
a/src/core/profile_manager.ts +++ b/src/core/profile_manager.ts @@ -34,7 +34,7 @@ import type {AnyObject, DirPath, NodeAlias, NodeAliases, Path} from '../types/al import type {Optional} from '../types/index.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; -import {HEDERA_PLATFORM_VERSION} from '../../version.js'; +import * as versions from '../../version.js'; @injectable() export class ProfileManager { @@ -254,13 +254,11 @@ export class ProfileManager { yamlRoot, ); - if (this.configManager.getFlag(flags.applicationEnv)) { - this._setFileContentsAsValue( - 'hedera.configMaps.applicationEnv', - this.configManager.getFlag(flags.applicationEnv), - yamlRoot, - ); - } + this._setFileContentsAsValue( + 'hedera.configMaps.applicationEnv', + path.join(stagingDir, 'templates', 'application.env'), + yamlRoot, + ); if (profile.consensus) { // set default for consensus pod @@ -473,7 +471,7 @@ export class ProfileManager { throw new MissingArgumentError('nodeAccountMap the map of node IDs to account IDs is required'); } - if (!releaseTag) releaseTag = HEDERA_PLATFORM_VERSION; + if (!releaseTag) releaseTag = versions.HEDERA_PLATFORM_VERSION; if (!fs.existsSync(destPath)) { throw new IllegalArgumentError(`config destPath does not exist: ${destPath}`, destPath); @@ -494,7 +492,32 @@ export class ProfileManager { let nodeSeq = 0; for (const nodeAlias of nodeAccountMap.keys()) { - const internalIP = Templates.renderFullyQualifiedNetworkPodName(namespace, nodeAlias); + let internalIP: string; + + //? Explanation: for v0.59.x the internal IP address is set to 127.0.0.1 to avoid an ISS + + // for versions that satisfy 0.59.x + if (semver.satisfies(releaseVersion, '^0.59.0', {includePrerelease: true})) { + internalIP = '127.0.0.1'; + } + + // versions less than 0.59.0 + else if ( + semver.lt( + releaseVersion, + '0.59.0', + // @ts-expect-error TS2353: Object literal may only specify known properties + {includePrerelease: true}, + ) + ) { + internalIP = Templates.renderFullyQualifiedNetworkPodName(namespace, nodeAlias); + } + + // versions greater than 0.59.0 + else { + internalIP = '127.0.0.1'; + } + const externalIP = Templates.renderFullyQualifiedNetworkSvcName(namespace, nodeAlias); const account = nodeAccountMap.get(nodeAlias); diff --git a/test/e2e/commands/node_local_hedera.test.ts b/test/e2e/commands/node_local_hedera.test.ts index 556e70dcd..bf794d57b 100644 --- a/test/e2e/commands/node_local_hedera.test.ts +++ b/test/e2e/commands/node_local_hedera.test.ts @@ -28,6 +28,7 @@ import {Duration} from '../../../src/core/time/duration.js'; import {type NodeCommand} from '../../../src/commands/node/index.js'; import {type AccountCommand} from '../../../src/commands/account.js'; import {type AccountManager} from '../../../src/core/account_manager.js'; +import {LOCAL_HEDERA_PLATFORM_VERSION} from '../../../version.js'; const LOCAL_HEDERA = 'local-hedera-app'; const argv = getDefaultArgv(); @@ -43,6 +44,7 @@ let hederaK8: K8; console.log('Starting local build for Hedera app'); argv[flags.localBuildPath.name] = 'node1=../hedera-services/hedera-node/data/,../hedera-services/hedera-node/data'; argv[flags.namespace.name] = LOCAL_HEDERA; +argv[flags.releaseTag.name] = LOCAL_HEDERA_PLATFORM_VERSION; e2eTestSuite( LOCAL_HEDERA, diff --git a/test/e2e/commands/node_local_ptt.test.ts b/test/e2e/commands/node_local_ptt.test.ts index 341d572fa..fb4e7b93d 100644 --- a/test/e2e/commands/node_local_ptt.test.ts +++ b/test/e2e/commands/node_local_ptt.test.ts @@ -20,6 +20,7 
@@ import {Flags as flags} from '../../../src/commands/flags.js'; import {e2eTestSuite, getDefaultArgv, TEST_CLUSTER} from '../../test_util.js'; import {Duration} from '../../../src/core/time/duration.js'; import {type K8} from '../../../src/core/k8.js'; +import {LOCAL_HEDERA_PLATFORM_VERSION} from '../../../version.js'; const LOCAL_PTT = 'local-ptt-app'; const argv = getDefaultArgv(); @@ -37,6 +38,7 @@ argv[flags.app.name] = 'PlatformTestingTool.jar'; argv[flags.appConfig.name] = '../hedera-services/platform-sdk/platform-apps/tests/PlatformTestingTool/src/main/resources/FCMFCQ-Basic-2.5k-5m.json'; argv[flags.namespace.name] = LOCAL_PTT; +argv[flags.releaseTag.name] = LOCAL_HEDERA_PLATFORM_VERSION; e2eTestSuite(LOCAL_PTT, argv, undefined, undefined, undefined, undefined, undefined, undefined, true, bootstrapResp => { describe('Node for platform app should start successfully', () => { diff --git a/test/test_add.ts b/test/test_add.ts index 0cf7de96f..8ed2a8856 100644 --- a/test/test_add.ts +++ b/test/test_add.ts @@ -31,6 +31,7 @@ import * as NodeCommandConfigs from '../src/commands/node/configs.js'; import type {NodeAlias} from '../src/types/aliases.js'; import type {NetworkNodeServices} from '../src/core/network_node_services.js'; import {Duration} from '../src/core/time/duration.js'; +import {LOCAL_HEDERA_PLATFORM_VERSION} from '../version.js'; const defaultTimeout = Duration.ofMinutes(2).toMillis(); @@ -48,7 +49,8 @@ export function testNodeAdd( argv[flags.generateTlsKeys.name] = true; // set the env variable SOLO_CHARTS_DIR if developer wants to use local Solo charts argv[flags.chartDirectory.name] = process.env.SOLO_CHARTS_DIR ?? undefined; - argv[flags.releaseTag.name] = HEDERA_PLATFORM_VERSION_TAG; + argv[flags.releaseTag.name] = + !localBuildPath || localBuildPath === '' ? 
HEDERA_PLATFORM_VERSION_TAG : LOCAL_HEDERA_PLATFORM_VERSION; argv[flags.namespace.name] = namespace; argv[flags.force.name] = true; argv[flags.persistentVolumeClaims.name] = true; diff --git a/test/unit/core/profile_manager.test.ts b/test/unit/core/profile_manager.test.ts index 0803368e6..2f3c880da 100644 --- a/test/unit/core/profile_manager.test.ts +++ b/test/unit/core/profile_manager.test.ts @@ -29,11 +29,13 @@ import * as version from '../../../version.js'; import type {NodeAlias} from '../../../src/types/aliases.js'; import {container} from 'tsyringe-neo'; import {resetTestContainer} from '../../test_container.js'; +import {Templates} from '../../../src/core/templates.js'; describe('ProfileManager', () => { let tmpDir: string, configManager: ConfigManager, profileManager: ProfileManager, cacheDir: string; const testProfileFile = path.join('test', 'data', 'test-profiles.yaml'); + let stagingDir = ''; before(() => { resetTestContainer(); @@ -45,10 +47,18 @@ describe('ProfileManager', () => { configManager.setFlag(flags.releaseTag, version.HEDERA_PLATFORM_VERSION); cacheDir = configManager.getFlag(flags.cacheDir) as string; configManager.setFlag(flags.apiPermissionProperties, path.join(cacheDir, 'templates', 'api-permission.properties')); + configManager.setFlag(flags.applicationEnv, path.join(cacheDir, 'templates', 'application.env')); configManager.setFlag(flags.applicationProperties, path.join(cacheDir, 'templates', 'application.properties')); configManager.setFlag(flags.bootstrapProperties, path.join(cacheDir, 'templates', 'bootstrap.properties')); configManager.setFlag(flags.log4j2Xml, path.join(cacheDir, 'templates', 'log4j2.xml')); configManager.setFlag(flags.settingTxt, path.join(cacheDir, 'templates', 'settings.txt')); + stagingDir = Templates.renderStagingDir( + configManager.getFlag(flags.cacheDir), + configManager.getFlag(flags.releaseTag), + ); + if (!fs.existsSync(stagingDir)) { + fs.mkdirSync(stagingDir, {recursive: true}); + } }); after(() => { @@ -130,10 +140,12 @@ describe('ProfileManager', () => { configManager.setFlag(flags.profileFile, testProfileFile); // profileManager.loadProfiles(true) - const file = path.join(tmpDir, '_setFileContentsAsValue.txt'); + const file = path.join(tmpDir, 'application.env'); const fileContents = '# row 1\n# row 2\n# row 3'; fs.writeFileSync(file, fileContents); configManager.setFlag(flags.applicationEnv, file); + const destFile = path.join(stagingDir, 'templates', 'application.env'); + fs.cpSync(file, destFile, {force: true}); const cachedValuesFile = await profileManager.prepareValuesForSoloChart('test'); const valuesYaml: any = yaml.parse(fs.readFileSync(cachedValuesFile).toString()); expect(valuesYaml.hedera.configMaps.applicationEnv).to.equal(fileContents); diff --git a/version.ts b/version.ts index 6396fd36a..0b2b6bdfc 100644 --- a/version.ts +++ b/version.ts @@ -20,8 +20,9 @@ */ export const HELM_VERSION = 'v3.14.2'; -export const SOLO_CHART_VERSION = '0.42.4'; -export const HEDERA_PLATFORM_VERSION = 'v0.58.3'; +export const SOLO_CHART_VERSION = '0.42.10'; +export const HEDERA_PLATFORM_VERSION = 'v0.59.0-main.x5322bdc'; +export const LOCAL_HEDERA_PLATFORM_VERSION = 'v0.58.3'; export const MIRROR_NODE_VERSION = '0.120.1'; export const HEDERA_EXPLORER_VERSION = '0.2.1'; export const HEDERA_JSON_RPC_RELAY_VERSION = 'v0.63.2'; From fe42eddb331741408b8f4b8bc215225e0b3b9536 Mon Sep 17 00:00:00 2001 From: Jan Milenkov Date: Sat, 18 Jan 2025 10:22:37 +0200 Subject: [PATCH 3/4] feat: `solo deployment create` should use the context and 
cluster provided for where to save the remote config (#1142) Signed-off-by: instamenta --- src/commands/cluster/configs.ts | 11 ++ src/commands/cluster/handlers.ts | 4 +- src/commands/cluster/tasks.ts | 165 ++++++++++++++++------------- src/commands/deployment.ts | 59 ++++++++--- src/core/config/local_config.ts | 9 +- src/core/k8.ts | 2 +- test/unit/commands/cluster.test.ts | 25 +++-- 7 files changed, 171 insertions(+), 104 deletions(-) diff --git a/src/commands/cluster/configs.ts b/src/commands/cluster/configs.ts index 2aea57685..d1f9ece95 100644 --- a/src/commands/cluster/configs.ts +++ b/src/commands/cluster/configs.ts @@ -20,6 +20,7 @@ import {Flags as flags} from '../flags.js'; import * as constants from '../../core/constants.js'; import {ListrEnquirerPromptAdapter} from '@listr2/prompt-adapter-enquirer'; import {SoloError} from '../../core/errors.js'; +import {type Namespace} from '../../core/config/remote/types.js'; export const CONNECT_CONFIGS_NAME = 'connectConfig'; @@ -123,3 +124,13 @@ export interface ClusterResetConfigClass { clusterName: string; clusterSetupNamespace: string; } + +export interface SelectClusterContextContext { + config: { + quiet: boolean; + namespace: Namespace; + clusterName: string; + context: string; + clusters: string[]; + }; +} diff --git a/src/commands/cluster/handlers.ts b/src/commands/cluster/handlers.ts index 924edd723..f361288f8 100644 --- a/src/commands/cluster/handlers.ts +++ b/src/commands/cluster/handlers.ts @@ -45,10 +45,10 @@ export class ClusterCommandHandlers implements CommandHandlers { this.tasks.initialize(argv, connectConfigBuilder.bind(this)), this.parent.setupHomeDirectoryTask(), this.parent.getLocalConfig().promptLocalConfigTask(this.parent.getK8()), - this.tasks.selectContext(argv), + this.tasks.selectContext(), RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.readClustersFromRemoteConfig(argv), - this.tasks.updateLocalConfig(argv), + this.tasks.updateLocalConfig(), ], { concurrent: false, diff --git a/src/commands/cluster/tasks.ts b/src/commands/cluster/tasks.ts index 9325d79e0..51bec5f4e 100644 --- a/src/commands/cluster/tasks.ts +++ b/src/commands/cluster/tasks.ts @@ -26,13 +26,15 @@ import chalk from 'chalk'; import {ListrLease} from '../../core/lease/listr_lease.js'; import {ErrorMessages} from '../../core/error_messages.js'; import {SoloError} from '../../core/errors.js'; -import {type Context} from '@kubernetes/client-node'; import {RemoteConfigManager} from '../../core/config/remote/remote_config_manager.js'; -import {type RemoteConfigDataWrapper} from '../../core/config/remote/remote_config_data_wrapper.js'; -import {type K8} from '../../core/k8.js'; +import type {RemoteConfigDataWrapper} from '../../core/config/remote/remote_config_data_wrapper.js'; +import type {K8} from '../../core/k8.js'; +import type {Cluster} from '@kubernetes/client-node/dist/config_types.js'; +import type {SoloListrTask, SoloListrTaskWrapper} from '../../types/index.js'; +import type {SelectClusterContextContext} from './configs.js'; +import type {Namespace} from '../../core/config/remote/types.js'; +import type {LocalConfig} from '../../core/config/local_config.js'; import {ListrEnquirerPromptAdapter} from '@listr2/prompt-adapter-enquirer'; -import {type LocalConfig} from '../../core/config/local_config.js'; -import {type Cluster} from '@kubernetes/client-node/dist/config_types.js'; export class ClusterCommandTasks { private readonly parent: BaseCommand; @@ -44,11 +46,11 @@ export class ClusterCommandTasks { this.parent = parent; } - 
testConnectionToCluster(cluster: string, localConfig: LocalConfig, parentTask: ListrTaskWrapper) { + testConnectionToCluster(cluster: string, localConfig: LocalConfig, parentTask: ListrTaskWrapper) { const self = this; return { title: `Test connection to cluster: ${chalk.cyan(cluster)}`, - task: async (_, subTask: ListrTaskWrapper) => { + task: async (_, subTask: ListrTaskWrapper) => { let context = localConfig.clusterContextMapping[cluster]; if (!context) { const isQuiet = self.parent.getConfigManager().getFlag(flags.quiet); @@ -77,7 +79,7 @@ export class ClusterCommandTasks { const self = this; return { title: `Pull and validate remote configuration for cluster: ${chalk.cyan(cluster)}`, - task: async (_, subTask: ListrTaskWrapper) => { + task: async (_, subTask: ListrTaskWrapper) => { const context = localConfig.clusterContextMapping[cluster]; self.parent.getK8().setCurrentContext(context); const remoteConfigFromOtherCluster = await self.parent.getRemoteConfigManager().get(); @@ -118,7 +120,7 @@ export class ClusterCommandTasks { }; } - updateLocalConfig(argv) { + updateLocalConfig(): SoloListrTask { return new Task('Update local configuration', async (ctx: any, task: ListrTaskWrapper) => { this.parent.logger.info('Compare local and remote configuration...'); const configManager = this.parent.getConfigManager(); @@ -128,7 +130,7 @@ export class ClusterCommandTasks { // Update current deployment with cluster list from remoteConfig const localConfig = this.parent.getLocalConfig(); const localDeployments = localConfig.deployments; - const remoteClusterList = []; + const remoteClusterList: string[] = []; const namespace = remoteConfig.metadata.name; localConfig.currentDeploymentName = remoteConfig.metadata.name; @@ -155,11 +157,11 @@ export class ClusterCommandTasks { const cluster = ctx.config.clusters[i]; const context = contexts[i]; - // If a context is provided use it to update the mapping + // If a context is provided, use it to update the mapping if (context) { localConfig.clusterContextMapping[cluster] = context; } else if (!localConfig.clusterContextMapping[cluster]) { - // In quiet mode use the currently selected context to update the mapping + // In quiet mode, use the currently selected context to update the mapping if (isQuiet) { localConfig.clusterContextMapping[cluster] = this.parent.getK8().getKubeConfig().getCurrentContext(); } @@ -176,7 +178,12 @@ export class ClusterCommandTasks { }); } - private async getSelectedContext(task, selectedCluster, localConfig, isQuiet) { + private async getSelectedContext( + task: SoloListrTaskWrapper, + selectedCluster: string, + localConfig: LocalConfig, + isQuiet: boolean, + ) { let selectedContext; if (isQuiet) { selectedContext = this.parent.getK8().getKubeConfig().getCurrentContext(); @@ -187,7 +194,7 @@ export class ClusterCommandTasks { return selectedContext; } - private async promptForContext(task, cluster) { + private async promptForContext(task: SoloListrTaskWrapper, cluster: string) { const kubeContexts = this.parent.getK8().getContexts(); return flags.context.prompt( task, @@ -196,14 +203,19 @@ export class ClusterCommandTasks { ); } - private async selectContextForFirstCluster(task, clusters, localConfig, isQuiet) { + private async selectContextForFirstCluster( + task: SoloListrTaskWrapper, + clusters: string[], + localConfig: LocalConfig, + isQuiet: boolean, + ) { const selectedCluster = clusters[0]; if (localConfig.clusterContextMapping[selectedCluster]) { return localConfig.clusterContextMapping[selectedCluster]; } - // If 
cluster does not exist in LocalConfig mapping prompt the user to select a context or use the current one + // If a cluster does not exist in LocalConfig mapping prompt the user to select a context or use the current one else { return this.getSelectedContext(task, selectedCluster, localConfig, isQuiet); } @@ -252,78 +264,81 @@ export class ClusterCommandTasks { ); } - selectContext(argv) { - return new Task('Read local configuration settings', async (ctx: any, task: ListrTaskWrapper) => { - this.parent.logger.info('Read local configuration settings...'); - const configManager = this.parent.getConfigManager(); - const isQuiet = configManager.getFlag(flags.quiet); - const deploymentName: string = configManager.getFlag(flags.namespace); - let clusters = splitFlagInput(configManager.getFlag(flags.clusterName)); - const contexts = splitFlagInput(configManager.getFlag(flags.context)); - const localConfig = this.parent.getLocalConfig(); - let selectedContext; - let selectedCluster; - - // If one or more contexts are provided use the first one - if (contexts.length) { - selectedContext = contexts[0]; - } - - // If one or more clusters are provided use the first one to determine the context - // from the mapping in the LocalConfig - else if (clusters.length) { - selectedCluster = clusters[0]; - selectedContext = await this.selectContextForFirstCluster(task, clusters, localConfig, isQuiet); - } + selectContext(): SoloListrTask { + return { + title: 'Read local configuration settings', + task: async (_, task) => { + this.parent.logger.info('Read local configuration settings...'); + const configManager = this.parent.getConfigManager(); + const isQuiet = configManager.getFlag(flags.quiet); + const deploymentName: string = configManager.getFlag(flags.namespace); + let clusters = splitFlagInput(configManager.getFlag(flags.clusterName)); + const contexts = splitFlagInput(configManager.getFlag(flags.context)); + const localConfig = this.parent.getLocalConfig(); + let selectedContext: string; + let selectedCluster: string; - // If a deployment name is provided get the clusters associated with the deployment from the LocalConfig - // and select the context from the mapping, corresponding to the first deployment cluster - else if (deploymentName) { - const deployment = localConfig.deployments[deploymentName]; + // If one or more contexts are provided, use the first one + if (contexts.length) { + selectedContext = contexts[0]; + } - if (deployment && deployment.clusters.length) { - selectedCluster = deployment.clusters[0]; - selectedContext = await this.selectContextForFirstCluster(task, deployment.clusters, localConfig, isQuiet); + // If one or more clusters are provided, use the first one to determine the context + // from the mapping in the LocalConfig + else if (clusters.length) { + selectedCluster = clusters[0]; + selectedContext = await this.selectContextForFirstCluster(task, clusters, localConfig, isQuiet); } - // The provided deployment does not exist in the LocalConfig - else { - // Add the deployment to the LocalConfig with the currently selected cluster and context in KubeConfig - if (isQuiet) { - selectedContext = this.parent.getK8().getKubeConfig().getCurrentContext(); - selectedCluster = this.parent.getK8().getKubeConfig().getCurrentCluster().name; - localConfig.deployments[deploymentName] = { - clusters: [selectedCluster], - }; - - if (!localConfig.clusterContextMapping[selectedCluster]) { - localConfig.clusterContextMapping[selectedCluster] = selectedContext; - } + // If a deployment name is 
provided, get the clusters associated with the deployment from the LocalConfig + // and select the context from the mapping, corresponding to the first deployment cluster + else if (deploymentName) { + const deployment = localConfig.deployments[deploymentName]; + + if (deployment && deployment.clusters.length) { + selectedCluster = deployment.clusters[0]; + selectedContext = await this.selectContextForFirstCluster(task, deployment.clusters, localConfig, isQuiet); } - // Prompt user for clusters and contexts + // The provided deployment does not exist in the LocalConfig else { - const promptedClusters = await flags.clusterName.prompt(task, ''); - clusters = splitFlagInput(promptedClusters); - - for (const cluster of clusters) { - if (!localConfig.clusterContextMapping[cluster]) { - localConfig.clusterContextMapping[cluster] = await this.promptForContext(task, cluster); + // Add the deployment to the LocalConfig with the currently selected cluster and context in KubeConfig + if (isQuiet) { + selectedContext = this.parent.getK8().getKubeConfig().getCurrentContext(); + selectedCluster = this.parent.getK8().getKubeConfig().getCurrentCluster().name; + localConfig.deployments[deploymentName] = { + clusters: [selectedCluster], + }; + + if (!localConfig.clusterContextMapping[selectedCluster]) { + localConfig.clusterContextMapping[selectedCluster] = selectedContext; } } - selectedCluster = clusters[0]; - selectedContext = localConfig.clusterContextMapping[clusters[0]]; + // Prompt user for clusters and contexts + else { + const promptedClusters = await flags.clusterName.prompt(task, ''); + clusters = splitFlagInput(promptedClusters); + + for (const cluster of clusters) { + if (!localConfig.clusterContextMapping[cluster]) { + localConfig.clusterContextMapping[cluster] = await this.promptForContext(task, cluster); + } + } + + selectedCluster = clusters[0]; + selectedContext = localConfig.clusterContextMapping[clusters[0]]; + } } } - } - const connectionValid = await this.parent.getK8().testClusterConnection(selectedContext, selectedCluster); - if (!connectionValid) { - throw new SoloError(ErrorMessages.INVALID_CONTEXT_FOR_CLUSTER(selectedContext)); - } - this.parent.getK8().setCurrentContext(selectedContext); - }); + const connectionValid = await this.parent.getK8().testClusterConnection(selectedContext, selectedCluster); + if (!connectionValid) { + throw new SoloError(ErrorMessages.INVALID_CONTEXT_FOR_CLUSTER(selectedContext)); + } + this.parent.getK8().setCurrentContext(selectedContext); + }, + }; } initialize(argv: any, configInit: ConfigBuilder) { diff --git a/src/commands/deployment.ts b/src/commands/deployment.ts index cb2ed87b5..8ace56ebc 100644 --- a/src/commands/deployment.ts +++ b/src/commands/deployment.ts @@ -14,7 +14,7 @@ * limitations under the License. 
* */ -import {Listr, type ListrTaskWrapper} from 'listr2'; +import {Listr} from 'listr2'; import {SoloError} from '../core/errors.js'; import {BaseCommand} from './base.js'; import {Flags as flags} from './flags.js'; @@ -22,16 +22,27 @@ import * as constants from '../core/constants.js'; import {Templates} from '../core/templates.js'; import chalk from 'chalk'; import {RemoteConfigTasks} from '../core/config/remote/remote_config_tasks.js'; -import {ListrLease} from '../core/lease/listr_lease.js'; +import {ClusterCommandTasks} from './cluster/tasks.js'; import type {Namespace} from '../core/config/remote/types.js'; -import {type ContextClusterStructure} from '../types/config_types.js'; -import {type CommandFlag} from '../types/flag_types.js'; -import {type CommandBuilder} from '../types/aliases.js'; +import type {ContextClusterStructure} from '../types/config_types.js'; +import type {CommandFlag} from '../types/flag_types.js'; +import type {CommandBuilder} from '../types/aliases.js'; +import type {Opts} from '../types/command_types.js'; +import type {SoloListrTask} from '../types/index.js'; export class DeploymentCommand extends BaseCommand { + readonly tasks: ClusterCommandTasks; + + constructor(opts: Opts) { + super(opts); + + this.tasks = new ClusterCommandTasks(this, this.k8); + } + private static get DEPLOY_FLAGS_LIST(): CommandFlag[] { return [ flags.quiet, + flags.context, flags.namespace, flags.userEmailAddress, flags.deploymentClusters, @@ -41,9 +52,9 @@ export class DeploymentCommand extends BaseCommand { private async create(argv: any): Promise { const self = this; - const lease = await self.leaseManager.create(); interface Config { + context: string; namespace: Namespace; contextClusterUnparsed: string; contextCluster: ContextClusterStructure; @@ -56,7 +67,7 @@ export class DeploymentCommand extends BaseCommand { [ { title: 'Initialize', - task: async (ctx, task): Promise> => { + task: async (ctx, task) => { self.configManager.update(argv); self.logger.debug('Updated config with argv', {config: self.configManager.config}); @@ -72,23 +83,40 @@ export class DeploymentCommand extends BaseCommand { } as Config; ctx.config.contextCluster = Templates.parseContextCluster(ctx.config.contextClusterUnparsed); - self.logger.debug('Prepared config', {config: ctx.config, cachedConfig: self.configManager.config}); - return ListrLease.newAcquireLeaseTask(lease, task); + self.logger.debug('Prepared config', {config: ctx.config, cachedConfig: self.configManager.config}); }, }, this.setupHomeDirectoryTask(), this.localConfig.promptLocalConfigTask(self.k8), + this.tasks.selectContext(), + { + title: 'Validate context', + task: async (ctx, task) => { + ctx.config.context = ctx.config.context ?? 
self.configManager.getFlag(flags.context); + const availableContexts = self.k8.getContextNames(); + + if (availableContexts.includes(ctx.config.context)) { + task.title += chalk.green(`- validated context ${ctx.config.context}`); + return; + } + + throw new SoloError( + `Context with name ${ctx.config.context} not found, available contexts include ${availableContexts.join(', ')}`, + ); + }, + }, + this.tasks.updateLocalConfig(), { title: 'Validate cluster connections', - task: async (ctx, task): Promise> => { - const subTasks = []; + task: async (ctx, task) => { + const subTasks: SoloListrTask[] = []; for (const context of Object.keys(ctx.config.contextCluster)) { const cluster = ctx.config.contextCluster[context]; subTasks.push({ title: `Testing connection to cluster: ${chalk.cyan(cluster)}`, - task: async (_: Context, task: ListrTaskWrapper) => { + task: async (_, task) => { if (!(await self.k8.testClusterConnection(context, cluster))) { task.title = `${task.title} - ${chalk.red('Cluster connection failed')}`; throw new SoloError(`Cluster connection failed for: ${cluster}`); @@ -106,7 +134,8 @@ export class DeploymentCommand extends BaseCommand { { title: 'Create remoteConfig in clusters', task: async (ctx, task) => { - const subTasks = []; + const subTasks: SoloListrTask[] = []; + for (const context of Object.keys(ctx.config.contextCluster)) { const cluster = ctx.config.contextCluster[context]; subTasks.push(RemoteConfigTasks.createRemoteConfig.bind(this)(cluster, context, ctx.config.namespace)); @@ -127,10 +156,8 @@ export class DeploymentCommand extends BaseCommand { try { await tasks.run(); - } catch (e: Error | any) { + } catch (e: Error | unknown) { throw new SoloError(`Error installing chart ${constants.SOLO_DEPLOYMENT_CHART}`, e); - } finally { - await lease.release(); } return true; diff --git a/src/core/config/local_config.ts b/src/core/config/local_config.ts index 679c1921d..697e524f8 100644 --- a/src/core/config/local_config.ts +++ b/src/core/config/local_config.ts @@ -206,16 +206,18 @@ export class LocalConfig implements LocalConfigData { if (parsedContexts.length < parsedClusters.length) { if (!isQuiet) { - const promptedContexts = []; + const promptedContexts: string[] = []; for (const cluster of parsedClusters) { const kubeContexts = k8.getContexts(); - const context = await flags.context.prompt( + const context: string = await flags.context.prompt( task, kubeContexts.map(c => c.name), cluster, ); self.clusterContextMapping[cluster] = context; promptedContexts.push(context); + + self.configManager.setFlag(flags.context, context); } self.configManager.setFlag(flags.context, promptedContexts.join(',')); } else { @@ -229,12 +231,15 @@ export class LocalConfig implements LocalConfigData { for (let i = 0; i < parsedClusters.length; i++) { const cluster = parsedClusters[i]; self.clusterContextMapping[cluster] = parsedContexts[i]; + + self.configManager.setFlag(flags.context, parsedContexts[i]); } } self.userEmailAddress = userEmailAddress; self.deployments = deployments; self.currentDeploymentName = deploymentName; + self.validate(); await self.write(); }, diff --git a/src/core/k8.ts b/src/core/k8.ts index e025f1275..c19aa9650 100644 --- a/src/core/k8.ts +++ b/src/core/k8.ts @@ -1209,7 +1209,7 @@ export class K8 { const tempKubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api); return await tempKubeClient .listNamespace() - .then(() => this.getKubeConfig().getCurrentCluster().name === cluster) + .then(() => true) .catch(() => false); } diff --git 
a/test/unit/commands/cluster.test.ts b/test/unit/commands/cluster.test.ts index 2b1777271..dda661392 100644 --- a/test/unit/commands/cluster.test.ts +++ b/test/unit/commands/cluster.test.ts @@ -158,7 +158,8 @@ describe('ClusterCommand unit tests', () => { const getBaseCommandOpts = ( sandbox: sinon.SinonSandbox, remoteConfig: any = {}, - // @ts-ignore + + // @ts-expect-error - TS2344: Type CommandFlag does not satisfy the constraint string | number | symbol stubbedFlags: Record[] = [], opts: any = { testClusterConnectionError: false, @@ -228,9 +229,13 @@ describe('ClusterCommand unit tests', () => { describe('updateLocalConfig', () => { async function runUpdateLocalConfigTask(opts) { command = new ClusterCommand(opts); + tasks = new ClusterCommandTasks(command, opts.k8); + + // @ts-expect-error - TS2554: Expected 0 arguments, but got 1. const taskObj = tasks.updateLocalConfig({}); - await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper); + + await taskObj.task({config: {}} as any, sandbox.stub() as unknown as ListrTaskWrapper); return command; } @@ -268,7 +273,7 @@ describe('ClusterCommand unit tests', () => { }, }); const opts = getBaseCommandOpts(sandbox, remoteConfig, []); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore + command = await runUpdateLocalConfigTask(opts); localConfig = new LocalConfig(filePath); expect(localConfig.currentDeploymentName).to.equal('deployment'); @@ -286,7 +291,7 @@ describe('ClusterCommand unit tests', () => { }, }); const opts = getBaseCommandOpts(sandbox, remoteConfig, [[flags.context, 'provided-context']]); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore + command = await runUpdateLocalConfigTask(opts); localConfig = new LocalConfig(filePath); expect(localConfig.currentDeploymentName).to.equal('deployment'); @@ -308,7 +313,7 @@ describe('ClusterCommand unit tests', () => { const opts = getBaseCommandOpts(sandbox, remoteConfig, [ [flags.context, 'provided-context-2,provided-context-3,provided-context-4'], ]); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore + command = await runUpdateLocalConfigTask(opts); localConfig = new LocalConfig(filePath); expect(localConfig.currentDeploymentName).to.equal('deployment'); @@ -329,7 +334,7 @@ describe('ClusterCommand unit tests', () => { }, }); const opts = getBaseCommandOpts(sandbox, remoteConfig, [[flags.quiet, true]]); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore + command = await runUpdateLocalConfigTask(opts); localConfig = new LocalConfig(filePath); expect(localConfig.currentDeploymentName).to.equal('deployment'); @@ -350,7 +355,7 @@ describe('ClusterCommand unit tests', () => { }); const opts = getBaseCommandOpts(sandbox, remoteConfig, []); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore + command = await runUpdateLocalConfigTask(opts); localConfig = new LocalConfig(filePath); expect(localConfig.currentDeploymentName).to.equal('deployment'); @@ -366,9 +371,13 @@ describe('ClusterCommand unit tests', () => { describe('selectContext', () => { async function runSelectContextTask(opts) { command = new ClusterCommand(opts); + tasks = new ClusterCommandTasks(command, opts.k8); + + // @ts-expect-error - TS2554: Expected 0 arguments, but got 1 const taskObj = tasks.selectContext({}); - await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper); + + await taskObj.task({config: {}} as any, sandbox.stub() as unknown as ListrTaskWrapper); return command; } From 
473a650730efa401a23305579534254654425354 Mon Sep 17 00:00:00 2001 From: Ivo Yankov Date: Sat, 18 Jan 2025 10:23:21 +0200 Subject: [PATCH 4/4] feat: Update solo to load remote config near entry point (#1176) Signed-off-by: Ivo Yankov --- src/commands/mirror_node.ts | 2 - src/commands/network.ts | 1 - src/commands/node/handlers.ts | 23 +------- src/commands/relay.ts | 2 - .../config/remote/remote_config_manager.ts | 55 +++++++++++-------- src/index.ts | 19 ++++++- 6 files changed, 52 insertions(+), 50 deletions(-) diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index 6e2f63f4f..f48027ccf 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -305,7 +305,6 @@ export class MirrorNodeCommand extends BaseCommand { return ListrLease.newAcquireLeaseTask(lease, task); }, }, - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), { title: 'Enable mirror-node', task: (_, parentTask) => { @@ -624,7 +623,6 @@ export class MirrorNodeCommand extends BaseCommand { return ListrLease.newAcquireLeaseTask(lease, task); }, }, - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), { title: 'Destroy mirror-node', task: async ctx => { diff --git a/src/commands/network.ts b/src/commands/network.ts index 5dea8d64e..2c20699f5 100644 --- a/src/commands/network.ts +++ b/src/commands/network.ts @@ -509,7 +509,6 @@ export class NetworkCommand extends BaseCommand { return ListrLease.newAcquireLeaseTask(lease, task); }, }, - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), { title: 'Copy gRPC TLS Certificates', task: (ctx, parentTask) => diff --git a/src/commands/node/handlers.ts b/src/commands/node/handlers.ts index 47f247451..e26a72f05 100644 --- a/src/commands/node/handlers.ts +++ b/src/commands/node/handlers.ts @@ -104,7 +104,6 @@ export class NodeCommandHandlers implements CommandHandlers { deletePrepareTaskList(argv: any, lease: Lease) { return [ this.tasks.initialize(argv, deleteConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.validateSingleNodeState({excludedStates: []}), this.tasks.identifyExistingNodes(), this.tasks.loadAdminKey(), @@ -148,7 +147,6 @@ export class NodeCommandHandlers implements CommandHandlers { addPrepareTasks(argv: any, lease: Lease) { return [ this.tasks.initialize(argv, addConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.validateSingleNodeState({excludedStates: []}), this.tasks.checkPVCsEnabled(), this.tasks.identifyExistingNodes(), @@ -201,7 +199,6 @@ export class NodeCommandHandlers implements CommandHandlers { updatePrepareTasks(argv, lease: Lease) { return [ this.tasks.initialize(argv, updateConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.validateSingleNodeState({excludedStates: []}), this.tasks.identifyExistingNodes(), this.tasks.loadAdminKey(), @@ -246,7 +243,6 @@ export class NodeCommandHandlers implements CommandHandlers { upgradePrepareTasks(argv, lease: Lease) { return [ this.tasks.initialize(argv, upgradeConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.validateSingleNodeState({excludedStates: []}), this.tasks.identifyExistingNodes(), this.tasks.loadAdminKey(), @@ -282,7 +278,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, prepareUpgradeConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.prepareUpgradeZip(), 
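+        // remote config loading moved to the yargs middleware in src/index.ts
+        // (remoteConfigManager.loadAndValidate), so it is no longer a per-command task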
this.tasks.sendPrepareUpgradeTransaction(), ], @@ -304,7 +299,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, prepareUpgradeConfigBuilder.bind(this), null), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.prepareUpgradeZip(), this.tasks.sendFreezeUpgradeTransaction(), ], @@ -328,7 +322,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, downloadGeneratedFilesConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.identifyExistingNodes(), this.tasks.downloadNodeGeneratedFiles(), ], @@ -394,7 +387,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, updateConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.loadContextData(argv, NodeCommandHandlers.UPDATE_CONTEXT_FILE, NodeHelper.updateLoadContextParser), ...this.updateSubmitTransactionsTasks(argv), ], @@ -416,7 +408,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, updateConfigBuilder.bind(this), lease, false), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.loadContextData(argv, NodeCommandHandlers.UPDATE_CONTEXT_FILE, NodeHelper.updateLoadContextParser), ...this.updateExecuteTasks(argv), ], @@ -457,7 +448,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, upgradeConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.loadContextData(argv, NodeCommandHandlers.UPGRADE_CONTEXT_FILE, NodeHelper.upgradeLoadContextParser), ...this.upgradeSubmitTransactionsTasks(argv), ], @@ -479,7 +469,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, upgradeConfigBuilder.bind(this), lease, false), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.loadContextData(argv, NodeCommandHandlers.UPGRADE_CONTEXT_FILE, NodeHelper.upgradeLoadContextParser), ...this.upgradeExecuteTasks(argv), ], @@ -654,7 +643,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, addConfigBuilder.bind(this), lease), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.loadContextData(argv, NodeCommandHandlers.ADD_CONTEXT_FILE, helpers.addLoadContextParser), ...this.addSubmitTransactionsTasks(argv), ], @@ -678,7 +666,6 @@ export class NodeCommandHandlers implements CommandHandlers { const action = this.parent.commandActionBuilder( [ this.tasks.initialize(argv, addConfigBuilder.bind(this), lease, false), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), this.tasks.identifyExistingNodes(), this.tasks.loadContextData(argv, NodeCommandHandlers.ADD_CONTEXT_FILE, helpers.addLoadContextParser), ...this.addExecuteTasks(argv), @@ -698,11 +685,7 @@ export class NodeCommandHandlers implements CommandHandlers { async logs(argv: any) { argv = helpers.addFlagsToArgv(argv, NodeFlags.LOGS_FLAGS); const action = this.parent.commandActionBuilder( - [ - this.tasks.initialize(argv, logsConfigBuilder.bind(this), null), - 
-        RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
-        this.tasks.getNodeLogsAndConfigs(),
-      ],
+      [this.tasks.initialize(argv, logsConfigBuilder.bind(this), null), this.tasks.getNodeLogsAndConfigs()],
       {
         concurrent: false,
         rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION,
@@ -740,7 +723,6 @@ export class NodeCommandHandlers implements CommandHandlers {
     const action = this.parent.commandActionBuilder(
       [
         this.tasks.initialize(argv, refreshConfigBuilder.bind(this), lease),
-        RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
         this.validateAllNodeStates({
           acceptedStates: [ConsensusNodeStates.STARTED, ConsensusNodeStates.SETUP, ConsensusNodeStates.INITIALIZED],
         }),
@@ -794,7 +776,6 @@ export class NodeCommandHandlers implements CommandHandlers {
     const action = this.parent.commandActionBuilder(
       [
         this.tasks.initialize(argv, stopConfigBuilder.bind(this), lease),
-        RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
         this.validateAllNodeStates({
           acceptedStates: [ConsensusNodeStates.STARTED, ConsensusNodeStates.SETUP],
         }),
@@ -822,7 +803,6 @@ export class NodeCommandHandlers implements CommandHandlers {
     const action = this.parent.commandActionBuilder(
       [
         this.tasks.initialize(argv, startConfigBuilder.bind(this), lease),
-        RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
         this.validateAllNodeStates({acceptedStates: [ConsensusNodeStates.SETUP]}),
         this.tasks.identifyExistingNodes(),
         this.tasks.uploadStateFiles((ctx: any) => ctx.config.stateFile.length === 0),
@@ -853,7 +833,6 @@ export class NodeCommandHandlers implements CommandHandlers {
     const action = this.parent.commandActionBuilder(
       [
         this.tasks.initialize(argv, setupConfigBuilder.bind(this), lease),
-        RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
         this.validateAllNodeStates({
           acceptedStates: [ConsensusNodeStates.INITIALIZED],
         }),
diff --git a/src/commands/relay.ts b/src/commands/relay.ts
index 1be3c97c1..bc5b2fab7 100644
--- a/src/commands/relay.ts
+++ b/src/commands/relay.ts
@@ -238,7 +238,6 @@ export class RelayCommand extends BaseCommand {
           return ListrLease.newAcquireLeaseTask(lease, task);
         },
       },
-      RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
       {
         title: 'Prepare chart values',
         task: async ctx => {
@@ -369,7 +368,6 @@ export class RelayCommand extends BaseCommand {
           return ListrLease.newAcquireLeaseTask(lease, task);
         },
       },
-      RemoteConfigTasks.loadRemoteConfig.bind(this)(argv),
       {
         title: 'Destroy JSON RPC Relay',
         task: async ctx => {
diff --git a/src/core/config/remote/remote_config_manager.ts b/src/core/config/remote/remote_config_manager.ts
index d2cd892ee..1d8a2aa92 100644
--- a/src/core/config/remote/remote_config_manager.ts
+++ b/src/core/config/remote/remote_config_manager.ts
@@ -184,39 +184,50 @@ export class RemoteConfigManager {
   /* ---------- Listr Task Builders ---------- */

   /**
-   * Builds a task for loading the remote configuration, intended for use with Listr task management.
+   * Performs the loading of the remote configuration.
    * Checks if the configuration is already loaded, otherwise loads and adds the command to history.
    *
    * @param argv - arguments containing command input for historical reference.
-   * @returns a Listr task which loads the remote configuration.
   */
-  public buildLoadTask(argv: {_: string[]}): SoloListrTask<any> {
+  public async loadAndValidate(argv: {_: string[]}) {
     const self = this;
+    try {
+      self.setDefaultNamespaceIfNotSet();
+      self.setDefaultContextIfNotSet();
+    } catch (e) {
+      self.logger.showUser(chalk.red(e.message));
+      return;
+    }

-    return {
-      title: 'Load remote config',
-      task: async (_, task): Promise<void> => {
-        try {
-          self.setDefaultNamespaceIfNotSet();
-          self.setDefaultContextIfNotSet();
-        } catch {
-          return; // TODO
-        }
+    if (!(await self.load())) {
+      self.logger.showUser(chalk.red('remote config not found'));
+
+      // TODO see if this should be disabled to make it an optional feature
+      return;
+      // throw new SoloError('Failed to load remote config')
+    }

-        if (!(await self.load())) {
-          task.title = `${task.title} - ${chalk.red('remote config not found')}`;
+    await RemoteConfigValidator.validateComponents(self.remoteConfig.components, self.k8);

-          // TODO see if this should be disabled to make it an optional feature
-          return;
-          // throw new SoloError('Failed to load remote config')
-        }
+    const currentCommand = argv._.join(' ');
+    self.remoteConfig!.addCommandToHistory(currentCommand);

-        await RemoteConfigValidator.validateComponents(self.remoteConfig.components, self.k8);
+    await self.save();
+  }

-        const currentCommand = argv._.join(' ');
-        self.remoteConfig!.addCommandToHistory(currentCommand);
+  /**
+   * Builds a listr task for loading the remote configuration.
+   *
+   * @param argv - arguments containing command input for historical reference.
+   * @returns a Listr task which loads the remote configuration.
+   */
+  public buildLoadTask(argv: {_: string[]}): SoloListrTask<any> {
+    const self = this;

-        await self.save();
+    return {
+      title: 'Load remote config',
+      task: async (_, task): Promise<void> => {
+        await self.loadAndValidate(argv);
       },
     };
   }
diff --git a/src/index.ts b/src/index.ts
index 896855015..bd4f0698d 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -132,6 +132,23 @@ export function main(argv: any) {
     return argv;
   };

+  const loadRemoteConfig = async (argv: any, yargs: any): Promise<any> => {
+    const command = argv._[0];
+    const subCommand = argv._[1];
+    const skip =
+      command === 'init' ||
+      (command === 'node' && subCommand === 'keys') ||
+      (command === 'cluster' && subCommand === 'connect') ||
+      (command === 'cluster' && subCommand === 'info') ||
+      (command === 'cluster' && subCommand === 'list') ||
+      (command === 'deployment' && subCommand === 'create');
+    if (!skip) {
+      await remoteConfigManager.loadAndValidate(argv);
+    }
+
+    return argv;
+  };
+
   return (
     yargs(hideBin(argv))
       .scriptName('')
@@ -146,7 +163,7 @@
       .wrap(120)
       .demand(1, 'Select a command')
       // @ts-ignore
-      .middleware(processArguments, false) // applyBeforeValidate = false as otherwise middleware is called twice
+      .middleware([processArguments, loadRemoteConfig], false) // applyBeforeValidate = false as otherwise middleware is called twice
       .parse()
   );
 } catch (e: Error | any) {
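
Taken together, the remote_config_manager.ts change turns the old Listr-only task into a plain async method, `loadAndValidate`, that can be called from anywhere in the CLI: it resolves the default namespace and context, loads the config (tolerating a missing one with a red warning rather than a hard failure, per the remaining TODO), validates the components against the cluster, appends the invoked command to the config history, and saves. A minimal sketch of that ordering; `FakeRemoteConfig` and `FakeManager` are illustrative stand-ins, not solo's classes:

```ts
// Stand-in types for illustration only; solo's real classes differ.
interface FakeRemoteConfig {
  components: Record<string, unknown>;
  commandHistory: string[];
}

class FakeManager {
  private config: FakeRemoteConfig | null = null;

  private async load(): Promise<boolean> {
    // pretend to fetch the remote config from the cluster
    this.config = {components: {}, commandHistory: []};
    return this.config !== null;
  }

  public async loadAndValidate(argv: {_: string[]}): Promise<void> {
    if (!(await this.load())) {
      console.error('remote config not found');
      return; // mirrors the patch: a missing config warns instead of throwing
    }
    // 1) validate this.config.components against the cluster (stubbed out)
    // 2) record the invoked command in the config history
    this.config!.commandHistory.push(argv._.join(' '));
    // 3) persist the updated config (stubbed out)
  }
}

void new FakeManager().loadAndValidate({_: ['node', 'setup']});
```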
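`buildLoadTask` survives only as a thin Listr wrapper over `loadAndValidate`, so the middleware path and any remaining task-list path execute the same logic. A sketch of that wrapper pattern, with a simplified task shape standing in for solo's `SoloListrTask`:

```ts
// `NamedTask` is a simplified stand-in for solo's SoloListrTask type.
interface NamedTask {
  title: string;
  task: () => Promise<void>;
}

// Wrap a plain async method as a task, so a task runner and a direct
// `await loader()` call share one implementation.
function asTask(title: string, loader: () => Promise<void>): NamedTask {
  return {title, task: async () => loader()};
}

// Illustrative usage:
// const task = asTask('Load remote config', () => manager.loadAndValidate(argv));
```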
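In src/index.ts, the new middleware skips the commands that must be able to run before any remote config exists: `init`, `node keys`, `cluster connect`, `cluster info`, `cluster list`, and `deployment create`. The chained booleans are correct as written; the same check could also live in a single lookup table, which stays readable as the list grows. The helper below is a hypothetical refactor, not code from this patch:

```ts
// Hypothetical table-driven variant of the middleware's skip check.
const SKIP_REMOTE_CONFIG = new Set<string>([
  'init',
  'node keys',
  'cluster connect',
  'cluster info',
  'cluster list',
  'deployment create',
]);

function shouldSkipRemoteConfig(argv: {_: string[]}): boolean {
  // argv._ holds yargs' positional tokens, e.g. ['node', 'keys']
  const [command, subCommand] = argv._;
  return SKIP_REMOTE_CONFIG.has(command) || SKIP_REMOTE_CONFIG.has(`${command} ${subCommand}`);
}
```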
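Finally, `.middleware([processArguments, loadRemoteConfig], false)` relies on yargs running middleware functions in array order, once per invocation (the `false` keeps them from also running before validation), and always before the command handler. A self-contained sketch of that ordering; the `deploy` command and the log messages are made up for illustration:

```ts
import yargs from 'yargs';
import {hideBin} from 'yargs/helpers';

// Middlewares run in array order before the handler; passing `false`
// for applyBeforeValidation means they run once, after validation.
const first = async (argv: any): Promise<any> => {
  console.log('1: process arguments');
  return argv;
};

const second = async (argv: any): Promise<any> => {
  console.log('2: load remote config (unless the command is skipped)');
  return argv;
};

yargs(hideBin(process.argv))
  .command('deploy', 'illustrative command', {}, () => console.log('3: command handler'))
  .middleware([first, second], false)
  .demandCommand(1)
  .parse();
```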